// SPDX-License-Identifier: GPL-2.0+
/*
 * A wrapper for multiple PHYs which passes all phy_* function calls to
 * multiple (actual) PHY devices. This comes in handy when initializing
 * all PHYs on a HCD and to keep them all in the same state.
 *
 * Copyright (C) 2018 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/phy/phy.h>
#include <linux/of.h>

#include "phy.h"

struct usb_phy_roothub {
	struct phy *phy;
	struct list_head list;
};

/* Allocate a roothub_entry for the phy with the given name */
static int usb_phy_roothub_add_phy_by_name(struct device *dev, const char *name,
					   struct list_head *list)
{
	struct usb_phy_roothub *roothub_entry;
	struct phy *phy;

	phy = devm_of_phy_get(dev, dev->of_node, name);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
	if (!roothub_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&roothub_entry->list);

	roothub_entry->phy = phy;

	list_add_tail(&roothub_entry->list, list);

	return 0;
}

static int usb_phy_roothub_add_phy(struct device *dev, int index,
				   struct list_head *list)
{
	struct usb_phy_roothub *roothub_entry;
	struct phy *phy;

	phy = devm_of_phy_get_by_index(dev, dev->of_node, index);
	if (IS_ERR(phy)) {
		if (PTR_ERR(phy) == -ENODEV)
			return 0;
		else
			return PTR_ERR(phy);
	}

	roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
	if (!roothub_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&roothub_entry->list);

	roothub_entry->phy = phy;

	list_add_tail(&roothub_entry->list, list);

	return 0;
}

struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
{
	struct usb_phy_roothub *phy_roothub;
	int i, num_phys, err;

	if (!IS_ENABLED(CONFIG_GENERIC_PHY))
		return NULL;

	num_phys = of_count_phandle_with_args(dev->of_node, "phys", "#phy-cells");
	if (num_phys <= 0)
		return NULL;

	phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL);
	if (!phy_roothub)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&phy_roothub->list);

	if (!usb_phy_roothub_add_phy_by_name(dev, "usb2-phy", &phy_roothub->list))
		return phy_roothub;

	for (i = 0; i < num_phys; i++) {
		err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list);
		if (err)
			return ERR_PTR(err);
	}

	return phy_roothub;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc);

/**
 * usb_phy_roothub_alloc_usb3_phy - alloc the roothub
 * @dev: the device of the host controller
 *
 * Allocate the usb phy roothub if the host uses a generic usb3-phy.
 *
 * Return: On success, a pointer to the usb_phy_roothub. Otherwise,
 * %NULL if no usb3 phy is used, or %-ENOMEM if out of memory.
 */
struct usb_phy_roothub *usb_phy_roothub_alloc_usb3_phy(struct device *dev)
{
	struct usb_phy_roothub *phy_roothub;
	int num_phys;

	if (!IS_ENABLED(CONFIG_GENERIC_PHY))
		return NULL;

	num_phys = of_count_phandle_with_args(dev->of_node, "phys", "#phy-cells");
	if (num_phys <= 0)
		return NULL;

	phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL);
	if (!phy_roothub)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&phy_roothub->list);

	if (!usb_phy_roothub_add_phy_by_name(dev, "usb3-phy", &phy_roothub->list))
		return phy_roothub;

	return NULL;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc_usb3_phy);

int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub)
{
	struct usb_phy_roothub *roothub_entry;
	struct list_head *head;
	int err;

	if (!phy_roothub)
		return 0;

	head = &phy_roothub->list;

	list_for_each_entry(roothub_entry, head, list) {
		err = phy_init(roothub_entry->phy);
		if (err)
			goto err_exit_phys;
	}

	return 0;

err_exit_phys:
	list_for_each_entry_continue_reverse(roothub_entry, head, list)
		phy_exit(roothub_entry->phy);

	return err;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_init);

int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub)
{
	struct usb_phy_roothub *roothub_entry;
	struct list_head *head;
	int err, ret = 0;

	if (!phy_roothub)
		return 0;

	head = &phy_roothub->list;

	list_for_each_entry(roothub_entry, head, list) {
		err = phy_exit(roothub_entry->phy);
		if (err)
			ret = err;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_exit);

int usb_phy_roothub_set_mode(struct usb_phy_roothub *phy_roothub,
			     enum phy_mode mode)
{
	struct usb_phy_roothub *roothub_entry;
	struct list_head *head;
	int err;

	if (!phy_roothub)
		return 0;

	head = &phy_roothub->list;

	list_for_each_entry(roothub_entry, head, list) {
		err = phy_set_mode(roothub_entry->phy, mode);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	list_for_each_entry_continue_reverse(roothub_entry, head, list)
		phy_power_off(roothub_entry->phy);

	return err;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_set_mode);

int usb_phy_roothub_calibrate(struct usb_phy_roothub *phy_roothub)
{
	struct usb_phy_roothub *roothub_entry;
	struct list_head *head;
	int err;

	if (!phy_roothub)
		return 0;

	head = &phy_roothub->list;

	list_for_each_entry(roothub_entry, head, list) {
		err = phy_calibrate(roothub_entry->phy);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_calibrate);

/**
 * usb_phy_roothub_notify_connect() - connect notification
 * @phy_roothub: the phy roothub, if the host uses a generic phy.
 * @port: the port index for connect
 *
 * If the phy needs to be informed of the connection status, this callback
 * can be used.
 *
 * Returns: %0 if successful, a negative error code otherwise
 */
int usb_phy_roothub_notify_connect(struct usb_phy_roothub *phy_roothub, int port)
{
	struct usb_phy_roothub *roothub_entry;
	struct list_head *head;
	int err;

	if (!phy_roothub)
		return 0;

	head = &phy_roothub->list;

	list_for_each_entry(roothub_entry, head, list) {
		err = phy_notify_connect(roothub_entry->phy, port);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_notify_connect);

/**
 * usb_phy_roothub_notify_disconnect() - disconnect notification
 * @phy_roothub: the phy roothub, if the host uses a generic phy.
 * @port: the port index for disconnect
 *
 * If the phy needs to be informed of the connection status, this callback
 * can be used.
 *
 * Returns: %0 if successful, a negative error code otherwise
 */
int usb_phy_roothub_notify_disconnect(struct usb_phy_roothub *phy_roothub, int port)
{
	struct usb_phy_roothub *roothub_entry;
	struct list_head *head;
	int err;

	if (!phy_roothub)
		return 0;

	head = &phy_roothub->list;

	list_for_each_entry(roothub_entry, head, list) {
		err = phy_notify_disconnect(roothub_entry->phy, port);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_notify_disconnect);

int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub)
{
	struct usb_phy_roothub *roothub_entry;
	struct list_head *head;
	int err;

	if (!phy_roothub)
		return 0;

	head = &phy_roothub->list;

	list_for_each_entry(roothub_entry, head, list) {
		err = phy_power_on(roothub_entry->phy);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	list_for_each_entry_continue_reverse(roothub_entry, head, list)
		phy_power_off(roothub_entry->phy);

	return err;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_power_on);

void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub)
{
	struct usb_phy_roothub *roothub_entry;

	if (!phy_roothub)
		return;

	list_for_each_entry_reverse(roothub_entry, &phy_roothub->list, list)
		phy_power_off(roothub_entry->phy);
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_power_off);

int usb_phy_roothub_suspend(struct device *controller_dev,
			    struct usb_phy_roothub *phy_roothub)
{
	usb_phy_roothub_power_off(phy_roothub);

	/* keep the PHYs initialized so the device can wake up the system */
	if (device_may_wakeup(controller_dev))
		return 0;

	return usb_phy_roothub_exit(phy_roothub);
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_suspend);

int usb_phy_roothub_resume(struct device *controller_dev,
			   struct usb_phy_roothub *phy_roothub)
{
	int err;

	/* if the device can't wake up the system _exit was called */
	if (!device_may_wakeup(controller_dev)) {
		err = usb_phy_roothub_init(phy_roothub);
		if (err)
			return err;
	}

	err = usb_phy_roothub_power_on(phy_roothub);

	/* undo _init if _power_on failed */
	if (err && !device_may_wakeup(controller_dev))
		usb_phy_roothub_exit(phy_roothub);

	return err;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_resume);
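/*
 * Editor's illustration (not part of the original file): a minimal sketch of
 * how a host-controller driver might consume the usb_phy_roothub API above
 * during probe. The surrounding "example_hcd" driver structure is
 * hypothetical; only the usb_phy_roothub_* calls come from this file.
 */
#if 0	/* usage sketch only, not compiled */
static int example_hcd_probe(struct platform_device *pdev)
{
	struct usb_phy_roothub *phy_roothub;
	int err;

	/* grab every PHY listed in the controller's "phys" DT property */
	phy_roothub = usb_phy_roothub_alloc(&pdev->dev);
	if (IS_ERR(phy_roothub))
		return PTR_ERR(phy_roothub);

	/* a NULL return (no PHYs / CONFIG_GENERIC_PHY disabled) is still valid */
	err = usb_phy_roothub_init(phy_roothub);
	if (err)
		return err;

	err = usb_phy_roothub_power_on(phy_roothub);
	if (err)
		usb_phy_roothub_exit(phy_roothub);

	return err;
}
#endif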
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
 * rtl8712_io.c
 *
 * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
 * Linux device driver for RTL8192SU
 *
 * Modifications for inclusion into the Linux staging tree are
 * Copyright(c) 2010 Larry Finger. All rights reserved.
 *
 * Contact information:
 * WLAN FAE <wlanfae@realtek.com>.
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 ******************************************************************************/

#define _RTL8712_IO_C_

#include "osdep_service.h"
#include "drv_types.h"
#include "rtl871x_io.h"
#include "osdep_intf.h"
#include "usb_ops.h"

u8 r8712_read8(struct _adapter *adapter, u32 addr)
{
	struct intf_hdl *hdl = &adapter->pio_queue->intf;

	return hdl->io_ops._read8(hdl, addr);
}

u16 r8712_read16(struct _adapter *adapter, u32 addr)
{
	struct intf_hdl *hdl = &adapter->pio_queue->intf;

	return hdl->io_ops._read16(hdl, addr);
}

u32 r8712_read32(struct _adapter *adapter, u32 addr)
{
	struct intf_hdl *hdl = &adapter->pio_queue->intf;

	return hdl->io_ops._read32(hdl, addr);
}

void r8712_write8(struct _adapter *adapter, u32 addr, u8 val)
{
	struct intf_hdl *hdl = &adapter->pio_queue->intf;

	hdl->io_ops._write8(hdl, addr, val);
}

void r8712_write16(struct _adapter *adapter, u32 addr, u16 val)
{
	struct intf_hdl *hdl = &adapter->pio_queue->intf;

	hdl->io_ops._write16(hdl, addr, val);
}

void r8712_write32(struct _adapter *adapter, u32 addr, u32 val)
{
	struct intf_hdl *hdl = &adapter->pio_queue->intf;

	hdl->io_ops._write32(hdl, addr, val);
}

void r8712_read_mem(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
{
	struct intf_hdl *hdl = &adapter->pio_queue->intf;

	if (adapter->driver_stopped || adapter->surprise_removed)
		return;

	hdl->io_ops._read_mem(hdl, addr, cnt, pmem);
}

void r8712_write_mem(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
{
	struct intf_hdl *hdl = &adapter->pio_queue->intf;

	hdl->io_ops._write_mem(hdl, addr, cnt, pmem);
}

void r8712_read_port(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
{
	struct intf_hdl *hdl = &adapter->pio_queue->intf;

	if (adapter->driver_stopped || adapter->surprise_removed)
		return;

	hdl->io_ops._read_port(hdl, addr, cnt, pmem);
}

void r8712_write_port(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
{
	struct intf_hdl *hdl = &adapter->pio_queue->intf;

	hdl->io_ops._write_port(hdl, addr, cnt, pmem);
}
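/*
 * Editor's illustration (not part of the original file): the wrappers above
 * simply dispatch through the io_ops table of the adapter's I/O queue. A
 * hypothetical read-modify-write through them could look like this; the
 * register offset 0x0004 is made up for the example.
 */
#if 0	/* usage sketch only, not compiled */
static void example_set_bit0(struct _adapter *adapter)
{
	u8 val = r8712_read8(adapter, 0x0004);		/* read current value */

	r8712_write8(adapter, 0x0004, val | BIT(0));	/* set bit 0, write back */
}
#endif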
// SPDX-License-Identifier: GPL-2.0-or-later /* * USB Wacom tablet support - system specific code */ #include "wacom_wac.h" #include "wacom.h" #include <linux/input/mt.h> #define WAC_MSG_RETRIES 5 #define WAC_CMD_RETRIES 10 #define DEV_ATTR_RW_PERM (S_IRUGO |
S_IWUSR | S_IWGRP) #define DEV_ATTR_WO_PERM (S_IWUSR | S_IWGRP) #define DEV_ATTR_RO_PERM (S_IRUSR | S_IRGRP) static int wacom_get_report(struct hid_device *hdev, u8 type, u8 *buf, size_t size, unsigned int retries) { int retval; do { retval = hid_hw_raw_request(hdev, buf[0], buf, size, type, HID_REQ_GET_REPORT); } while ((retval == -ETIMEDOUT || retval == -EAGAIN) && --retries); if (retval < 0) hid_err(hdev, "wacom_get_report: ran out of retries " "(last error = %d)\n", retval); return retval; } static int wacom_set_report(struct hid_device *hdev, u8 type, u8 *buf, size_t size, unsigned int retries) { int retval; do { retval = hid_hw_raw_request(hdev, buf[0], buf, size, type, HID_REQ_SET_REPORT); } while ((retval == -ETIMEDOUT || retval == -EAGAIN) && --retries); if (retval < 0) hid_err(hdev, "wacom_set_report: ran out of retries " "(last error = %d)\n", retval); return retval; } static void wacom_wac_queue_insert(struct hid_device *hdev, struct kfifo_rec_ptr_2 *fifo, u8 *raw_data, int size) { bool warned = false; while (kfifo_avail(fifo) < size) { if (!warned) hid_warn(hdev, "%s: kfifo has filled, starting to drop events\n", __func__); warned = true; kfifo_skip(fifo); } kfifo_in(fifo, raw_data, size); } static void wacom_wac_queue_flush(struct hid_device *hdev, struct kfifo_rec_ptr_2 *fifo) { while (!kfifo_is_empty(fifo)) { u8 buf[WACOM_PKGLEN_MAX]; int size; int err; size = kfifo_out(fifo, buf, sizeof(buf)); err = hid_report_raw_event(hdev, HID_INPUT_REPORT, buf, size, false); if (err) { hid_warn(hdev, "%s: unable to flush event due to error %d\n", __func__, err); } } } static int wacom_wac_pen_serial_enforce(struct hid_device *hdev, struct hid_report *report, u8 *raw_data, int report_size) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom_wac->features; bool flush = false; bool insert = false; int i, j; if (wacom_wac->serial[0] || !(features->quirks & WACOM_QUIRK_TOOLSERIAL)) return 0; /* Queue events which have invalid tool type or serial number */ for (i = 0; i < report->maxfield; i++) { for (j = 0; j < report->field[i]->maxusage; j++) { struct hid_field *field = report->field[i]; struct hid_usage *usage = &field->usage[j]; unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid); unsigned int offset; unsigned int size; unsigned int value; if (equivalent_usage != HID_DG_INRANGE && equivalent_usage != HID_DG_TOOLSERIALNUMBER && equivalent_usage != WACOM_HID_WD_SERIALHI && equivalent_usage != WACOM_HID_WD_TOOLTYPE) continue; offset = field->report_offset; size = field->report_size; value = hid_field_extract(hdev, raw_data+1, offset + j * size, size); /* If we go out of range, we need to flush the queue ASAP */ if (equivalent_usage == HID_DG_INRANGE) value = !value; if (value) { flush = true; switch (equivalent_usage) { case HID_DG_TOOLSERIALNUMBER: wacom_wac->serial[0] = value; break; case WACOM_HID_WD_SERIALHI: wacom_wac->serial[0] |= ((__u64)value) << 32; break; case WACOM_HID_WD_TOOLTYPE: wacom_wac->id[0] = value; break; } } else { insert = true; } } } if (flush) wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo); else if (insert) wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo, raw_data, report_size); return insert && !flush; } static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *raw_data, int size) { struct wacom *wacom = hid_get_drvdata(hdev); if (wacom->wacom_wac.features.type == BOOTLOADER) return 0; if (size > WACOM_PKGLEN_MAX) return 1; if 
(wacom_wac_pen_serial_enforce(hdev, report, raw_data, size)) return -1; memcpy(wacom->wacom_wac.data, raw_data, size); wacom_wac_irq(&wacom->wacom_wac, size); return 0; } static int wacom_open(struct input_dev *dev) { struct wacom *wacom = input_get_drvdata(dev); return hid_hw_open(wacom->hdev); } static void wacom_close(struct input_dev *dev) { struct wacom *wacom = input_get_drvdata(dev); /* * wacom->hdev should never be null, but surprisingly, I had the case * once while unplugging the Wacom Wireless Receiver. */ if (wacom->hdev) hid_hw_close(wacom->hdev); } /* * Calculate the resolution of the X or Y axis using hidinput_calc_abs_res. */ static int wacom_calc_hid_res(int logical_extents, int physical_extents, unsigned unit, int exponent) { struct hid_field field = { .logical_maximum = logical_extents, .physical_maximum = physical_extents, .unit = unit, .unit_exponent = exponent, }; return hidinput_calc_abs_res(&field, ABS_X); } static void wacom_hid_usage_quirk(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_features *features = &wacom->wacom_wac.features; unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid); /* * The Dell Canvas 27 needs to be switched to its vendor-defined * report to provide the best resolution. */ if (hdev->vendor == USB_VENDOR_ID_WACOM && hdev->product == 0x4200 && field->application == HID_UP_MSVENDOR) { wacom->wacom_wac.mode_report = field->report->id; wacom->wacom_wac.mode_value = 2; } /* * ISDv4 devices which predate HID's adoption of the * HID_DG_BARELSWITCH2 usage use 0x000D0000 in its * position instead. We can accurately detect if a * usage with that value should be HID_DG_BARRELSWITCH2 * based on the surrounding usages, which have remained * constant across generations. */ if (features->type == HID_GENERIC && usage->hid == 0x000D0000 && field->application == HID_DG_PEN && field->physical == HID_DG_STYLUS) { int i = usage->usage_index; if (i-4 >= 0 && i+1 < field->maxusage && field->usage[i-4].hid == HID_DG_TIPSWITCH && field->usage[i-3].hid == HID_DG_BARRELSWITCH && field->usage[i-2].hid == HID_DG_ERASER && field->usage[i-1].hid == HID_DG_INVERT && field->usage[i+1].hid == HID_DG_INRANGE) { usage->hid = HID_DG_BARRELSWITCH2; } } /* * Wacom's AES devices use different vendor-defined usages to * report serial number information compared to their branded * hardware. The usages are also sometimes ill-defined and do * not have the correct logical min/max values set. Lets patch * the descriptor to use the branded usage convention and fix * the errors. 
*/ if (usage->hid == WACOM_HID_WT_SERIALNUMBER && field->report_size == 16 && field->index + 2 < field->report->maxfield) { struct hid_field *a = field->report->field[field->index + 1]; struct hid_field *b = field->report->field[field->index + 2]; if (a->maxusage > 0 && a->usage[0].hid == HID_DG_TOOLSERIALNUMBER && a->report_size == 32 && b->maxusage > 0 && b->usage[0].hid == 0xFF000000 && b->report_size == 8) { features->quirks |= WACOM_QUIRK_AESPEN; usage->hid = WACOM_HID_WD_TOOLTYPE; field->logical_minimum = S16_MIN; field->logical_maximum = S16_MAX; a->logical_minimum = S32_MIN; a->logical_maximum = S32_MAX; b->usage[0].hid = WACOM_HID_WD_SERIALHI; b->logical_minimum = 0; b->logical_maximum = U8_MAX; } } /* 2nd-generation Intuos Pro Large has incorrect Y maximum */ if (hdev->vendor == USB_VENDOR_ID_WACOM && hdev->product == 0x0358 && WACOM_PEN_FIELD(field) && equivalent_usage == HID_GD_Y) { field->logical_maximum = 43200; } } static void wacom_feature_mapping(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_features *features = &wacom->wacom_wac.features; struct hid_data *hid_data = &wacom->wacom_wac.hid_data; unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid); u8 *data; int ret; u32 n; wacom_hid_usage_quirk(hdev, field, usage); switch (equivalent_usage) { case WACOM_HID_WD_TOUCH_RING_SETTING: wacom->generic_has_leds = true; break; case HID_DG_CONTACTMAX: /* leave touch_max as is if predefined */ if (!features->touch_max) { /* read manually */ n = hid_report_len(field->report); data = hid_alloc_report_buf(field->report, GFP_KERNEL); if (!data) break; data[0] = field->report->id; ret = wacom_get_report(hdev, HID_FEATURE_REPORT, data, n, WAC_CMD_RETRIES); if (ret == n && features->type == HID_GENERIC) { ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, data, n, 0); } else if (ret == 2 && features->type != HID_GENERIC) { features->touch_max = data[1]; } else { features->touch_max = 16; hid_warn(hdev, "wacom_feature_mapping: " "could not get HID_DG_CONTACTMAX, " "defaulting to %d\n", features->touch_max); } kfree(data); } break; case HID_DG_INPUTMODE: /* Ignore if value index is out of bounds. 
*/ if (usage->usage_index >= field->report_count) { dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n"); break; } hid_data->inputmode = field->report->id; hid_data->inputmode_index = usage->usage_index; break; case HID_UP_DIGITIZER: if (field->report->id == 0x0B && (field->application == WACOM_HID_G9_PEN || field->application == WACOM_HID_G11_PEN)) { wacom->wacom_wac.mode_report = field->report->id; wacom->wacom_wac.mode_value = 0; } break; case WACOM_HID_WD_DATAMODE: wacom->wacom_wac.mode_report = field->report->id; wacom->wacom_wac.mode_value = 2; break; case WACOM_HID_UP_G9: case WACOM_HID_UP_G11: if (field->report->id == 0x03 && (field->application == WACOM_HID_G9_TOUCHSCREEN || field->application == WACOM_HID_G11_TOUCHSCREEN)) { wacom->wacom_wac.mode_report = field->report->id; wacom->wacom_wac.mode_value = 0; } break; case WACOM_HID_WD_OFFSETLEFT: case WACOM_HID_WD_OFFSETTOP: case WACOM_HID_WD_OFFSETRIGHT: case WACOM_HID_WD_OFFSETBOTTOM: /* read manually */ n = hid_report_len(field->report); data = hid_alloc_report_buf(field->report, GFP_KERNEL); if (!data) break; data[0] = field->report->id; ret = wacom_get_report(hdev, HID_FEATURE_REPORT, data, n, WAC_CMD_RETRIES); if (ret == n) { ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, data, n, 0); } else { hid_warn(hdev, "%s: could not retrieve sensor offsets\n", __func__); } kfree(data); break; } } /* * Interface Descriptor of wacom devices can be incomplete and * inconsistent so wacom_features table is used to store stylus * device's packet lengths, various maximum values, and tablet * resolution based on product ID's. * * For devices that contain 2 interfaces, wacom_features table is * inaccurate for the touch interface. Since the Interface Descriptor * for touch interfaces has pretty complete data, this function exists * to query tablet for this missing information instead of hard coding in * an additional table. * * A typical Interface Descriptor for a stylus will contain a * boot mouse application collection that is not of interest and this * function will ignore it. * * It also contains a digitizer application collection that also is not * of interest since any information it contains would be duplicate * of what is in wacom_features. Usually it defines a report of an array * of bytes that could be used as max length of the stylus packet returned. * If it happens to define a Digitizer-Stylus Physical Collection then * the X and Y logical values contain valid data but it is ignored. * * A typical Interface Descriptor for a touch interface will contain a * Digitizer-Finger Physical Collection which will define both logical * X/Y maximum as well as the physical size of tablet. Since touch * interfaces haven't supported pressure or distance, this is enough * information to override invalid values in the wacom_features table. * * Intuos5 touch interface and 3rd gen Bamboo Touch do not contain useful * data. We deal with them after returning from this function. */ static void wacom_usage_mapping(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_features *features = &wacom->wacom_wac.features; bool finger = WACOM_FINGER_FIELD(field); bool pen = WACOM_PEN_FIELD(field); unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); /* * Requiring Stylus Usage will ignore boot mouse * X/Y values and some cases of invalid Digitizer X/Y * values commonly reported. 
*/ if (pen) features->device_type |= WACOM_DEVICETYPE_PEN; else if (finger) features->device_type |= WACOM_DEVICETYPE_TOUCH; else return; wacom_hid_usage_quirk(hdev, field, usage); switch (equivalent_usage) { case HID_GD_X: features->x_max = field->logical_maximum; if (finger) { features->x_phy = field->physical_maximum; if ((features->type != BAMBOO_PT) && (features->type != BAMBOO_TOUCH)) { features->unit = field->unit; features->unitExpo = field->unit_exponent; } } break; case HID_GD_Y: features->y_max = field->logical_maximum; if (finger) { features->y_phy = field->physical_maximum; if ((features->type != BAMBOO_PT) && (features->type != BAMBOO_TOUCH)) { features->unit = field->unit; features->unitExpo = field->unit_exponent; } } break; case HID_DG_TIPPRESSURE: if (pen) features->pressure_max = field->logical_maximum; break; } if (features->type == HID_GENERIC) wacom_wac_usage_mapping(hdev, field, usage); } static void wacom_post_parse_hid(struct hid_device *hdev, struct wacom_features *features) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; if (features->type == HID_GENERIC) { /* Any last-minute generic device setup */ if (wacom_wac->has_mode_change) { if (wacom_wac->is_direct_mode) features->device_type |= WACOM_DEVICETYPE_DIRECT; else features->device_type &= ~WACOM_DEVICETYPE_DIRECT; } if (features->touch_max > 1) { if (features->device_type & WACOM_DEVICETYPE_DIRECT) input_mt_init_slots(wacom_wac->touch_input, wacom_wac->features.touch_max, INPUT_MT_DIRECT); else input_mt_init_slots(wacom_wac->touch_input, wacom_wac->features.touch_max, INPUT_MT_POINTER); } } } static void wacom_parse_hid(struct hid_device *hdev, struct wacom_features *features) { struct hid_report_enum *rep_enum; struct hid_report *hreport; int i, j; /* check features first */ rep_enum = &hdev->report_enum[HID_FEATURE_REPORT]; list_for_each_entry(hreport, &rep_enum->report_list, list) { for (i = 0; i < hreport->maxfield; i++) { /* Ignore if report count is out of bounds. 
*/ if (hreport->field[i]->report_count < 1) continue; for (j = 0; j < hreport->field[i]->maxusage; j++) { wacom_feature_mapping(hdev, hreport->field[i], hreport->field[i]->usage + j); } } } /* now check the input usages */ rep_enum = &hdev->report_enum[HID_INPUT_REPORT]; list_for_each_entry(hreport, &rep_enum->report_list, list) { if (!hreport->maxfield) continue; for (i = 0; i < hreport->maxfield; i++) for (j = 0; j < hreport->field[i]->maxusage; j++) wacom_usage_mapping(hdev, hreport->field[i], hreport->field[i]->usage + j); } wacom_post_parse_hid(hdev, features); } static int wacom_hid_set_device_mode(struct hid_device *hdev) { struct wacom *wacom = hid_get_drvdata(hdev); struct hid_data *hid_data = &wacom->wacom_wac.hid_data; struct hid_report *r; struct hid_report_enum *re; if (hid_data->inputmode < 0) return 0; re = &(hdev->report_enum[HID_FEATURE_REPORT]); r = re->report_id_hash[hid_data->inputmode]; if (r) { r->field[0]->value[hid_data->inputmode_index] = 2; hid_hw_request(hdev, r, HID_REQ_SET_REPORT); } return 0; } static int wacom_set_device_mode(struct hid_device *hdev, struct wacom_wac *wacom_wac) { u8 *rep_data; struct hid_report *r; struct hid_report_enum *re; u32 length; int error = -ENOMEM, limit = 0; if (wacom_wac->mode_report < 0) return 0; re = &(hdev->report_enum[HID_FEATURE_REPORT]); r = re->report_id_hash[wacom_wac->mode_report]; if (!r) return -EINVAL; rep_data = hid_alloc_report_buf(r, GFP_KERNEL); if (!rep_data) return -ENOMEM; length = hid_report_len(r); do { rep_data[0] = wacom_wac->mode_report; rep_data[1] = wacom_wac->mode_value; error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data, length, 1); if (error >= 0) error = wacom_get_report(hdev, HID_FEATURE_REPORT, rep_data, length, 1); } while (error >= 0 && rep_data[1] != wacom_wac->mode_report && limit++ < WAC_MSG_RETRIES); kfree(rep_data); return error < 0 ? error : 0; } static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed, struct wacom_features *features) { struct wacom *wacom = hid_get_drvdata(hdev); int ret; u8 rep_data[2]; switch (features->type) { case GRAPHIRE_BT: rep_data[0] = 0x03; rep_data[1] = 0x00; ret = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data, 2, 3); if (ret >= 0) { rep_data[0] = speed == 0 ? 0x05 : 0x06; rep_data[1] = 0x00; ret = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data, 2, 3); if (ret >= 0) { wacom->wacom_wac.bt_high_speed = speed; return 0; } } /* * Note that if the raw queries fail, it's not a hard failure * and it is safe to continue */ hid_warn(hdev, "failed to poke device, command %d, err %d\n", rep_data[0], ret); break; case INTUOS4WL: if (speed == 1) wacom->wacom_wac.bt_features &= ~0x20; else wacom->wacom_wac.bt_features |= 0x20; rep_data[0] = 0x03; rep_data[1] = wacom->wacom_wac.bt_features; ret = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data, 2, 1); if (ret >= 0) wacom->wacom_wac.bt_high_speed = speed; break; } return 0; } /* * Switch the tablet into its most-capable mode. Wacom tablets are * typically configured to power-up in a mode which sends mouse-like * reports to the OS. To get absolute position, pressure data, etc. * from the tablet, it is necessary to switch the tablet out of this * mode and into one which sends the full range of tablet data. 
*/ static int _wacom_query_tablet_data(struct wacom *wacom) { struct hid_device *hdev = wacom->hdev; struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom_wac->features; if (hdev->bus == BUS_BLUETOOTH) return wacom_bt_query_tablet_data(hdev, 1, features); if (features->type != HID_GENERIC) { if (features->device_type & WACOM_DEVICETYPE_TOUCH) { if (features->type > TABLETPC) { /* MT Tablet PC touch */ wacom_wac->mode_report = 3; wacom_wac->mode_value = 4; } else if (features->type == WACOM_24HDT) { wacom_wac->mode_report = 18; wacom_wac->mode_value = 2; } else if (features->type == WACOM_27QHDT) { wacom_wac->mode_report = 131; wacom_wac->mode_value = 2; } else if (features->type == BAMBOO_PAD) { wacom_wac->mode_report = 2; wacom_wac->mode_value = 2; } } else if (features->device_type & WACOM_DEVICETYPE_PEN) { if (features->type <= BAMBOO_PT) { wacom_wac->mode_report = 2; wacom_wac->mode_value = 2; } } } wacom_set_device_mode(hdev, wacom_wac); if (features->type == HID_GENERIC) return wacom_hid_set_device_mode(hdev); return 0; } static void wacom_retrieve_hid_descriptor(struct hid_device *hdev, struct wacom_features *features) { struct wacom *wacom = hid_get_drvdata(hdev); struct usb_interface *intf = wacom->intf; /* default features */ features->x_fuzz = 4; features->y_fuzz = 4; features->pressure_fuzz = 0; features->distance_fuzz = 1; features->tilt_fuzz = 1; /* * The wireless device HID is basic and layout conflicts with * other tablets (monitor and touch interface can look like pen). * Skip the query for this type and modify defaults based on * interface number. */ if (features->type == WIRELESS && intf) { if (intf->cur_altsetting->desc.bInterfaceNumber == 0) features->device_type = WACOM_DEVICETYPE_WL_MONITOR; else features->device_type = WACOM_DEVICETYPE_NONE; return; } wacom_parse_hid(hdev, features); } struct wacom_hdev_data { struct list_head list; struct kref kref; struct hid_device *dev; struct wacom_shared shared; }; static LIST_HEAD(wacom_udev_list); static DEFINE_MUTEX(wacom_udev_list_lock); static bool wacom_are_sibling(struct hid_device *hdev, struct hid_device *sibling) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_features *features = &wacom->wacom_wac.features; struct wacom *sibling_wacom = hid_get_drvdata(sibling); struct wacom_features *sibling_features = &sibling_wacom->wacom_wac.features; __u32 oVid = features->oVid ? features->oVid : hdev->vendor; __u32 oPid = features->oPid ? features->oPid : hdev->product; /* The defined oVid/oPid must match that of the sibling */ if (features->oVid != HID_ANY_ID && sibling->vendor != oVid) return false; if (features->oPid != HID_ANY_ID && sibling->product != oPid) return false; /* * Devices with the same VID/PID must share the same physical * device path, while those with different VID/PID must share * the same physical parent device path. */ if (hdev->vendor == sibling->vendor && hdev->product == sibling->product) { if (!hid_compare_device_paths(hdev, sibling, '/')) return false; } else { if (!hid_compare_device_paths(hdev, sibling, '.')) return false; } /* Skip the remaining heuristics unless you are a HID_GENERIC device */ if (features->type != HID_GENERIC) return true; /* * Direct-input devices may not be siblings of indirect-input * devices. */ if ((features->device_type & WACOM_DEVICETYPE_DIRECT) && !(sibling_features->device_type & WACOM_DEVICETYPE_DIRECT)) return false; /* * Indirect-input devices may not be siblings of direct-input * devices. 
*/ if (!(features->device_type & WACOM_DEVICETYPE_DIRECT) && (sibling_features->device_type & WACOM_DEVICETYPE_DIRECT)) return false; /* Pen devices may only be siblings of touch devices */ if ((features->device_type & WACOM_DEVICETYPE_PEN) && !(sibling_features->device_type & WACOM_DEVICETYPE_TOUCH)) return false; /* Touch devices may only be siblings of pen devices */ if ((features->device_type & WACOM_DEVICETYPE_TOUCH) && !(sibling_features->device_type & WACOM_DEVICETYPE_PEN)) return false; /* * No reason could be found for these two devices to NOT be * siblings, so there's a good chance they ARE siblings */ return true; } static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev) { struct wacom_hdev_data *data; /* Try to find an already-probed interface from the same device */ list_for_each_entry(data, &wacom_udev_list, list) { if (hid_compare_device_paths(hdev, data->dev, '/')) { kref_get(&data->kref); return data; } } /* Fallback to finding devices that appear to be "siblings" */ list_for_each_entry(data, &wacom_udev_list, list) { if (wacom_are_sibling(hdev, data->dev)) { kref_get(&data->kref); return data; } } return NULL; } static void wacom_release_shared_data(struct kref *kref) { struct wacom_hdev_data *data = container_of(kref, struct wacom_hdev_data, kref); mutex_lock(&wacom_udev_list_lock); list_del(&data->list); mutex_unlock(&wacom_udev_list_lock); kfree(data); } static void wacom_remove_shared_data(void *res) { struct wacom *wacom = res; struct wacom_hdev_data *data; struct wacom_wac *wacom_wac = &wacom->wacom_wac; if (wacom_wac->shared) { data = container_of(wacom_wac->shared, struct wacom_hdev_data, shared); if (wacom_wac->shared->touch == wacom->hdev) wacom_wac->shared->touch = NULL; else if (wacom_wac->shared->pen == wacom->hdev) wacom_wac->shared->pen = NULL; kref_put(&data->kref, wacom_release_shared_data); wacom_wac->shared = NULL; } } static int wacom_add_shared_data(struct hid_device *hdev) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_hdev_data *data; int retval = 0; mutex_lock(&wacom_udev_list_lock); data = wacom_get_hdev_data(hdev); if (!data) { data = kzalloc(sizeof(struct wacom_hdev_data), GFP_KERNEL); if (!data) { mutex_unlock(&wacom_udev_list_lock); return -ENOMEM; } kref_init(&data->kref); data->dev = hdev; list_add_tail(&data->list, &wacom_udev_list); } mutex_unlock(&wacom_udev_list_lock); wacom_wac->shared = &data->shared; retval = devm_add_action_or_reset(&hdev->dev, wacom_remove_shared_data, wacom); if (retval) return retval; if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) wacom_wac->shared->touch = hdev; else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN) wacom_wac->shared->pen = hdev; return retval; } static int wacom_led_control(struct wacom *wacom) { unsigned char *buf; int retval; unsigned char report_id = WAC_CMD_LED_CONTROL; int buf_size = 9; if (!wacom->led.groups) return -ENOTSUPP; if (wacom->wacom_wac.features.type == REMOTE) return -ENOTSUPP; if (wacom->wacom_wac.pid) { /* wireless connected */ report_id = WAC_CMD_WL_LED_CONTROL; buf_size = 13; } else if (wacom->wacom_wac.features.type == INTUOSP2_BT) { report_id = WAC_CMD_WL_INTUOSP2; buf_size = 51; } buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOMEM; if (wacom->wacom_wac.features.type == HID_GENERIC) { buf[0] = WAC_CMD_LED_CONTROL_GENERIC; buf[1] = wacom->led.llv; buf[2] = wacom->led.groups[0].select & 0x03; } else if ((wacom->wacom_wac.features.type >= INTUOS5S && 
wacom->wacom_wac.features.type <= INTUOSPL)) { /* * Touch Ring and crop mark LED luminance may take on * one of four values: * 0 = Low; 1 = Medium; 2 = High; 3 = Off */ int ring_led = wacom->led.groups[0].select & 0x03; int ring_lum = (((wacom->led.llv & 0x60) >> 5) - 1) & 0x03; int crop_lum = 0; unsigned char led_bits = (crop_lum << 4) | (ring_lum << 2) | (ring_led); buf[0] = report_id; if (wacom->wacom_wac.pid) { wacom_get_report(wacom->hdev, HID_FEATURE_REPORT, buf, buf_size, WAC_CMD_RETRIES); buf[0] = report_id; buf[4] = led_bits; } else buf[1] = led_bits; } else if (wacom->wacom_wac.features.type == INTUOSP2_BT) { buf[0] = report_id; buf[4] = 100; // Power Connection LED (ORANGE) buf[5] = 100; // BT Connection LED (BLUE) buf[6] = 100; // Paper Mode (RED?) buf[7] = 100; // Paper Mode (GREEN?) buf[8] = 100; // Paper Mode (BLUE?) buf[9] = wacom->led.llv; buf[10] = wacom->led.groups[0].select & 0x03; } else { int led = wacom->led.groups[0].select | 0x4; if (wacom->wacom_wac.features.type == WACOM_21UX2 || wacom->wacom_wac.features.type == WACOM_24HD) led |= (wacom->led.groups[1].select << 4) | 0x40; buf[0] = report_id; buf[1] = led; buf[2] = wacom->led.llv; buf[3] = wacom->led.hlv; buf[4] = wacom->led.img_lum; } retval = wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, buf_size, WAC_CMD_RETRIES); kfree(buf); return retval; } static int wacom_led_putimage(struct wacom *wacom, int button_id, u8 xfer_id, const unsigned len, const void *img) { unsigned char *buf; int i, retval; const unsigned chunk_len = len / 4; /* 4 chunks are needed to be sent */ buf = kzalloc(chunk_len + 3 , GFP_KERNEL); if (!buf) return -ENOMEM; /* Send 'start' command */ buf[0] = WAC_CMD_ICON_START; buf[1] = 1; retval = wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, 2, WAC_CMD_RETRIES); if (retval < 0) goto out; buf[0] = xfer_id; buf[1] = button_id & 0x07; for (i = 0; i < 4; i++) { buf[2] = i; memcpy(buf + 3, img + i * chunk_len, chunk_len); retval = wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, chunk_len + 3, WAC_CMD_RETRIES); if (retval < 0) break; } /* Send 'stop' */ buf[0] = WAC_CMD_ICON_START; buf[1] = 0; wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, 2, WAC_CMD_RETRIES); out: kfree(buf); return retval; } static ssize_t wacom_led_select_store(struct device *dev, int set_id, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct wacom *wacom = hid_get_drvdata(hdev); unsigned int id; int err; err = kstrtouint(buf, 10, &id); if (err) return err; mutex_lock(&wacom->lock); wacom->led.groups[set_id].select = id & 0x3; err = wacom_led_control(wacom); mutex_unlock(&wacom->lock); return err < 0 ? 
err : count; } #define DEVICE_LED_SELECT_ATTR(SET_ID) \ static ssize_t wacom_led##SET_ID##_select_store(struct device *dev, \ struct device_attribute *attr, const char *buf, size_t count) \ { \ return wacom_led_select_store(dev, SET_ID, buf, count); \ } \ static ssize_t wacom_led##SET_ID##_select_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct hid_device *hdev = to_hid_device(dev);\ struct wacom *wacom = hid_get_drvdata(hdev); \ return scnprintf(buf, PAGE_SIZE, "%d\n", \ wacom->led.groups[SET_ID].select); \ } \ static DEVICE_ATTR(status_led##SET_ID##_select, DEV_ATTR_RW_PERM, \ wacom_led##SET_ID##_select_show, \ wacom_led##SET_ID##_select_store) DEVICE_LED_SELECT_ATTR(0); DEVICE_LED_SELECT_ATTR(1); static ssize_t wacom_luminance_store(struct wacom *wacom, u8 *dest, const char *buf, size_t count) { unsigned int value; int err; err = kstrtouint(buf, 10, &value); if (err) return err; mutex_lock(&wacom->lock); *dest = value & 0x7f; err = wacom_led_control(wacom); mutex_unlock(&wacom->lock); return err < 0 ? err : count; } #define DEVICE_LUMINANCE_ATTR(name, field) \ static ssize_t wacom_##name##_luminance_store(struct device *dev, \ struct device_attribute *attr, const char *buf, size_t count) \ { \ struct hid_device *hdev = to_hid_device(dev);\ struct wacom *wacom = hid_get_drvdata(hdev); \ \ return wacom_luminance_store(wacom, &wacom->led.field, \ buf, count); \ } \ static ssize_t wacom_##name##_luminance_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct wacom *wacom = dev_get_drvdata(dev); \ return scnprintf(buf, PAGE_SIZE, "%d\n", wacom->led.field); \ } \ static DEVICE_ATTR(name##_luminance, DEV_ATTR_RW_PERM, \ wacom_##name##_luminance_show, \ wacom_##name##_luminance_store) DEVICE_LUMINANCE_ATTR(status0, llv); DEVICE_LUMINANCE_ATTR(status1, hlv); DEVICE_LUMINANCE_ATTR(buttons, img_lum); static ssize_t wacom_button_image_store(struct device *dev, int button_id, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct wacom *wacom = hid_get_drvdata(hdev); int err; unsigned len; u8 xfer_id; if (hdev->bus == BUS_BLUETOOTH) { len = 256; xfer_id = WAC_CMD_ICON_BT_XFER; } else { len = 1024; xfer_id = WAC_CMD_ICON_XFER; } if (count != len) return -EINVAL; mutex_lock(&wacom->lock); err = wacom_led_putimage(wacom, button_id, xfer_id, len, buf); mutex_unlock(&wacom->lock); return err < 0 ? 
err : count; } #define DEVICE_BTNIMG_ATTR(BUTTON_ID) \ static ssize_t wacom_btnimg##BUTTON_ID##_store(struct device *dev, \ struct device_attribute *attr, const char *buf, size_t count) \ { \ return wacom_button_image_store(dev, BUTTON_ID, buf, count); \ } \ static DEVICE_ATTR(button##BUTTON_ID##_rawimg, DEV_ATTR_WO_PERM, \ NULL, wacom_btnimg##BUTTON_ID##_store) DEVICE_BTNIMG_ATTR(0); DEVICE_BTNIMG_ATTR(1); DEVICE_BTNIMG_ATTR(2); DEVICE_BTNIMG_ATTR(3); DEVICE_BTNIMG_ATTR(4); DEVICE_BTNIMG_ATTR(5); DEVICE_BTNIMG_ATTR(6); DEVICE_BTNIMG_ATTR(7); static struct attribute *cintiq_led_attrs[] = { &dev_attr_status_led0_select.attr, &dev_attr_status_led1_select.attr, NULL }; static const struct attribute_group cintiq_led_attr_group = { .name = "wacom_led", .attrs = cintiq_led_attrs, }; static struct attribute *intuos4_led_attrs[] = { &dev_attr_status0_luminance.attr, &dev_attr_status1_luminance.attr, &dev_attr_status_led0_select.attr, &dev_attr_buttons_luminance.attr, &dev_attr_button0_rawimg.attr, &dev_attr_button1_rawimg.attr, &dev_attr_button2_rawimg.attr, &dev_attr_button3_rawimg.attr, &dev_attr_button4_rawimg.attr, &dev_attr_button5_rawimg.attr, &dev_attr_button6_rawimg.attr, &dev_attr_button7_rawimg.attr, NULL }; static const struct attribute_group intuos4_led_attr_group = { .name = "wacom_led", .attrs = intuos4_led_attrs, }; static struct attribute *intuos5_led_attrs[] = { &dev_attr_status0_luminance.attr, &dev_attr_status_led0_select.attr, NULL }; static const struct attribute_group intuos5_led_attr_group = { .name = "wacom_led", .attrs = intuos5_led_attrs, }; static struct attribute *generic_led_attrs[] = { &dev_attr_status0_luminance.attr, &dev_attr_status_led0_select.attr, NULL }; static const struct attribute_group generic_led_attr_group = { .name = "wacom_led", .attrs = generic_led_attrs, }; struct wacom_sysfs_group_devres { const struct attribute_group *group; struct kobject *root; }; static void wacom_devm_sysfs_group_release(struct device *dev, void *res) { struct wacom_sysfs_group_devres *devres = res; struct kobject *kobj = devres->root; dev_dbg(dev, "%s: dropping reference to %s\n", __func__, devres->group->name); sysfs_remove_group(kobj, devres->group); } static int __wacom_devm_sysfs_create_group(struct wacom *wacom, struct kobject *root, const struct attribute_group *group) { struct wacom_sysfs_group_devres *devres; int error; devres = devres_alloc(wacom_devm_sysfs_group_release, sizeof(struct wacom_sysfs_group_devres), GFP_KERNEL); if (!devres) return -ENOMEM; devres->group = group; devres->root = root; error = sysfs_create_group(devres->root, group); if (error) { devres_free(devres); return error; } devres_add(&wacom->hdev->dev, devres); return 0; } static int wacom_devm_sysfs_create_group(struct wacom *wacom, const struct attribute_group *group) { return __wacom_devm_sysfs_create_group(wacom, &wacom->hdev->dev.kobj, group); } static void wacom_devm_kfifo_release(struct device *dev, void *res) { struct kfifo_rec_ptr_2 *devres = res; kfifo_free(devres); } static int wacom_devm_kfifo_alloc(struct wacom *wacom) { struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct kfifo_rec_ptr_2 *pen_fifo; int error; pen_fifo = devres_alloc(wacom_devm_kfifo_release, sizeof(struct kfifo_rec_ptr_2), GFP_KERNEL); if (!pen_fifo) return -ENOMEM; error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL); if (error) { devres_free(pen_fifo); return error; } devres_add(&wacom->hdev->dev, pen_fifo); wacom_wac->pen_fifo = pen_fifo; return 0; } enum led_brightness wacom_leds_brightness_get(struct 
wacom_led *led) { struct wacom *wacom = led->wacom; if (wacom->led.max_hlv) return led->hlv * LED_FULL / wacom->led.max_hlv; if (wacom->led.max_llv) return led->llv * LED_FULL / wacom->led.max_llv; /* device doesn't support brightness tuning */ return LED_FULL; } static enum led_brightness __wacom_led_brightness_get(struct led_classdev *cdev) { struct wacom_led *led = container_of(cdev, struct wacom_led, cdev); struct wacom *wacom = led->wacom; if (wacom->led.groups[led->group].select != led->id) return LED_OFF; return wacom_leds_brightness_get(led); } static int wacom_led_brightness_set(struct led_classdev *cdev, enum led_brightness brightness) { struct wacom_led *led = container_of(cdev, struct wacom_led, cdev); struct wacom *wacom = led->wacom; int error; mutex_lock(&wacom->lock); if (!wacom->led.groups || (brightness == LED_OFF && wacom->led.groups[led->group].select != led->id)) { error = 0; goto out; } led->llv = wacom->led.llv = wacom->led.max_llv * brightness / LED_FULL; led->hlv = wacom->led.hlv = wacom->led.max_hlv * brightness / LED_FULL; wacom->led.groups[led->group].select = led->id; error = wacom_led_control(wacom); out: mutex_unlock(&wacom->lock); return error; } static void wacom_led_readonly_brightness_set(struct led_classdev *cdev, enum led_brightness brightness) { } static int wacom_led_register_one(struct device *dev, struct wacom *wacom, struct wacom_led *led, unsigned int group, unsigned int id, bool read_only) { int error; char *name; name = devm_kasprintf(dev, GFP_KERNEL, "%s::wacom-%d.%d", dev_name(dev), group, id); if (!name) return -ENOMEM; if (!read_only) { led->trigger.name = name; error = devm_led_trigger_register(dev, &led->trigger); if (error) { hid_err(wacom->hdev, "failed to register LED trigger %s: %d\n", led->cdev.name, error); return error; } } led->group = group; led->id = id; led->wacom = wacom; led->llv = wacom->led.llv; led->hlv = wacom->led.hlv; led->cdev.name = name; led->cdev.max_brightness = LED_FULL; led->cdev.flags = LED_HW_PLUGGABLE; led->cdev.brightness_get = __wacom_led_brightness_get; if (!read_only) { led->cdev.brightness_set_blocking = wacom_led_brightness_set; led->cdev.default_trigger = led->cdev.name; } else { led->cdev.brightness_set = wacom_led_readonly_brightness_set; } error = devm_led_classdev_register(dev, &led->cdev); if (error) { hid_err(wacom->hdev, "failed to register LED %s: %d\n", led->cdev.name, error); led->cdev.name = NULL; return error; } return 0; } static void wacom_led_groups_release_one(void *data) { struct wacom_group_leds *group = data; devres_release_group(group->dev, group); } static int wacom_led_groups_alloc_and_register_one(struct device *dev, struct wacom *wacom, int group_id, int count, bool read_only) { struct wacom_led *leds; int i, error; if (group_id >= wacom->led.count || count <= 0) return -EINVAL; if (!devres_open_group(dev, &wacom->led.groups[group_id], GFP_KERNEL)) return -ENOMEM; leds = devm_kcalloc(dev, count, sizeof(struct wacom_led), GFP_KERNEL); if (!leds) { error = -ENOMEM; goto err; } wacom->led.groups[group_id].leds = leds; wacom->led.groups[group_id].count = count; for (i = 0; i < count; i++) { error = wacom_led_register_one(dev, wacom, &leds[i], group_id, i, read_only); if (error) goto err; } wacom->led.groups[group_id].dev = dev; devres_close_group(dev, &wacom->led.groups[group_id]); /* * There is a bug (?) in devm_led_classdev_register() in which its * increments the refcount of the parent. 
If the parent is an input * device, that means the ref count never reaches 0 when * devm_input_device_release() gets called. * This means that the LEDs are still there after disconnect. * Manually force the release of the group so that the leds are released * once we are done using them. */ error = devm_add_action_or_reset(&wacom->hdev->dev, wacom_led_groups_release_one, &wacom->led.groups[group_id]); if (error) return error; return 0; err: devres_release_group(dev, &wacom->led.groups[group_id]); return error; } struct wacom_led *wacom_led_find(struct wacom *wacom, unsigned int group_id, unsigned int id) { struct wacom_group_leds *group; if (group_id >= wacom->led.count) return NULL; group = &wacom->led.groups[group_id]; if (!group->leds) return NULL; id %= group->count; return &group->leds[id]; } /* * wacom_led_next: gives the next available led with a wacom trigger. * * returns the next available struct wacom_led which has its default trigger * or the current one if none is available. */ struct wacom_led *wacom_led_next(struct wacom *wacom, struct wacom_led *cur) { struct wacom_led *next_led; int group, next; if (!wacom || !cur) return NULL; group = cur->group; next = cur->id; do { next_led = wacom_led_find(wacom, group, ++next); if (!next_led || next_led == cur) return next_led; } while (next_led->cdev.trigger != &next_led->trigger); return next_led; } static void wacom_led_groups_release(void *data) { struct wacom *wacom = data; wacom->led.groups = NULL; wacom->led.count = 0; } static int wacom_led_groups_allocate(struct wacom *wacom, int count) { struct device *dev = &wacom->hdev->dev; struct wacom_group_leds *groups; int error; groups = devm_kcalloc(dev, count, sizeof(struct wacom_group_leds), GFP_KERNEL); if (!groups) return -ENOMEM; error = devm_add_action_or_reset(dev, wacom_led_groups_release, wacom); if (error) return error; wacom->led.groups = groups; wacom->led.count = count; return 0; } static int wacom_leds_alloc_and_register(struct wacom *wacom, int group_count, int led_per_group, bool read_only) { struct device *dev; int i, error; if (!wacom->wacom_wac.pad_input) return -EINVAL; dev = &wacom->wacom_wac.pad_input->dev; error = wacom_led_groups_allocate(wacom, group_count); if (error) return error; for (i = 0; i < group_count; i++) { error = wacom_led_groups_alloc_and_register_one(dev, wacom, i, led_per_group, read_only); if (error) return error; } return 0; } int wacom_initialize_leds(struct wacom *wacom) { int error; if (!(wacom->wacom_wac.features.device_type & WACOM_DEVICETYPE_PAD)) return 0; /* Initialize default values */ switch (wacom->wacom_wac.features.type) { case HID_GENERIC: if (!wacom->generic_has_leds) return 0; wacom->led.llv = 100; wacom->led.max_llv = 100; error = wacom_leds_alloc_and_register(wacom, 1, 4, false); if (error) { hid_err(wacom->hdev, "cannot create leds err: %d\n", error); return error; } error = wacom_devm_sysfs_create_group(wacom, &generic_led_attr_group); break; case INTUOS4S: case INTUOS4: case INTUOS4WL: case INTUOS4L: wacom->led.llv = 10; wacom->led.hlv = 20; wacom->led.max_llv = 127; wacom->led.max_hlv = 127; wacom->led.img_lum = 10; error = wacom_leds_alloc_and_register(wacom, 1, 4, false); if (error) { hid_err(wacom->hdev, "cannot create leds err: %d\n", error); return error; } error = wacom_devm_sysfs_create_group(wacom, &intuos4_led_attr_group); break; case WACOM_24HD: case WACOM_21UX2: wacom->led.llv = 0; wacom->led.hlv = 0; wacom->led.img_lum = 0; error = wacom_leds_alloc_and_register(wacom, 2, 4, false); if (error) { 
hid_err(wacom->hdev, "cannot create leds err: %d\n", error); return error; } error = wacom_devm_sysfs_create_group(wacom, &cintiq_led_attr_group); break; case INTUOS5S: case INTUOS5: case INTUOS5L: case INTUOSPS: case INTUOSPM: case INTUOSPL: wacom->led.llv = 32; wacom->led.max_llv = 96; error = wacom_leds_alloc_and_register(wacom, 1, 4, false); if (error) { hid_err(wacom->hdev, "cannot create leds err: %d\n", error); return error; } error = wacom_devm_sysfs_create_group(wacom, &intuos5_led_attr_group); break; case INTUOSP2_BT: wacom->led.llv = 50; wacom->led.max_llv = 100; error = wacom_leds_alloc_and_register(wacom, 1, 4, false); if (error) { hid_err(wacom->hdev, "cannot create leds err: %d\n", error); return error; } return 0; case REMOTE: wacom->led.llv = 255; wacom->led.max_llv = 255; error = wacom_led_groups_allocate(wacom, 5); if (error) { hid_err(wacom->hdev, "cannot create leds err: %d\n", error); return error; } return 0; default: return 0; } if (error) { hid_err(wacom->hdev, "cannot create sysfs group err: %d\n", error); return error; } return 0; } static void wacom_init_work(struct work_struct *work) { struct wacom *wacom = container_of(work, struct wacom, init_work.work); _wacom_query_tablet_data(wacom); wacom_led_control(wacom); } static void wacom_query_tablet_data(struct wacom *wacom) { schedule_delayed_work(&wacom->init_work, msecs_to_jiffies(1000)); } static enum power_supply_property wacom_battery_props[] = { POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_CAPACITY }; static int wacom_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct wacom_battery *battery = power_supply_get_drvdata(psy); int ret = 0; switch (psp) { case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = battery->wacom->wacom_wac.name; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = battery->bat_connected; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = battery->battery_capacity; break; case POWER_SUPPLY_PROP_STATUS: if (battery->bat_status != WACOM_POWER_SUPPLY_STATUS_AUTO) val->intval = battery->bat_status; else if (battery->bat_charging) val->intval = POWER_SUPPLY_STATUS_CHARGING; else if (battery->battery_capacity == 100 && battery->ps_connected) val->intval = POWER_SUPPLY_STATUS_FULL; else if (battery->ps_connected) val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; else val->intval = POWER_SUPPLY_STATUS_DISCHARGING; break; default: ret = -EINVAL; break; } return ret; } static int __wacom_initialize_battery(struct wacom *wacom, struct wacom_battery *battery) { static atomic_t battery_no = ATOMIC_INIT(0); struct device *dev = &wacom->hdev->dev; struct power_supply_config psy_cfg = { .drv_data = battery, }; struct power_supply *ps_bat; struct power_supply_desc *bat_desc = &battery->bat_desc; unsigned long n; int error; if (!devres_open_group(dev, bat_desc, GFP_KERNEL)) return -ENOMEM; battery->wacom = wacom; n = atomic_inc_return(&battery_no) - 1; bat_desc->properties = wacom_battery_props; bat_desc->num_properties = ARRAY_SIZE(wacom_battery_props); bat_desc->get_property = wacom_battery_get_property; sprintf(battery->bat_name, "wacom_battery_%ld", n); bat_desc->name = battery->bat_name; bat_desc->type = POWER_SUPPLY_TYPE_BATTERY; bat_desc->use_for_apm = 0; ps_bat = devm_power_supply_register(dev, bat_desc, &psy_cfg); if (IS_ERR(ps_bat)) { error = PTR_ERR(ps_bat); goto err; } 
power_supply_powers(ps_bat, &wacom->hdev->dev); battery->battery = ps_bat; devres_close_group(dev, bat_desc); return 0; err: devres_release_group(dev, bat_desc); return error; } static int wacom_initialize_battery(struct wacom *wacom) { if (wacom->wacom_wac.features.quirks & WACOM_QUIRK_BATTERY) return __wacom_initialize_battery(wacom, &wacom->battery); return 0; } static void wacom_destroy_battery(struct wacom *wacom) { if (wacom->battery.battery) { devres_release_group(&wacom->hdev->dev, &wacom->battery.bat_desc); wacom->battery.battery = NULL; } } static void wacom_aes_battery_handler(struct work_struct *work) { struct wacom *wacom = container_of(work, struct wacom, aes_battery_work.work); wacom_destroy_battery(wacom); } static ssize_t wacom_show_speed(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct wacom *wacom = hid_get_drvdata(hdev); return sysfs_emit(buf, "%i\n", wacom->wacom_wac.bt_high_speed); } static ssize_t wacom_store_speed(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct wacom *wacom = hid_get_drvdata(hdev); u8 new_speed; if (kstrtou8(buf, 0, &new_speed)) return -EINVAL; if (new_speed != 0 && new_speed != 1) return -EINVAL; wacom_bt_query_tablet_data(hdev, new_speed, &wacom->wacom_wac.features); return count; } static DEVICE_ATTR(speed, DEV_ATTR_RW_PERM, wacom_show_speed, wacom_store_speed); static ssize_t wacom_show_remote_mode(struct kobject *kobj, struct kobj_attribute *kattr, char *buf, int index) { struct device *dev = kobj_to_dev(kobj->parent); struct hid_device *hdev = to_hid_device(dev); struct wacom *wacom = hid_get_drvdata(hdev); u8 mode; mode = wacom->led.groups[index].select; return sprintf(buf, "%d\n", mode < 3 ? 
mode : -1); } #define DEVICE_EKR_ATTR_GROUP(SET_ID) \ static ssize_t wacom_show_remote##SET_ID##_mode(struct kobject *kobj, \ struct kobj_attribute *kattr, char *buf) \ { \ return wacom_show_remote_mode(kobj, kattr, buf, SET_ID); \ } \ static struct kobj_attribute remote##SET_ID##_mode_attr = { \ .attr = {.name = "remote_mode", \ .mode = DEV_ATTR_RO_PERM}, \ .show = wacom_show_remote##SET_ID##_mode, \ }; \ static struct attribute *remote##SET_ID##_serial_attrs[] = { \ &remote##SET_ID##_mode_attr.attr, \ NULL \ }; \ static const struct attribute_group remote##SET_ID##_serial_group = { \ .name = NULL, \ .attrs = remote##SET_ID##_serial_attrs, \ } DEVICE_EKR_ATTR_GROUP(0); DEVICE_EKR_ATTR_GROUP(1); DEVICE_EKR_ATTR_GROUP(2); DEVICE_EKR_ATTR_GROUP(3); DEVICE_EKR_ATTR_GROUP(4); static int wacom_remote_create_attr_group(struct wacom *wacom, __u32 serial, int index) { int error = 0; struct wacom_remote *remote = wacom->remote; remote->remotes[index].group.name = devm_kasprintf(&wacom->hdev->dev, GFP_KERNEL, "%d", serial); if (!remote->remotes[index].group.name) return -ENOMEM; error = __wacom_devm_sysfs_create_group(wacom, remote->remote_dir, &remote->remotes[index].group); if (error) { remote->remotes[index].group.name = NULL; hid_err(wacom->hdev, "cannot create sysfs group err: %d\n", error); return error; } return 0; } static int wacom_cmd_unpair_remote(struct wacom *wacom, unsigned char selector) { const size_t buf_size = 2; unsigned char *buf; int retval; buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOMEM; buf[0] = WAC_CMD_DELETE_PAIRING; buf[1] = selector; retval = wacom_set_report(wacom->hdev, HID_OUTPUT_REPORT, buf, buf_size, WAC_CMD_RETRIES); kfree(buf); return retval; } static ssize_t wacom_store_unpair_remote(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned char selector = 0; struct device *dev = kobj_to_dev(kobj->parent); struct hid_device *hdev = to_hid_device(dev); struct wacom *wacom = hid_get_drvdata(hdev); int err; if (!strncmp(buf, "*\n", 2)) { selector = WAC_CMD_UNPAIR_ALL; } else { hid_info(wacom->hdev, "remote: unrecognized unpair code: %s\n", buf); return -1; } mutex_lock(&wacom->lock); err = wacom_cmd_unpair_remote(wacom, selector); mutex_unlock(&wacom->lock); return err < 0 ? 
err : count; } static struct kobj_attribute unpair_remote_attr = { .attr = {.name = "unpair_remote", .mode = 0200}, .store = wacom_store_unpair_remote, }; static const struct attribute *remote_unpair_attrs[] = { &unpair_remote_attr.attr, NULL }; static void wacom_remotes_destroy(void *data) { struct wacom *wacom = data; struct wacom_remote *remote = wacom->remote; if (!remote) return; kobject_put(remote->remote_dir); kfifo_free(&remote->remote_fifo); wacom->remote = NULL; } static int wacom_initialize_remotes(struct wacom *wacom) { int error = 0; struct wacom_remote *remote; int i; if (wacom->wacom_wac.features.type != REMOTE) return 0; remote = devm_kzalloc(&wacom->hdev->dev, sizeof(*wacom->remote), GFP_KERNEL); if (!remote) return -ENOMEM; wacom->remote = remote; spin_lock_init(&remote->remote_lock); error = kfifo_alloc(&remote->remote_fifo, 5 * sizeof(struct wacom_remote_work_data), GFP_KERNEL); if (error) { hid_err(wacom->hdev, "failed allocating remote_fifo\n"); return -ENOMEM; } remote->remotes[0].group = remote0_serial_group; remote->remotes[1].group = remote1_serial_group; remote->remotes[2].group = remote2_serial_group; remote->remotes[3].group = remote3_serial_group; remote->remotes[4].group = remote4_serial_group; remote->remote_dir = kobject_create_and_add("wacom_remote", &wacom->hdev->dev.kobj); if (!remote->remote_dir) return -ENOMEM; error = sysfs_create_files(remote->remote_dir, remote_unpair_attrs); if (error) { hid_err(wacom->hdev, "cannot create sysfs group err: %d\n", error); return error; } for (i = 0; i < WACOM_MAX_REMOTES; i++) { wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN; remote->remotes[i].serial = 0; } error = devm_add_action_or_reset(&wacom->hdev->dev, wacom_remotes_destroy, wacom); if (error) return error; return 0; } static struct input_dev *wacom_allocate_input(struct wacom *wacom) { struct input_dev *input_dev; struct hid_device *hdev = wacom->hdev; struct wacom_wac *wacom_wac = &(wacom->wacom_wac); input_dev = devm_input_allocate_device(&hdev->dev); if (!input_dev) return NULL; input_dev->name = wacom_wac->features.name; input_dev->phys = hdev->phys; input_dev->dev.parent = &hdev->dev; input_dev->open = wacom_open; input_dev->close = wacom_close; input_dev->uniq = hdev->uniq; input_dev->id.bustype = hdev->bus; input_dev->id.vendor = hdev->vendor; input_dev->id.product = wacom_wac->pid ? 
wacom_wac->pid : hdev->product; input_dev->id.version = hdev->version; input_set_drvdata(input_dev, wacom); return input_dev; } static int wacom_allocate_inputs(struct wacom *wacom) { struct wacom_wac *wacom_wac = &(wacom->wacom_wac); wacom_wac->pen_input = wacom_allocate_input(wacom); wacom_wac->touch_input = wacom_allocate_input(wacom); wacom_wac->pad_input = wacom_allocate_input(wacom); if (!wacom_wac->pen_input || !wacom_wac->touch_input || !wacom_wac->pad_input) return -ENOMEM; wacom_wac->pen_input->name = wacom_wac->pen_name; wacom_wac->touch_input->name = wacom_wac->touch_name; wacom_wac->pad_input->name = wacom_wac->pad_name; return 0; } static int wacom_setup_inputs(struct wacom *wacom) { struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev; struct wacom_wac *wacom_wac = &(wacom->wacom_wac); int error = 0; pen_input_dev = wacom_wac->pen_input; touch_input_dev = wacom_wac->touch_input; pad_input_dev = wacom_wac->pad_input; if (!pen_input_dev || !touch_input_dev || !pad_input_dev) return -EINVAL; error = wacom_setup_pen_input_capabilities(pen_input_dev, wacom_wac); if (error) { /* no pen in use on this interface */ input_free_device(pen_input_dev); wacom_wac->pen_input = NULL; pen_input_dev = NULL; } error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac); if (error) { /* no touch in use on this interface */ input_free_device(touch_input_dev); wacom_wac->touch_input = NULL; touch_input_dev = NULL; } error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac); if (error) { /* no pad events using this interface */ input_free_device(pad_input_dev); wacom_wac->pad_input = NULL; pad_input_dev = NULL; } return 0; } static int wacom_register_inputs(struct wacom *wacom) { struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev; struct wacom_wac *wacom_wac = &(wacom->wacom_wac); int error = 0; pen_input_dev = wacom_wac->pen_input; touch_input_dev = wacom_wac->touch_input; pad_input_dev = wacom_wac->pad_input; if (pen_input_dev) { error = input_register_device(pen_input_dev); if (error) goto fail; } if (touch_input_dev) { error = input_register_device(touch_input_dev); if (error) goto fail; } if (pad_input_dev) { error = input_register_device(pad_input_dev); if (error) goto fail; } return 0; fail: wacom_wac->pad_input = NULL; wacom_wac->touch_input = NULL; wacom_wac->pen_input = NULL; return error; } /* * Not all devices report physical dimensions from HID. * Compute the default from hardcoded logical dimension * and resolution before driver overwrites them. 
*/ static void wacom_set_default_phy(struct wacom_features *features) { if (features->x_resolution) { features->x_phy = (features->x_max * 100) / features->x_resolution; features->y_phy = (features->y_max * 100) / features->y_resolution; } } static void wacom_calculate_res(struct wacom_features *features) { /* set unit to "100th of a mm" for devices not reported by HID */ if (!features->unit) { features->unit = 0x11; features->unitExpo = -3; } features->x_resolution = wacom_calc_hid_res(features->x_max, features->x_phy, features->unit, features->unitExpo); features->y_resolution = wacom_calc_hid_res(features->y_max, features->y_phy, features->unit, features->unitExpo); } void wacom_battery_work(struct work_struct *work) { struct wacom *wacom = container_of(work, struct wacom, battery_work); if ((wacom->wacom_wac.features.quirks & WACOM_QUIRK_BATTERY) && !wacom->battery.battery) { wacom_initialize_battery(wacom); } else if (!(wacom->wacom_wac.features.quirks & WACOM_QUIRK_BATTERY) && wacom->battery.battery) { wacom_destroy_battery(wacom); } } static size_t wacom_compute_pktlen(struct hid_device *hdev) { struct hid_report_enum *report_enum; struct hid_report *report; size_t size = 0; report_enum = hdev->report_enum + HID_INPUT_REPORT; list_for_each_entry(report, &report_enum->report_list, list) { size_t report_size = hid_report_len(report); if (report_size > size) size = report_size; } return size; } static void wacom_update_name(struct wacom *wacom, const char *suffix) { struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom_wac->features; char name[WACOM_NAME_MAX - 20]; /* Leave some room for suffixes */ /* Generic devices name unspecified */ if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) { char *product_name = wacom->hdev->name; if (hid_is_usb(wacom->hdev)) { struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent); struct usb_device *dev = interface_to_usbdev(intf); product_name = dev->product; } if (wacom->hdev->bus == BUS_I2C) { snprintf(name, sizeof(name), "%s %X", features->name, wacom->hdev->product); } else if (strstr(product_name, "Wacom") || strstr(product_name, "wacom") || strstr(product_name, "WACOM")) { if (strscpy(name, product_name, sizeof(name)) < 0) { hid_warn(wacom->hdev, "String overflow while assembling device name"); } } else { snprintf(name, sizeof(name), "Wacom %s", product_name); } /* strip out excess whitespaces */ while (1) { char *gap = strstr(name, " "); if (gap == NULL) break; /* shift everything including the terminator */ memmove(gap, gap+1, strlen(gap)); } /* get rid of trailing whitespace */ if (name[strlen(name)-1] == ' ') name[strlen(name)-1] = '\0'; } else { if (strscpy(name, features->name, sizeof(name)) < 0) { hid_warn(wacom->hdev, "String overflow while assembling device name"); } } snprintf(wacom_wac->name, sizeof(wacom_wac->name), "%s%s", name, suffix); /* Append the device type to the name */ snprintf(wacom_wac->pen_name, sizeof(wacom_wac->pen_name), "%s%s Pen", name, suffix); snprintf(wacom_wac->touch_name, sizeof(wacom_wac->touch_name), "%s%s Finger", name, suffix); snprintf(wacom_wac->pad_name, sizeof(wacom_wac->pad_name), "%s%s Pad", name, suffix); } static void wacom_release_resources(struct wacom *wacom) { struct hid_device *hdev = wacom->hdev; if (!wacom->resources) return; devres_release_group(&hdev->dev, wacom); wacom->resources = false; wacom->wacom_wac.pen_input = NULL; wacom->wacom_wac.touch_input = NULL; wacom->wacom_wac.pad_input = NULL; } static void 
wacom_set_shared_values(struct wacom_wac *wacom_wac) { if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) { wacom_wac->shared->type = wacom_wac->features.type; wacom_wac->shared->touch_input = wacom_wac->touch_input; } if (wacom_wac->has_mute_touch_switch) { wacom_wac->shared->has_mute_touch_switch = true; /* Hardware touch switch may be off. Wait until * we know the switch state to decide is_touch_on. * Softkey state should be initialized to "on" to * match historic default. */ if (wacom_wac->is_soft_touch_switch) wacom_wac->shared->is_touch_on = true; } if (wacom_wac->shared->has_mute_touch_switch && wacom_wac->shared->touch_input) { set_bit(EV_SW, wacom_wac->shared->touch_input->evbit); input_set_capability(wacom_wac->shared->touch_input, EV_SW, SW_MUTE_DEVICE); } } static int wacom_parse_and_register(struct wacom *wacom, bool wireless) { struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom_wac->features; struct hid_device *hdev = wacom->hdev; int error; unsigned int connect_mask = HID_CONNECT_HIDRAW; features->pktlen = wacom_compute_pktlen(hdev); if (features->pktlen > WACOM_PKGLEN_MAX) return -EINVAL; if (!devres_open_group(&hdev->dev, wacom, GFP_KERNEL)) return -ENOMEM; wacom->resources = true; error = wacom_allocate_inputs(wacom); if (error) goto fail; /* * Bamboo Pad has a generic hid handling for the Pen, and we switch it * into debug mode for the touch part. * We ignore the other interfaces. */ if (features->type == BAMBOO_PAD) { if (features->pktlen == WACOM_PKGLEN_PENABLED) { features->type = HID_GENERIC; } else if ((features->pktlen != WACOM_PKGLEN_BPAD_TOUCH) && (features->pktlen != WACOM_PKGLEN_BPAD_TOUCH_USB)) { error = -ENODEV; goto fail; } } /* set the default size in case we do not get them from hid */ wacom_set_default_phy(features); /* Retrieve the physical and logical size for touch devices */ wacom_retrieve_hid_descriptor(hdev, features); wacom_setup_device_quirks(wacom); if (features->device_type == WACOM_DEVICETYPE_NONE && features->type != WIRELESS) { error = features->type == HID_GENERIC ? -ENODEV : 0; dev_warn(&hdev->dev, "Unknown device_type for '%s'. %s.", hdev->name, error ? "Ignoring" : "Assuming pen"); if (error) goto fail; features->device_type |= WACOM_DEVICETYPE_PEN; } wacom_calculate_res(features); wacom_update_name(wacom, wireless ? 
" (WL)" : ""); /* pen only Bamboo neither support touch nor pad */ if ((features->type == BAMBOO_PEN) && ((features->device_type & WACOM_DEVICETYPE_TOUCH) || (features->device_type & WACOM_DEVICETYPE_PAD))) { error = -ENODEV; goto fail; } error = wacom_add_shared_data(hdev); if (error) goto fail; error = wacom_setup_inputs(wacom); if (error) goto fail; if (features->type == HID_GENERIC) connect_mask |= HID_CONNECT_DRIVER; /* Regular HID work starts now */ error = hid_hw_start(hdev, connect_mask); if (error) { hid_err(hdev, "hw start failed\n"); goto fail; } error = wacom_register_inputs(wacom); if (error) goto fail; if (wacom->wacom_wac.features.device_type & WACOM_DEVICETYPE_PAD) { error = wacom_initialize_leds(wacom); if (error) goto fail; error = wacom_initialize_remotes(wacom); if (error) goto fail; } if (!wireless) { /* Note that if query fails it is not a hard failure */ wacom_query_tablet_data(wacom); } /* touch only Bamboo doesn't support pen */ if ((features->type == BAMBOO_TOUCH) && (features->device_type & WACOM_DEVICETYPE_PEN)) { cancel_delayed_work_sync(&wacom->init_work); _wacom_query_tablet_data(wacom); error = -ENODEV; goto fail_quirks; } if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) { error = hid_hw_open(hdev); if (error) { hid_err(hdev, "hw open failed\n"); goto fail_quirks; } } wacom_set_shared_values(wacom_wac); devres_close_group(&hdev->dev, wacom); return 0; fail_quirks: hid_hw_stop(hdev); fail: wacom_release_resources(wacom); return error; } static void wacom_wireless_work(struct work_struct *work) { struct wacom *wacom = container_of(work, struct wacom, wireless_work); struct usb_device *usbdev = wacom->usbdev; struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct hid_device *hdev1, *hdev2; struct wacom *wacom1, *wacom2; struct wacom_wac *wacom_wac1, *wacom_wac2; int error; /* * Regardless if this is a disconnect or a new tablet, * remove any existing input and battery devices. 
*/ wacom_destroy_battery(wacom); if (!usbdev) return; /* Stylus interface */ hdev1 = usb_get_intfdata(usbdev->config->interface[1]); wacom1 = hid_get_drvdata(hdev1); wacom_wac1 = &(wacom1->wacom_wac); wacom_release_resources(wacom1); /* Touch interface */ hdev2 = usb_get_intfdata(usbdev->config->interface[2]); wacom2 = hid_get_drvdata(hdev2); wacom_wac2 = &(wacom2->wacom_wac); wacom_release_resources(wacom2); if (wacom_wac->pid == 0) { hid_info(wacom->hdev, "wireless tablet disconnected\n"); } else { const struct hid_device_id *id = wacom_ids; hid_info(wacom->hdev, "wireless tablet connected with PID %x\n", wacom_wac->pid); while (id->bus) { if (id->vendor == USB_VENDOR_ID_WACOM && id->product == wacom_wac->pid) break; id++; } if (!id->bus) { hid_info(wacom->hdev, "ignoring unknown PID.\n"); return; } /* Stylus interface */ wacom_wac1->features = *((struct wacom_features *)id->driver_data); wacom_wac1->pid = wacom_wac->pid; hid_hw_stop(hdev1); error = wacom_parse_and_register(wacom1, true); if (error) goto fail; /* Touch interface */ if (wacom_wac1->features.touch_max || (wacom_wac1->features.type >= INTUOSHT && wacom_wac1->features.type <= BAMBOO_PT)) { wacom_wac2->features = *((struct wacom_features *)id->driver_data); wacom_wac2->pid = wacom_wac->pid; hid_hw_stop(hdev2); error = wacom_parse_and_register(wacom2, true); if (error) goto fail; } if (strscpy(wacom_wac->name, wacom_wac1->name, sizeof(wacom_wac->name)) < 0) { hid_warn(wacom->hdev, "String overflow while assembling device name"); } } return; fail: wacom_release_resources(wacom1); wacom_release_resources(wacom2); return; } static void wacom_remote_destroy_battery(struct wacom *wacom, int index) { struct wacom_remote *remote = wacom->remote; if (remote->remotes[index].battery.battery) { devres_release_group(&wacom->hdev->dev, &remote->remotes[index].battery.bat_desc); remote->remotes[index].battery.battery = NULL; remote->remotes[index].active_time = 0; } } static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index) { struct wacom_remote *remote = wacom->remote; u32 serial = remote->remotes[index].serial; int i; unsigned long flags; for (i = 0; i < WACOM_MAX_REMOTES; i++) { if (remote->remotes[i].serial == serial) { spin_lock_irqsave(&remote->remote_lock, flags); remote->remotes[i].registered = false; spin_unlock_irqrestore(&remote->remote_lock, flags); wacom_remote_destroy_battery(wacom, i); if (remote->remotes[i].group.name) devres_release_group(&wacom->hdev->dev, &remote->remotes[i]); remote->remotes[i].serial = 0; remote->remotes[i].group.name = NULL; wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN; } } } static int wacom_remote_create_one(struct wacom *wacom, u32 serial, unsigned int index) { struct wacom_remote *remote = wacom->remote; struct device *dev = &wacom->hdev->dev; int error, k; /* A remote can pair more than once with an EKR, * check to make sure this serial isn't already paired. 
*/ for (k = 0; k < WACOM_MAX_REMOTES; k++) { if (remote->remotes[k].serial == serial) break; } if (k < WACOM_MAX_REMOTES) { remote->remotes[index].serial = serial; return 0; } if (!devres_open_group(dev, &remote->remotes[index], GFP_KERNEL)) return -ENOMEM; error = wacom_remote_create_attr_group(wacom, serial, index); if (error) goto fail; remote->remotes[index].input = wacom_allocate_input(wacom); if (!remote->remotes[index].input) { error = -ENOMEM; goto fail; } remote->remotes[index].input->uniq = remote->remotes[index].group.name; remote->remotes[index].input->name = wacom->wacom_wac.pad_name; if (!remote->remotes[index].input->name) { error = -EINVAL; goto fail; } error = wacom_setup_pad_input_capabilities(remote->remotes[index].input, &wacom->wacom_wac); if (error) goto fail; remote->remotes[index].serial = serial; error = input_register_device(remote->remotes[index].input); if (error) goto fail; error = wacom_led_groups_alloc_and_register_one( &remote->remotes[index].input->dev, wacom, index, 3, true); if (error) goto fail; remote->remotes[index].registered = true; devres_close_group(dev, &remote->remotes[index]); return 0; fail: devres_release_group(dev, &remote->remotes[index]); remote->remotes[index].serial = 0; return error; } static int wacom_remote_attach_battery(struct wacom *wacom, int index) { struct wacom_remote *remote = wacom->remote; int error; if (!remote->remotes[index].registered) return 0; if (remote->remotes[index].battery.battery) return 0; if (!remote->remotes[index].active_time) return 0; if (wacom->led.groups[index].select == WACOM_STATUS_UNKNOWN) return 0; error = __wacom_initialize_battery(wacom, &wacom->remote->remotes[index].battery); if (error) return error; return 0; } static void wacom_remote_work(struct work_struct *work) { struct wacom *wacom = container_of(work, struct wacom, remote_work); struct wacom_remote *remote = wacom->remote; ktime_t kt = ktime_get(); struct wacom_remote_work_data remote_work_data; unsigned long flags; unsigned int count; u32 work_serial; int i; spin_lock_irqsave(&remote->remote_lock, flags); count = kfifo_out(&remote->remote_fifo, &remote_work_data, sizeof(remote_work_data)); if (count != sizeof(remote_work_data)) { hid_err(wacom->hdev, "workitem triggered without status available\n"); spin_unlock_irqrestore(&remote->remote_lock, flags); return; } if (!kfifo_is_empty(&remote->remote_fifo)) wacom_schedule_work(&wacom->wacom_wac, WACOM_WORKER_REMOTE); spin_unlock_irqrestore(&remote->remote_lock, flags); for (i = 0; i < WACOM_MAX_REMOTES; i++) { work_serial = remote_work_data.remote[i].serial; if (work_serial) { if (kt - remote->remotes[i].active_time > WACOM_REMOTE_BATTERY_TIMEOUT && remote->remotes[i].active_time != 0) wacom_remote_destroy_battery(wacom, i); if (remote->remotes[i].serial == work_serial) { wacom_remote_attach_battery(wacom, i); continue; } if (remote->remotes[i].serial) wacom_remote_destroy_one(wacom, i); wacom_remote_create_one(wacom, work_serial, i); } else if (remote->remotes[i].serial) { wacom_remote_destroy_one(wacom, i); } } } static void wacom_mode_change_work(struct work_struct *work) { struct wacom *wacom = container_of(work, struct wacom, mode_change_work); struct wacom_shared *shared = wacom->wacom_wac.shared; struct wacom *wacom1 = NULL; struct wacom *wacom2 = NULL; bool is_direct = wacom->wacom_wac.is_direct_mode; int error = 0; if (shared->pen) { wacom1 = hid_get_drvdata(shared->pen); wacom_release_resources(wacom1); hid_hw_stop(wacom1->hdev); wacom1->wacom_wac.has_mode_change = true; 
wacom1->wacom_wac.is_direct_mode = is_direct; } if (shared->touch) { wacom2 = hid_get_drvdata(shared->touch); wacom_release_resources(wacom2); hid_hw_stop(wacom2->hdev); wacom2->wacom_wac.has_mode_change = true; wacom2->wacom_wac.is_direct_mode = is_direct; } if (wacom1) { error = wacom_parse_and_register(wacom1, false); if (error) return; } if (wacom2) { error = wacom_parse_and_register(wacom2, false); if (error) return; } return; } static int wacom_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct wacom *wacom; struct wacom_wac *wacom_wac; struct wacom_features *features; int error; if (!id->driver_data) return -EINVAL; hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS; /* hid-core sets this quirk for the boot interface */ hdev->quirks &= ~HID_QUIRK_NOGET; wacom = devm_kzalloc(&hdev->dev, sizeof(struct wacom), GFP_KERNEL); if (!wacom) return -ENOMEM; hid_set_drvdata(hdev, wacom); wacom->hdev = hdev; wacom_wac = &wacom->wacom_wac; wacom_wac->features = *((struct wacom_features *)id->driver_data); features = &wacom_wac->features; if (features->check_for_hid_type && features->hid_type != hdev->type) return -ENODEV; error = wacom_devm_kfifo_alloc(wacom); if (error) return error; wacom_wac->hid_data.inputmode = -1; wacom_wac->mode_report = -1; if (hid_is_usb(hdev)) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct usb_device *dev = interface_to_usbdev(intf); wacom->usbdev = dev; wacom->intf = intf; } mutex_init(&wacom->lock); INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work); INIT_DELAYED_WORK(&wacom->aes_battery_work, wacom_aes_battery_handler); INIT_WORK(&wacom->wireless_work, wacom_wireless_work); INIT_WORK(&wacom->battery_work, wacom_battery_work); INIT_WORK(&wacom->remote_work, wacom_remote_work); INIT_WORK(&wacom->mode_change_work, wacom_mode_change_work); timer_setup(&wacom->idleprox_timer, &wacom_idleprox_timeout, TIMER_DEFERRABLE); /* ask for the report descriptor to be loaded by HID */ error = hid_parse(hdev); if (error) { hid_err(hdev, "parse failed\n"); return error; } if (features->type == BOOTLOADER) { hid_warn(hdev, "Using device in hidraw-only mode"); return hid_hw_start(hdev, HID_CONNECT_HIDRAW); } error = wacom_parse_and_register(wacom, false); if (error) return error; if (hdev->bus == BUS_BLUETOOTH) { error = device_create_file(&hdev->dev, &dev_attr_speed); if (error) hid_warn(hdev, "can't create sysfs speed attribute err: %d\n", error); } wacom_wac->probe_complete = true; return 0; } static void wacom_remove(struct hid_device *hdev) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom_wac->features; if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) hid_hw_close(hdev); hid_hw_stop(hdev); cancel_delayed_work_sync(&wacom->init_work); cancel_work_sync(&wacom->wireless_work); cancel_work_sync(&wacom->battery_work); cancel_work_sync(&wacom->remote_work); cancel_work_sync(&wacom->mode_change_work); del_timer_sync(&wacom->idleprox_timer); if (hdev->bus == BUS_BLUETOOTH) device_remove_file(&hdev->dev, &dev_attr_speed); /* make sure we don't trigger the LEDs */ wacom_led_groups_release(wacom); if (wacom->wacom_wac.features.type != REMOTE) wacom_release_resources(wacom); } #ifdef CONFIG_PM static int wacom_resume(struct hid_device *hdev) { struct wacom *wacom = hid_get_drvdata(hdev); mutex_lock(&wacom->lock); /* switch to wacom mode first */ _wacom_query_tablet_data(wacom); wacom_led_control(wacom); mutex_unlock(&wacom->lock); return 0; } static int 
wacom_reset_resume(struct hid_device *hdev)
{
	return wacom_resume(hdev);
}
#endif /* CONFIG_PM */

static struct hid_driver wacom_driver = {
	.name		= "wacom",
	.id_table	= wacom_ids,
	.probe		= wacom_probe,
	.remove		= wacom_remove,
	.report		= wacom_wac_report,
#ifdef CONFIG_PM
	.resume		= wacom_resume,
	.reset_resume	= wacom_reset_resume,
#endif
	.raw_event	= wacom_raw_event,
};
module_hid_driver(wacom_driver);

MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0
/*
 * main.c - Multi purpose firmware loading support
 *
 * Copyright (c) 2003 Manuel Estrada Sainz
 *
 * Please see Documentation/driver-api/firmware/ for more information.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/device.h>
#include <linux/kernel_read_file.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
#include <linux/zstd.h>
#include <linux/xz.h>

#include <generated/utsrelease.h>

#include "../base.h"
#include "firmware.h"
#include "fallback.h"

MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");

struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;
	struct list_head head;
	int state;

#ifdef CONFIG_FW_CACHE
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;

	struct delayed_work work;

	struct notifier_block pm_notify;
#endif
};

struct fw_cache_entry {
	struct list_head list;
	const char *name;
};

struct fw_name_devm {
	unsigned long magic;
	const char *name;
};

static inline struct fw_priv *to_fw_priv(struct kref *ref)
{
	return container_of(ref, struct fw_priv, ref);
}

#define FW_LOADER_NO_CACHE	0
#define FW_LOADER_START_CACHE	1

/* fw_lock could be moved to 'struct fw_sysfs' but since it is just
 * guarding for corner cases a global lock should be OK */
DEFINE_MUTEX(fw_lock);

struct firmware_cache fw_cache;
bool fw_load_abort_all;

void fw_state_init(struct fw_priv *fw_priv)
{
	struct fw_state *fw_st = &fw_priv->fw_st;

	init_completion(&fw_st->completion);
	fw_st->status = FW_STATUS_UNKNOWN;
}

static inline int fw_state_wait(struct fw_priv *fw_priv)
{
	return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
}

static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv);

static struct fw_priv *__allocate_fw_priv(const char *fw_name,
					  struct firmware_cache *fwc,
					  void *dbuf, size_t size,
					  size_t offset, u32 opt_flags)
{
	struct fw_priv *fw_priv;

	/* For a partial read, the buffer must be preallocated.
*/ if ((opt_flags & FW_OPT_PARTIAL) && !dbuf) return NULL; /* Only partial reads are allowed to use an offset. */ if (offset != 0 && !(opt_flags & FW_OPT_PARTIAL)) return NULL; fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC); if (!fw_priv) return NULL; fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC); if (!fw_priv->fw_name) { kfree(fw_priv); return NULL; } kref_init(&fw_priv->ref); fw_priv->fwc = fwc; fw_priv->data = dbuf; fw_priv->allocated_size = size; fw_priv->offset = offset; fw_priv->opt_flags = opt_flags; fw_state_init(fw_priv); #ifdef CONFIG_FW_LOADER_USER_HELPER INIT_LIST_HEAD(&fw_priv->pending_list); #endif pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv); return fw_priv; } static struct fw_priv *__lookup_fw_priv(const char *fw_name) { struct fw_priv *tmp; struct firmware_cache *fwc = &fw_cache; list_for_each_entry(tmp, &fwc->head, list) if (!strcmp(tmp->fw_name, fw_name)) return tmp; return NULL; } /* Returns 1 for batching firmware requests with the same name */ int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc, struct fw_priv **fw_priv, void *dbuf, size_t size, size_t offset, u32 opt_flags) { struct fw_priv *tmp; spin_lock(&fwc->lock); /* * Do not merge requests that are marked to be non-cached or * are performing partial reads. */ if (!(opt_flags & (FW_OPT_NOCACHE | FW_OPT_PARTIAL))) { tmp = __lookup_fw_priv(fw_name); if (tmp) { kref_get(&tmp->ref); spin_unlock(&fwc->lock); *fw_priv = tmp; pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n"); return 1; } } tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size, offset, opt_flags); if (tmp) { INIT_LIST_HEAD(&tmp->list); if (!(opt_flags & FW_OPT_NOCACHE)) list_add(&tmp->list, &fwc->head); } spin_unlock(&fwc->lock); *fw_priv = tmp; return tmp ? 
0 : -ENOMEM;
}

static void __free_fw_priv(struct kref *ref)
	__releases(&fwc->lock)
{
	struct fw_priv *fw_priv = to_fw_priv(ref);
	struct firmware_cache *fwc = fw_priv->fwc;

	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
		 (unsigned int)fw_priv->size);

	list_del(&fw_priv->list);
	spin_unlock(&fwc->lock);

	if (fw_is_paged_buf(fw_priv))
		fw_free_paged_buf(fw_priv);
	else if (!fw_priv->allocated_size)
		vfree(fw_priv->data);

	kfree_const(fw_priv->fw_name);
	kfree(fw_priv);
}

void free_fw_priv(struct fw_priv *fw_priv)
{
	struct firmware_cache *fwc = fw_priv->fwc;

	spin_lock(&fwc->lock);
	if (!kref_put(&fw_priv->ref, __free_fw_priv))
		spin_unlock(&fwc->lock);
}

#ifdef CONFIG_FW_LOADER_PAGED_BUF
bool fw_is_paged_buf(struct fw_priv *fw_priv)
{
	return fw_priv->is_paged_buf;
}

void fw_free_paged_buf(struct fw_priv *fw_priv)
{
	int i;

	if (!fw_priv->pages)
		return;

	vunmap(fw_priv->data);

	for (i = 0; i < fw_priv->nr_pages; i++)
		__free_page(fw_priv->pages[i]);
	kvfree(fw_priv->pages);
	fw_priv->pages = NULL;
	fw_priv->page_array_size = 0;
	fw_priv->nr_pages = 0;
	fw_priv->data = NULL;
	fw_priv->size = 0;
}

int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
{
	/* If the array of pages is too small, grow it */
	if (fw_priv->page_array_size < pages_needed) {
		int new_array_size = max(pages_needed,
					 fw_priv->page_array_size * 2);
		struct page **new_pages;

		new_pages = kvmalloc_array(new_array_size, sizeof(void *),
					   GFP_KERNEL);
		if (!new_pages)
			return -ENOMEM;
		memcpy(new_pages, fw_priv->pages,
		       fw_priv->page_array_size * sizeof(void *));
		memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
		       (new_array_size - fw_priv->page_array_size));
		kvfree(fw_priv->pages);
		fw_priv->pages = new_pages;
		fw_priv->page_array_size = new_array_size;
	}

	while (fw_priv->nr_pages < pages_needed) {
		fw_priv->pages[fw_priv->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!fw_priv->pages[fw_priv->nr_pages])
			return -ENOMEM;
		fw_priv->nr_pages++;
	}

	return 0;
}

int fw_map_paged_buf(struct fw_priv *fw_priv)
{
	/* one pages buffer should be mapped/unmapped only once */
	if (!fw_priv->pages)
		return 0;

	vunmap(fw_priv->data);
	fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
			     PAGE_KERNEL_RO);
	if (!fw_priv->data)
		return -ENOMEM;

	return 0;
}
#endif

/*
 * ZSTD-compressed firmware support
 */
#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
static int fw_decompress_zstd(struct device *dev, struct fw_priv *fw_priv,
			      size_t in_size, const void *in_buffer)
{
	size_t len, out_size, workspace_size;
	void *workspace, *out_buf;
	zstd_dctx *ctx;
	int err;

	if (fw_priv->allocated_size) {
		out_size = fw_priv->allocated_size;
		out_buf = fw_priv->data;
	} else {
		zstd_frame_header params;

		if (zstd_get_frame_header(&params, in_buffer, in_size) ||
		    params.frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN) {
			dev_dbg(dev, "%s: invalid zstd header\n", __func__);
			return -EINVAL;
		}
		out_size = params.frameContentSize;
		out_buf = vzalloc(out_size);
		if (!out_buf)
			return -ENOMEM;
	}

	workspace_size = zstd_dctx_workspace_bound();
	workspace = kvzalloc(workspace_size, GFP_KERNEL);
	if (!workspace) {
		err = -ENOMEM;
		goto error;
	}
	ctx = zstd_init_dctx(workspace, workspace_size);
	if (!ctx) {
		dev_dbg(dev, "%s: failed to initialize context\n", __func__);
		err = -EINVAL;
		goto error;
	}
	len = zstd_decompress_dctx(ctx, out_buf, out_size, in_buffer, in_size);
	if (zstd_is_error(len)) {
		dev_dbg(dev, "%s: failed to decompress: %d\n", __func__,
			zstd_get_error_code(len));
		err = -EINVAL;
		goto error;
	}
	if (!fw_priv->allocated_size)
		fw_priv->data = out_buf;
fw_priv->size = len; err = 0; error: kvfree(workspace); if (err && !fw_priv->allocated_size) vfree(out_buf); return err; } #endif /* CONFIG_FW_LOADER_COMPRESS_ZSTD */ /* * XZ-compressed firmware support */ #ifdef CONFIG_FW_LOADER_COMPRESS_XZ /* show an error and return the standard error code */ static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret) { if (xz_ret != XZ_STREAM_END) { dev_warn(dev, "xz decompression failed (xz_ret=%d)\n", xz_ret); return xz_ret == XZ_MEM_ERROR ? -ENOMEM : -EINVAL; } return 0; } /* single-shot decompression onto the pre-allocated buffer */ static int fw_decompress_xz_single(struct device *dev, struct fw_priv *fw_priv, size_t in_size, const void *in_buffer) { struct xz_dec *xz_dec; struct xz_buf xz_buf; enum xz_ret xz_ret; xz_dec = xz_dec_init(XZ_SINGLE, (u32)-1); if (!xz_dec) return -ENOMEM; xz_buf.in_size = in_size; xz_buf.in = in_buffer; xz_buf.in_pos = 0; xz_buf.out_size = fw_priv->allocated_size; xz_buf.out = fw_priv->data; xz_buf.out_pos = 0; xz_ret = xz_dec_run(xz_dec, &xz_buf); xz_dec_end(xz_dec); fw_priv->size = xz_buf.out_pos; return fw_decompress_xz_error(dev, xz_ret); } /* decompression on paged buffer and map it */ static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv, size_t in_size, const void *in_buffer) { struct xz_dec *xz_dec; struct xz_buf xz_buf; enum xz_ret xz_ret; struct page *page; int err = 0; xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1); if (!xz_dec) return -ENOMEM; xz_buf.in_size = in_size; xz_buf.in = in_buffer; xz_buf.in_pos = 0; fw_priv->is_paged_buf = true; fw_priv->size = 0; do { if (fw_grow_paged_buf(fw_priv, fw_priv->nr_pages + 1)) { err = -ENOMEM; goto out; } /* decompress onto the new allocated page */ page = fw_priv->pages[fw_priv->nr_pages - 1]; xz_buf.out = kmap_local_page(page); xz_buf.out_pos = 0; xz_buf.out_size = PAGE_SIZE; xz_ret = xz_dec_run(xz_dec, &xz_buf); kunmap_local(xz_buf.out); fw_priv->size += xz_buf.out_pos; /* partial decompression means either end or error */ if (xz_buf.out_pos != PAGE_SIZE) break; } while (xz_ret == XZ_OK); err = fw_decompress_xz_error(dev, xz_ret); if (!err) err = fw_map_paged_buf(fw_priv); out: xz_dec_end(xz_dec); return err; } static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv, size_t in_size, const void *in_buffer) { /* if the buffer is pre-allocated, we can perform in single-shot mode */ if (fw_priv->data) return fw_decompress_xz_single(dev, fw_priv, in_size, in_buffer); else return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer); } #endif /* CONFIG_FW_LOADER_COMPRESS_XZ */ /* direct firmware loading support */ static char fw_path_para[256]; static const char * const fw_path[] = { fw_path_para, "/lib/firmware/updates/" UTS_RELEASE, "/lib/firmware/updates", "/lib/firmware/" UTS_RELEASE, "/lib/firmware" }; /* * Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH' * from kernel command line because firmware_class is generally built in * kernel instead of module. 
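 *
 * For example (illustrative path only), "firmware_class.path=/lib/firmware/vendor"
 * can be appended to the kernel command line; and because the parameter below
 * is created with mode 0644, the same search path can also be written at
 * runtime to /sys/module/firmware_class/parameters/path.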
*/ module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644); MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path"); static int fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv, const char *suffix, int (*decompress)(struct device *dev, struct fw_priv *fw_priv, size_t in_size, const void *in_buffer)) { size_t size; int i, len, maxlen = 0; int rc = -ENOENT; char *path, *nt = NULL; size_t msize = INT_MAX; void *buffer = NULL; /* Already populated data member means we're loading into a buffer */ if (!decompress && fw_priv->data) { buffer = fw_priv->data; msize = fw_priv->allocated_size; } path = __getname(); if (!path) return -ENOMEM; wait_for_initramfs(); for (i = 0; i < ARRAY_SIZE(fw_path); i++) { size_t file_size = 0; size_t *file_size_ptr = NULL; /* skip the unset customized path */ if (!fw_path[i][0]) continue; /* strip off \n from customized path */ maxlen = strlen(fw_path[i]); if (i == 0) { nt = strchr(fw_path[i], '\n'); if (nt) maxlen = nt - fw_path[i]; } len = snprintf(path, PATH_MAX, "%.*s/%s%s", maxlen, fw_path[i], fw_priv->fw_name, suffix); if (len >= PATH_MAX) { rc = -ENAMETOOLONG; break; } fw_priv->size = 0; /* * The total file size is only examined when doing a partial * read; the "full read" case needs to fail if the whole * firmware was not completely loaded. */ if ((fw_priv->opt_flags & FW_OPT_PARTIAL) && buffer) file_size_ptr = &file_size; /* load firmware files from the mount namespace of init */ rc = kernel_read_file_from_path_initns(path, fw_priv->offset, &buffer, msize, file_size_ptr, READING_FIRMWARE); if (rc < 0) { if (!(fw_priv->opt_flags & FW_OPT_NO_WARN)) { if (rc != -ENOENT) dev_warn(device, "loading %s failed with error %d\n", path, rc); else dev_dbg(device, "loading %s failed for no such file or directory.\n", path); } continue; } size = rc; rc = 0; dev_dbg(device, "Loading firmware from %s\n", path); if (decompress) { dev_dbg(device, "f/w decompressing %s\n", fw_priv->fw_name); rc = decompress(device, fw_priv, size, buffer); /* discard the superfluous original content */ vfree(buffer); buffer = NULL; if (rc) { fw_free_paged_buf(fw_priv); continue; } } else { dev_dbg(device, "direct-loading %s\n", fw_priv->fw_name); if (!fw_priv->data) fw_priv->data = buffer; fw_priv->size = size; } fw_state_done(fw_priv); break; } __putname(path); return rc; } /* firmware holds the ownership of pages */ static void firmware_free_data(const struct firmware *fw) { /* Loaded directly? 
*/ if (!fw->priv) { vfree(fw->data); return; } free_fw_priv(fw->priv); } /* store the pages buffer info firmware from buf */ static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw) { fw->priv = fw_priv; fw->size = fw_priv->size; fw->data = fw_priv->data; pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n", __func__, fw_priv->fw_name, fw_priv, fw_priv->data, (unsigned int)fw_priv->size); } #ifdef CONFIG_FW_CACHE static void fw_name_devm_release(struct device *dev, void *res) { struct fw_name_devm *fwn = res; if (fwn->magic == (unsigned long)&fw_cache) pr_debug("%s: fw_name-%s devm-%p released\n", __func__, fwn->name, res); kfree_const(fwn->name); } static int fw_devm_match(struct device *dev, void *res, void *match_data) { struct fw_name_devm *fwn = res; return (fwn->magic == (unsigned long)&fw_cache) && !strcmp(fwn->name, match_data); } static struct fw_name_devm *fw_find_devm_name(struct device *dev, const char *name) { struct fw_name_devm *fwn; fwn = devres_find(dev, fw_name_devm_release, fw_devm_match, (void *)name); return fwn; } static bool fw_cache_is_setup(struct device *dev, const char *name) { struct fw_name_devm *fwn; fwn = fw_find_devm_name(dev, name); if (fwn) return true; return false; } /* add firmware name into devres list */ static int fw_add_devm_name(struct device *dev, const char *name) { struct fw_name_devm *fwn; if (fw_cache_is_setup(dev, name)) return 0; fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm), GFP_KERNEL); if (!fwn) return -ENOMEM; fwn->name = kstrdup_const(name, GFP_KERNEL); if (!fwn->name) { devres_free(fwn); return -ENOMEM; } fwn->magic = (unsigned long)&fw_cache; devres_add(dev, fwn); return 0; } #else static bool fw_cache_is_setup(struct device *dev, const char *name) { return false; } static int fw_add_devm_name(struct device *dev, const char *name) { return 0; } #endif int assign_fw(struct firmware *fw, struct device *device) { struct fw_priv *fw_priv = fw->priv; int ret; mutex_lock(&fw_lock); if (!fw_priv->size || fw_state_is_aborted(fw_priv)) { mutex_unlock(&fw_lock); return -ENOENT; } /* * add firmware name into devres list so that we can auto cache * and uncache firmware for device. * * device may has been deleted already, but the problem * should be fixed in devres or driver core. */ /* don't cache firmware handled without uevent */ if (device && (fw_priv->opt_flags & FW_OPT_UEVENT) && !(fw_priv->opt_flags & FW_OPT_NOCACHE)) { ret = fw_add_devm_name(device, fw_priv->fw_name); if (ret) { mutex_unlock(&fw_lock); return ret; } } /* * After caching firmware image is started, let it piggyback * on request firmware. 
*/ if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) && fw_priv->fwc->state == FW_LOADER_START_CACHE) fw_cache_piggyback_on_request(fw_priv); /* pass the pages buffer to driver at the last minute */ fw_set_page_data(fw_priv, fw); mutex_unlock(&fw_lock); return 0; } /* prepare firmware and firmware_buf structs; * return 0 if a firmware is already assigned, 1 if need to load one, * or a negative error code */ static int _request_firmware_prepare(struct firmware **firmware_p, const char *name, struct device *device, void *dbuf, size_t size, size_t offset, u32 opt_flags) { struct firmware *firmware; struct fw_priv *fw_priv; int ret; *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); if (!firmware) { dev_err(device, "%s: kmalloc(struct firmware) failed\n", __func__); return -ENOMEM; } if (firmware_request_builtin_buf(firmware, name, dbuf, size)) { dev_dbg(device, "using built-in %s\n", name); return 0; /* assigned */ } ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size, offset, opt_flags); /* * bind with 'priv' now to avoid warning in failure path * of requesting firmware. */ firmware->priv = fw_priv; if (ret > 0) { ret = fw_state_wait(fw_priv); if (!ret) { fw_set_page_data(fw_priv, firmware); return 0; /* assigned */ } } if (ret < 0) return ret; return 1; /* need to load */ } /* * Batched requests need only one wake, we need to do this step last due to the * fallback mechanism. The buf is protected with kref_get(), and it won't be * released until the last user calls release_firmware(). * * Failed batched requests are possible as well, in such cases we just share * the struct fw_priv and won't release it until all requests are woken * and have gone through this same path. */ static void fw_abort_batch_reqs(struct firmware *fw) { struct fw_priv *fw_priv; /* Loaded directly? */ if (!fw || !fw->priv) return; fw_priv = fw->priv; mutex_lock(&fw_lock); if (!fw_state_is_aborted(fw_priv)) fw_state_aborted(fw_priv); mutex_unlock(&fw_lock); } #if defined(CONFIG_FW_LOADER_DEBUG) #include <crypto/hash.h> #include <crypto/sha2.h> static void fw_log_firmware_info(const struct firmware *fw, const char *name, struct device *device) { struct shash_desc *shash; struct crypto_shash *alg; u8 *sha256buf; char *outbuf; alg = crypto_alloc_shash("sha256", 0, 0); if (IS_ERR(alg)) return; sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL); outbuf = kmalloc(SHA256_BLOCK_SIZE + 1, GFP_KERNEL); shash = kmalloc(sizeof(*shash) + crypto_shash_descsize(alg), GFP_KERNEL); if (!sha256buf || !outbuf || !shash) goto out_free; shash->tfm = alg; if (crypto_shash_digest(shash, fw->data, fw->size, sha256buf) < 0) goto out_shash; for (int i = 0; i < SHA256_DIGEST_SIZE; i++) sprintf(&outbuf[i * 2], "%02x", sha256buf[i]); outbuf[SHA256_BLOCK_SIZE] = 0; dev_dbg(device, "Loaded FW: %s, sha256: %s\n", name, outbuf); out_shash: crypto_free_shash(alg); out_free: kfree(shash); kfree(outbuf); kfree(sha256buf); } #else static void fw_log_firmware_info(const struct firmware *fw, const char *name, struct device *device) {} #endif /* * Reject firmware file names with ".." path components. * There are drivers that construct firmware file names from device-supplied * strings, and we don't want some device to be able to tell us "I would like to * be sent my firmware from ../../../etc/shadow, please". * * Search for ".." surrounded by either '/' or start/end of string. * * This intentionally only looks at the firmware name, not at the firmware base * directory or at symlink contents. 
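 *
 * For example, "..", "../fw.bin", "foo/../fw.bin" and "foo/.." are all
 * rejected, while a name such as "foo/bar..bin" is accepted.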
*/ static bool name_contains_dotdot(const char *name) { size_t name_len = strlen(name); return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 || strstr(name, "/../") != NULL || (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0); } /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, struct device *device, void *buf, size_t size, size_t offset, u32 opt_flags) { struct firmware *fw = NULL; struct cred *kern_cred = NULL; const struct cred *old_cred; bool nondirect = false; int ret; if (!firmware_p) return -EINVAL; if (!name || name[0] == '\0') { ret = -EINVAL; goto out; } if (name_contains_dotdot(name)) { dev_warn(device, "Firmware load for '%s' refused, path contains '..' component\n", name); ret = -EINVAL; goto out; } ret = _request_firmware_prepare(&fw, name, device, buf, size, offset, opt_flags); if (ret <= 0) /* error or already assigned */ goto out; /* * We are about to try to access the firmware file. Because we may have been * called by a driver when serving an unrelated request from userland, we use * the kernel credentials to read the file. */ kern_cred = prepare_kernel_cred(&init_task); if (!kern_cred) { ret = -ENOMEM; goto out; } old_cred = override_creds(kern_cred); ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL); /* Only full reads can support decompression, platform, and sysfs. */ if (!(opt_flags & FW_OPT_PARTIAL)) nondirect = true; #ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD if (ret == -ENOENT && nondirect) ret = fw_get_filesystem_firmware(device, fw->priv, ".zst", fw_decompress_zstd); #endif #ifdef CONFIG_FW_LOADER_COMPRESS_XZ if (ret == -ENOENT && nondirect) ret = fw_get_filesystem_firmware(device, fw->priv, ".xz", fw_decompress_xz); #endif if (ret == -ENOENT && nondirect) ret = firmware_fallback_platform(fw->priv); if (ret) { if (!(opt_flags & FW_OPT_NO_WARN)) dev_warn(device, "Direct firmware load for %s failed with error %d\n", name, ret); if (nondirect) ret = firmware_fallback_sysfs(fw, name, device, opt_flags, ret); } else ret = assign_fw(fw, device); revert_creds(old_cred); put_cred(kern_cred); out: if (ret < 0) { fw_abort_batch_reqs(fw); release_firmware(fw); fw = NULL; } else { fw_log_firmware_info(fw, name, device); } *firmware_p = fw; return ret; } /** * request_firmware() - send firmware request and wait for it * @firmware_p: pointer to firmware image * @name: name of firmware file * @device: device for which firmware is being loaded * * @firmware_p will be used to return a firmware image by the name * of @name for device @device. * * Should be called from user context where sleeping is allowed. * * @name will be used as $FIRMWARE in the uevent environment and * should be distinctive enough not to be confused with any other * firmware image for this or any other device. * It must not contain any ".." path components - "foo/bar..bin" is * allowed, but "foo/../bar.bin" is not. * * Caller must hold the reference count of @device. * * The function can be called safely inside device's suspend and * resume callback. 
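 *
 * A minimal usage sketch (the firmware name and the device pointer are
 * illustrative only):
 *
 *	const struct firmware *fw;
 *	int err;
 *
 *	err = request_firmware(&fw, "example/fw.bin", &pdev->dev);
 *	if (err)
 *		return err;
 *	... consume fw->data / fw->size ...
 *	release_firmware(fw);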
**/ int request_firmware(const struct firmware **firmware_p, const char *name, struct device *device) { int ret; /* Need to pin this module until return */ __module_get(THIS_MODULE); ret = _request_firmware(firmware_p, name, device, NULL, 0, 0, FW_OPT_UEVENT); module_put(THIS_MODULE); return ret; } EXPORT_SYMBOL(request_firmware); /** * firmware_request_nowarn() - request for an optional fw module * @firmware: pointer to firmware image * @name: name of firmware file * @device: device for which firmware is being loaded * * This function is similar in behaviour to request_firmware(), except it * doesn't produce warning messages when the file is not found. The sysfs * fallback mechanism is enabled if direct filesystem lookup fails. However, * failures to find the firmware file with it are still suppressed. It is * therefore up to the driver to check for the return value of this call and to * decide when to inform the users of errors. **/ int firmware_request_nowarn(const struct firmware **firmware, const char *name, struct device *device) { int ret; /* Need to pin this module until return */ __module_get(THIS_MODULE); ret = _request_firmware(firmware, name, device, NULL, 0, 0, FW_OPT_UEVENT | FW_OPT_NO_WARN); module_put(THIS_MODULE); return ret; } EXPORT_SYMBOL_GPL(firmware_request_nowarn); /** * request_firmware_direct() - load firmware directly without usermode helper * @firmware_p: pointer to firmware image * @name: name of firmware file * @device: device for which firmware is being loaded * * This function works pretty much like request_firmware(), but this doesn't * fall back to usermode helper even if the firmware couldn't be loaded * directly from fs. Hence it's useful for loading optional firmwares, which * aren't always present, without extra long timeouts of udev. **/ int request_firmware_direct(const struct firmware **firmware_p, const char *name, struct device *device) { int ret; __module_get(THIS_MODULE); ret = _request_firmware(firmware_p, name, device, NULL, 0, 0, FW_OPT_UEVENT | FW_OPT_NO_WARN | FW_OPT_NOFALLBACK_SYSFS); module_put(THIS_MODULE); return ret; } EXPORT_SYMBOL_GPL(request_firmware_direct); /** * firmware_request_platform() - request firmware with platform-fw fallback * @firmware: pointer to firmware image * @name: name of firmware file * @device: device for which firmware is being loaded * * This function is similar in behaviour to request_firmware, except that if * direct filesystem lookup fails, it will fallback to looking for a copy of the * requested firmware embedded in the platform's main (e.g. UEFI) firmware. **/ int firmware_request_platform(const struct firmware **firmware, const char *name, struct device *device) { int ret; /* Need to pin this module until return */ __module_get(THIS_MODULE); ret = _request_firmware(firmware, name, device, NULL, 0, 0, FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM); module_put(THIS_MODULE); return ret; } EXPORT_SYMBOL_GPL(firmware_request_platform); /** * firmware_request_cache() - cache firmware for suspend so resume can use it * @name: name of firmware file * @device: device for which firmware should be cached for * * There are some devices with an optimization that enables the device to not * require loading firmware on system reboot. This optimization may still * require the firmware present on resume from suspend. This routine can be * used to ensure the firmware is present on resume from suspend in these * situations. 
This helper is not compatible with drivers which use * request_firmware_into_buf() or request_firmware_nowait() with no uevent set. **/ int firmware_request_cache(struct device *device, const char *name) { int ret; mutex_lock(&fw_lock); ret = fw_add_devm_name(device, name); mutex_unlock(&fw_lock); return ret; } EXPORT_SYMBOL_GPL(firmware_request_cache); /** * request_firmware_into_buf() - load firmware into a previously allocated buffer * @firmware_p: pointer to firmware image * @name: name of firmware file * @device: device for which firmware is being loaded and DMA region allocated * @buf: address of buffer to load firmware into * @size: size of buffer * * This function works pretty much like request_firmware(), but it doesn't * allocate a buffer to hold the firmware data. Instead, the firmware * is loaded directly into the buffer pointed to by @buf and the @firmware_p * data member is pointed at @buf. * * This function doesn't cache firmware either. */ int request_firmware_into_buf(const struct firmware **firmware_p, const char *name, struct device *device, void *buf, size_t size) { int ret; if (fw_cache_is_setup(device, name)) return -EOPNOTSUPP; __module_get(THIS_MODULE); ret = _request_firmware(firmware_p, name, device, buf, size, 0, FW_OPT_UEVENT | FW_OPT_NOCACHE); module_put(THIS_MODULE); return ret; } EXPORT_SYMBOL(request_firmware_into_buf); /** * request_partial_firmware_into_buf() - load partial firmware into a previously allocated buffer * @firmware_p: pointer to firmware image * @name: name of firmware file * @device: device for which firmware is being loaded and DMA region allocated * @buf: address of buffer to load firmware into * @size: size of buffer * @offset: offset into file to read * * This function works pretty much like request_firmware_into_buf except * it allows a partial read of the file. 
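 *
 * For example (firmware name, buffer and sizes are illustrative), reading
 * 512 bytes starting at byte offset 4096 into a preallocated buffer @buf:
 *
 *	ret = request_partial_firmware_into_buf(&fw, "example/blob.bin",
 *						dev, buf, 512, 4096);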
*/ int request_partial_firmware_into_buf(const struct firmware **firmware_p, const char *name, struct device *device, void *buf, size_t size, size_t offset) { int ret; if (fw_cache_is_setup(device, name)) return -EOPNOTSUPP; __module_get(THIS_MODULE); ret = _request_firmware(firmware_p, name, device, buf, size, offset, FW_OPT_UEVENT | FW_OPT_NOCACHE | FW_OPT_PARTIAL); module_put(THIS_MODULE); return ret; } EXPORT_SYMBOL(request_partial_firmware_into_buf); /** * release_firmware() - release the resource associated with a firmware image * @fw: firmware resource to release **/ void release_firmware(const struct firmware *fw) { if (fw) { if (!firmware_is_builtin(fw)) firmware_free_data(fw); kfree(fw); } } EXPORT_SYMBOL(release_firmware); /* Async support */ struct firmware_work { struct work_struct work; struct module *module; const char *name; struct device *device; void *context; void (*cont)(const struct firmware *fw, void *context); u32 opt_flags; }; static void request_firmware_work_func(struct work_struct *work) { struct firmware_work *fw_work; const struct firmware *fw; fw_work = container_of(work, struct firmware_work, work); _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, 0, fw_work->opt_flags); fw_work->cont(fw, fw_work->context); put_device(fw_work->device); /* taken in request_firmware_nowait() */ module_put(fw_work->module); kfree_const(fw_work->name); kfree(fw_work); } static int _request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context), bool nowarn) { struct firmware_work *fw_work; fw_work = kzalloc(sizeof(struct firmware_work), gfp); if (!fw_work) return -ENOMEM; fw_work->module = module; fw_work->name = kstrdup_const(name, gfp); if (!fw_work->name) { kfree(fw_work); return -ENOMEM; } fw_work->device = device; fw_work->context = context; fw_work->cont = cont; fw_work->opt_flags = FW_OPT_NOWAIT | (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER) | (nowarn ? FW_OPT_NO_WARN : 0); if (!uevent && fw_cache_is_setup(device, name)) { kfree_const(fw_work->name); kfree(fw_work); return -EOPNOTSUPP; } if (!try_module_get(module)) { kfree_const(fw_work->name); kfree(fw_work); return -EFAULT; } get_device(fw_work->device); INIT_WORK(&fw_work->work, request_firmware_work_func); schedule_work(&fw_work->work); return 0; } /** * request_firmware_nowait() - asynchronous version of request_firmware * @module: module requesting the firmware * @uevent: sends uevent to copy the firmware image if this flag * is non-zero else the firmware copy must be done manually. * @name: name of firmware file * @device: device for which firmware is being loaded * @gfp: allocation flags * @context: will be passed over to @cont, and * @fw may be %NULL if firmware request fails. * @cont: function will be called asynchronously when the firmware * request is over. * * Caller must hold the reference count of @device. * * Asynchronous variant of request_firmware() for user contexts: * - sleep for as small periods as possible since it may * increase kernel boot time of built-in device drivers * requesting firmware in their ->probe() methods, if * @gfp is GFP_KERNEL. * * - can't sleep at all if @gfp is GFP_ATOMIC. 
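 *
 * A usage sketch (the callback, context and firmware name are illustrative):
 *
 *	static void example_fw_cont(const struct firmware *fw, void *context)
 *	{
 *		if (!fw)
 *			return;
 *		... consume fw->data ...
 *		release_firmware(fw);
 *	}
 *
 *	request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, "example/fw.bin",
 *				dev, GFP_KERNEL, NULL, example_fw_cont);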
**/ int request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)) { return _request_firmware_nowait(module, uevent, name, device, gfp, context, cont, false); } EXPORT_SYMBOL(request_firmware_nowait); /** * firmware_request_nowait_nowarn() - async version of request_firmware_nowarn * @module: module requesting the firmware * @name: name of firmware file * @device: device for which firmware is being loaded * @gfp: allocation flags * @context: will be passed over to @cont, and * @fw may be %NULL if firmware request fails. * @cont: function will be called asynchronously when the firmware * request is over. * * Similar in function to request_firmware_nowait(), but doesn't print a warning * when the firmware file could not be found and always sends a uevent to copy * the firmware image. */ int firmware_request_nowait_nowarn( struct module *module, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)) { return _request_firmware_nowait(module, FW_ACTION_UEVENT, name, device, gfp, context, cont, true); } EXPORT_SYMBOL_GPL(firmware_request_nowait_nowarn); #ifdef CONFIG_FW_CACHE static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain); /** * cache_firmware() - cache one firmware image in kernel memory space * @fw_name: the firmware image name * * Cache firmware in kernel memory so that drivers can use it when * system isn't ready for them to request firmware image from userspace. * Once it returns successfully, driver can use request_firmware or its * nowait version to get the cached firmware without any interacting * with userspace * * Return 0 if the firmware image has been cached successfully * Return !0 otherwise * */ static int cache_firmware(const char *fw_name) { int ret; const struct firmware *fw; pr_debug("%s: %s\n", __func__, fw_name); ret = request_firmware(&fw, fw_name, NULL); if (!ret) kfree(fw); pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret); return ret; } static struct fw_priv *lookup_fw_priv(const char *fw_name) { struct fw_priv *tmp; struct firmware_cache *fwc = &fw_cache; spin_lock(&fwc->lock); tmp = __lookup_fw_priv(fw_name); spin_unlock(&fwc->lock); return tmp; } /** * uncache_firmware() - remove one cached firmware image * @fw_name: the firmware image name * * Uncache one firmware image which has been cached successfully * before. 
* * Return 0 if the firmware cache has been removed successfully * Return !0 otherwise * */ static int uncache_firmware(const char *fw_name) { struct fw_priv *fw_priv; struct firmware fw; pr_debug("%s: %s\n", __func__, fw_name); if (firmware_request_builtin(&fw, fw_name)) return 0; fw_priv = lookup_fw_priv(fw_name); if (fw_priv) { free_fw_priv(fw_priv); return 0; } return -EINVAL; } static struct fw_cache_entry *alloc_fw_cache_entry(const char *name) { struct fw_cache_entry *fce; fce = kzalloc(sizeof(*fce), GFP_ATOMIC); if (!fce) goto exit; fce->name = kstrdup_const(name, GFP_ATOMIC); if (!fce->name) { kfree(fce); fce = NULL; goto exit; } exit: return fce; } static int __fw_entry_found(const char *name) { struct firmware_cache *fwc = &fw_cache; struct fw_cache_entry *fce; list_for_each_entry(fce, &fwc->fw_names, list) { if (!strcmp(fce->name, name)) return 1; } return 0; } static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv) { const char *name = fw_priv->fw_name; struct firmware_cache *fwc = fw_priv->fwc; struct fw_cache_entry *fce; spin_lock(&fwc->name_lock); if (__fw_entry_found(name)) goto found; fce = alloc_fw_cache_entry(name); if (fce) { list_add(&fce->list, &fwc->fw_names); kref_get(&fw_priv->ref); pr_debug("%s: fw: %s\n", __func__, name); } found: spin_unlock(&fwc->name_lock); } static void free_fw_cache_entry(struct fw_cache_entry *fce) { kfree_const(fce->name); kfree(fce); } static void __async_dev_cache_fw_image(void *fw_entry, async_cookie_t cookie) { struct fw_cache_entry *fce = fw_entry; struct firmware_cache *fwc = &fw_cache; int ret; ret = cache_firmware(fce->name); if (ret) { spin_lock(&fwc->name_lock); list_del(&fce->list); spin_unlock(&fwc->name_lock); free_fw_cache_entry(fce); } } /* called with dev->devres_lock held */ static void dev_create_fw_entry(struct device *dev, void *res, void *data) { struct fw_name_devm *fwn = res; const char *fw_name = fwn->name; struct list_head *head = data; struct fw_cache_entry *fce; fce = alloc_fw_cache_entry(fw_name); if (fce) list_add(&fce->list, head); } static int devm_name_match(struct device *dev, void *res, void *match_data) { struct fw_name_devm *fwn = res; return (fwn->magic == (unsigned long)match_data); } static void dev_cache_fw_image(struct device *dev, void *data) { LIST_HEAD(todo); struct fw_cache_entry *fce; struct fw_cache_entry *fce_next; struct firmware_cache *fwc = &fw_cache; devres_for_each_res(dev, fw_name_devm_release, devm_name_match, &fw_cache, dev_create_fw_entry, &todo); list_for_each_entry_safe(fce, fce_next, &todo, list) { list_del(&fce->list); spin_lock(&fwc->name_lock); /* only one cache entry for one firmware */ if (!__fw_entry_found(fce->name)) { list_add(&fce->list, &fwc->fw_names); } else { free_fw_cache_entry(fce); fce = NULL; } spin_unlock(&fwc->name_lock); if (fce) async_schedule_domain(__async_dev_cache_fw_image, (void *)fce, &fw_cache_domain); } } static void __device_uncache_fw_images(void) { struct firmware_cache *fwc = &fw_cache; struct fw_cache_entry *fce; spin_lock(&fwc->name_lock); while (!list_empty(&fwc->fw_names)) { fce = list_entry(fwc->fw_names.next, struct fw_cache_entry, list); list_del(&fce->list); spin_unlock(&fwc->name_lock); uncache_firmware(fce->name); free_fw_cache_entry(fce); spin_lock(&fwc->name_lock); } spin_unlock(&fwc->name_lock); } /** * device_cache_fw_images() - cache devices' firmware * * If one device called request_firmware or its nowait version * successfully before, the firmware names are recored into the * device's devres link list, so 
device_cache_fw_images can call * cache_firmware() to cache these firmwares for the device, * then the device driver can load its firmwares easily at * time when system is not ready to complete loading firmware. */ static void device_cache_fw_images(void) { struct firmware_cache *fwc = &fw_cache; DEFINE_WAIT(wait); pr_debug("%s\n", __func__); /* cancel uncache work */ cancel_delayed_work_sync(&fwc->work); fw_fallback_set_cache_timeout(); mutex_lock(&fw_lock); fwc->state = FW_LOADER_START_CACHE; dpm_for_each_dev(NULL, dev_cache_fw_image); mutex_unlock(&fw_lock); /* wait for completion of caching firmware for all devices */ async_synchronize_full_domain(&fw_cache_domain); fw_fallback_set_default_timeout(); } /** * device_uncache_fw_images() - uncache devices' firmware * * uncache all firmwares which have been cached successfully * by device_uncache_fw_images earlier */ static void device_uncache_fw_images(void) { pr_debug("%s\n", __func__); __device_uncache_fw_images(); } static void device_uncache_fw_images_work(struct work_struct *work) { device_uncache_fw_images(); } /** * device_uncache_fw_images_delay() - uncache devices firmwares * @delay: number of milliseconds to delay uncache device firmwares * * uncache all devices's firmwares which has been cached successfully * by device_cache_fw_images after @delay milliseconds. */ static void device_uncache_fw_images_delay(unsigned long delay) { queue_delayed_work(system_power_efficient_wq, &fw_cache.work, msecs_to_jiffies(delay)); } static int fw_pm_notify(struct notifier_block *notify_block, unsigned long mode, void *unused) { switch (mode) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: case PM_RESTORE_PREPARE: /* * Here, kill pending fallback requests will only kill * non-uevent firmware request to avoid stalling suspend. */ kill_pending_fw_fallback_reqs(false); device_cache_fw_images(); break; case PM_POST_SUSPEND: case PM_POST_HIBERNATION: case PM_POST_RESTORE: /* * In case that system sleep failed and syscore_suspend is * not called. */ mutex_lock(&fw_lock); fw_cache.state = FW_LOADER_NO_CACHE; mutex_unlock(&fw_lock); device_uncache_fw_images_delay(10 * MSEC_PER_SEC); break; } return 0; } /* stop caching firmware once syscore_suspend is reached */ static int fw_suspend(void) { fw_cache.state = FW_LOADER_NO_CACHE; return 0; } static struct syscore_ops fw_syscore_ops = { .suspend = fw_suspend, }; static int __init register_fw_pm_ops(void) { int ret; spin_lock_init(&fw_cache.name_lock); INIT_LIST_HEAD(&fw_cache.fw_names); INIT_DELAYED_WORK(&fw_cache.work, device_uncache_fw_images_work); fw_cache.pm_notify.notifier_call = fw_pm_notify; ret = register_pm_notifier(&fw_cache.pm_notify); if (ret) return ret; register_syscore_ops(&fw_syscore_ops); return ret; } static inline void unregister_fw_pm_ops(void) { unregister_syscore_ops(&fw_syscore_ops); unregister_pm_notifier(&fw_cache.pm_notify); } #else static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv) { } static inline int register_fw_pm_ops(void) { return 0; } static inline void unregister_fw_pm_ops(void) { } #endif static void __init fw_cache_init(void) { spin_lock_init(&fw_cache.lock); INIT_LIST_HEAD(&fw_cache.head); fw_cache.state = FW_LOADER_NO_CACHE; } static int fw_shutdown_notify(struct notifier_block *unused1, unsigned long unused2, void *unused3) { /* * Kill all pending fallback requests to avoid both stalling shutdown, * and avoid a deadlock with the usermode_lock. 
*/ kill_pending_fw_fallback_reqs(true); return NOTIFY_DONE; } static struct notifier_block fw_shutdown_nb = { .notifier_call = fw_shutdown_notify, }; static int __init firmware_class_init(void) { int ret; /* No need to unfold these on exit */ fw_cache_init(); ret = register_fw_pm_ops(); if (ret) return ret; ret = register_reboot_notifier(&fw_shutdown_nb); if (ret) goto out; return register_sysfs_loader(); out: unregister_fw_pm_ops(); return ret; } static void __exit firmware_class_exit(void) { unregister_fw_pm_ops(); unregister_reboot_notifier(&fw_shutdown_nb); unregister_sysfs_loader(); } fs_initcall(firmware_class_init); module_exit(firmware_class_exit); |
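/*
 * Illustrative usage of the request_firmware() API above, not part of the
 * loader itself: a typical driver probe-time load. The device pointer,
 * firmware name and parsing step are made-up placeholders.
 */
#include <linux/device.h>
#include <linux/firmware.h>

static int example_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* Searches the paths in fw_path[], then falls back as configured. */
	err = request_firmware(&fw, "example/config.bin", dev);
	if (err)
		return err;

	/* fw->data and fw->size stay valid until release_firmware(). */
	/* ... parse/upload fw->data here ... */

	release_firmware(fw);
	return 0;
}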
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 */

#ifndef _NET_IPV6_H
#define _NET_IPV6_H

#include <linux/ipv6.h>
#include <linux/hardirq.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <linux/jump_label_ratelimit.h>
#include <net/if_inet6.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/inet_dscp.h>
#include <net/snmp.h>
#include <net/netns/hash.h>

struct ip_tunnel_info;

#define SIN6_LEN_RFC2133	24

#define IPV6_MAXPLEN		65535

/*
 *	NextHeader field of IPv6 header
 */

#define NEXTHDR_HOP		0	/* Hop-by-hop option header. */
#define NEXTHDR_IPV4		4	/* IPv4 in IPv6 */
#define NEXTHDR_TCP		6	/* TCP segment. */
#define NEXTHDR_UDP		17	/* UDP message. */
#define NEXTHDR_IPV6		41	/* IPv6 in IPv6 */
#define NEXTHDR_ROUTING		43	/* Routing header. */
#define NEXTHDR_FRAGMENT	44	/* Fragmentation/reassembly header. */
#define NEXTHDR_GRE		47	/* GRE header. */
#define NEXTHDR_ESP		50	/* Encapsulating security payload. */
#define NEXTHDR_AUTH		51	/* Authentication header. */
#define NEXTHDR_ICMP		58	/* ICMP for IPv6. */
#define NEXTHDR_NONE		59	/* No next header */
#define NEXTHDR_DEST		60	/* Destination options header. */
#define NEXTHDR_SCTP		132	/* SCTP message.
*/ #define NEXTHDR_MOBILITY 135 /* Mobility header. */ #define NEXTHDR_MAX 255 #define IPV6_DEFAULT_HOPLIMIT 64 #define IPV6_DEFAULT_MCASTHOPS 1 /* Limits on Hop-by-Hop and Destination options. * * Per RFC8200 there is no limit on the maximum number or lengths of options in * Hop-by-Hop or Destination options other then the packet must fit in an MTU. * We allow configurable limits in order to mitigate potential denial of * service attacks. * * There are three limits that may be set: * - Limit the number of options in a Hop-by-Hop or Destination options * extension header * - Limit the byte length of a Hop-by-Hop or Destination options extension * header * - Disallow unknown options * * The limits are expressed in corresponding sysctls: * * ipv6.sysctl.max_dst_opts_cnt * ipv6.sysctl.max_hbh_opts_cnt * ipv6.sysctl.max_dst_opts_len * ipv6.sysctl.max_hbh_opts_len * * max_*_opts_cnt is the number of TLVs that are allowed for Destination * options or Hop-by-Hop options. If the number is less than zero then unknown * TLVs are disallowed and the number of known options that are allowed is the * absolute value. Setting the value to INT_MAX indicates no limit. * * max_*_opts_len is the length limit in bytes of a Destination or * Hop-by-Hop options extension header. Setting the value to INT_MAX * indicates no length limit. * * If a limit is exceeded when processing an extension header the packet is * silently discarded. */ /* Default limits for Hop-by-Hop and Destination options */ #define IP6_DEFAULT_MAX_DST_OPTS_CNT 8 #define IP6_DEFAULT_MAX_HBH_OPTS_CNT 8 #define IP6_DEFAULT_MAX_DST_OPTS_LEN INT_MAX /* No limit */ #define IP6_DEFAULT_MAX_HBH_OPTS_LEN INT_MAX /* No limit */ /* * Addr type * * type - unicast | multicast * scope - local | site | global * v4 - compat * v4mapped * any * loopback */ #define IPV6_ADDR_ANY 0x0000U #define IPV6_ADDR_UNICAST 0x0001U #define IPV6_ADDR_MULTICAST 0x0002U #define IPV6_ADDR_LOOPBACK 0x0010U #define IPV6_ADDR_LINKLOCAL 0x0020U #define IPV6_ADDR_SITELOCAL 0x0040U #define IPV6_ADDR_COMPATv4 0x0080U #define IPV6_ADDR_SCOPE_MASK 0x00f0U #define IPV6_ADDR_MAPPED 0x1000U /* * Addr scopes */ #define IPV6_ADDR_MC_SCOPE(a) \ ((a)->s6_addr[1] & 0x0f) /* nonstandard */ #define __IPV6_ADDR_SCOPE_INVALID -1 #define IPV6_ADDR_SCOPE_NODELOCAL 0x01 #define IPV6_ADDR_SCOPE_LINKLOCAL 0x02 #define IPV6_ADDR_SCOPE_SITELOCAL 0x05 #define IPV6_ADDR_SCOPE_ORGLOCAL 0x08 #define IPV6_ADDR_SCOPE_GLOBAL 0x0e /* * Addr flags */ #define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \ ((a)->s6_addr[1] & 0x10) #define IPV6_ADDR_MC_FLAG_PREFIX(a) \ ((a)->s6_addr[1] & 0x20) #define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \ ((a)->s6_addr[1] & 0x40) /* * fragmentation header */ struct frag_hdr { __u8 nexthdr; __u8 reserved; __be16 frag_off; __be32 identification; }; /* * Jumbo payload option, as described in RFC 2675 2. 
*/ struct hop_jumbo_hdr { u8 nexthdr; u8 hdrlen; u8 tlv_type; /* IPV6_TLV_JUMBO, 0xC2 */ u8 tlv_len; /* 4 */ __be32 jumbo_payload_len; }; #define IP6_MF 0x0001 #define IP6_OFFSET 0xFFF8 struct ip6_fraglist_iter { struct ipv6hdr *tmp_hdr; struct sk_buff *frag; int offset; unsigned int hlen; __be32 frag_id; u8 nexthdr; }; int ip6_fraglist_init(struct sk_buff *skb, unsigned int hlen, u8 *prevhdr, u8 nexthdr, __be32 frag_id, struct ip6_fraglist_iter *iter); void ip6_fraglist_prepare(struct sk_buff *skb, struct ip6_fraglist_iter *iter); static inline struct sk_buff *ip6_fraglist_next(struct ip6_fraglist_iter *iter) { struct sk_buff *skb = iter->frag; iter->frag = skb->next; skb_mark_not_on_list(skb); return skb; } struct ip6_frag_state { u8 *prevhdr; unsigned int hlen; unsigned int mtu; unsigned int left; int offset; int ptr; int hroom; int troom; __be32 frag_id; u8 nexthdr; }; void ip6_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int mtu, unsigned short needed_tailroom, int hdr_room, u8 *prevhdr, u8 nexthdr, __be32 frag_id, struct ip6_frag_state *state); struct sk_buff *ip6_frag_next(struct sk_buff *skb, struct ip6_frag_state *state); #define IP6_REPLY_MARK(net, mark) \ ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0) #include <net/sock.h> /* sysctls */ extern int sysctl_mld_max_msf; extern int sysctl_mld_qrv; #define _DEVINC(net, statname, mod, idev, field) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\ mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\ }) /* per device counters are atomic_long_t */ #define _DEVINCATOMIC(net, statname, mod, idev, field) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \ mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\ }) /* per device and per net counters are atomic_long_t */ #define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \ SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\ }) #define _DEVADD(net, statname, mod, idev, field, val) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \ mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\ }) #define _DEVUPD(net, statname, mod, idev, field, val) \ ({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \ mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\ }) /* MIBs */ #define IP6_INC_STATS(net, idev,field) \ _DEVINC(net, ipv6, , idev, field) #define __IP6_INC_STATS(net, idev,field) \ _DEVINC(net, ipv6, __, idev, field) #define IP6_ADD_STATS(net, idev,field,val) \ _DEVADD(net, ipv6, , idev, field, val) #define __IP6_ADD_STATS(net, idev,field,val) \ _DEVADD(net, ipv6, __, idev, field, val) #define IP6_UPD_PO_STATS(net, idev,field,val) \ _DEVUPD(net, ipv6, , idev, field, val) #define __IP6_UPD_PO_STATS(net, idev,field,val) \ _DEVUPD(net, ipv6, __, idev, field, val) #define ICMP6_INC_STATS(net, idev, field) \ _DEVINCATOMIC(net, icmpv6, , idev, field) #define __ICMP6_INC_STATS(net, idev, field) \ _DEVINCATOMIC(net, icmpv6, __, idev, field) #define ICMP6MSGOUT_INC_STATS(net, idev, field) \ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field 
+256) #define ICMP6MSGIN_INC_STATS(net, idev, field) \ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field) struct ip6_ra_chain { struct ip6_ra_chain *next; struct sock *sk; int sel; void (*destructor)(struct sock *); }; extern struct ip6_ra_chain *ip6_ra_chain; extern rwlock_t ip6_ra_lock; /* This structure is prepared by protocol, when parsing ancillary data and passed to IPv6. */ struct ipv6_txoptions { refcount_t refcnt; /* Length of this structure */ int tot_len; /* length of extension headers */ __u16 opt_flen; /* after fragment hdr */ __u16 opt_nflen; /* before fragment hdr */ struct ipv6_opt_hdr *hopopt; struct ipv6_opt_hdr *dst0opt; struct ipv6_rt_hdr *srcrt; /* Routing Header */ struct ipv6_opt_hdr *dst1opt; struct rcu_head rcu; /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */ }; /* flowlabel_reflect sysctl values */ enum flowlabel_reflect { FLOWLABEL_REFLECT_ESTABLISHED = 1, FLOWLABEL_REFLECT_TCP_RESET = 2, FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4, }; struct ip6_flowlabel { struct ip6_flowlabel __rcu *next; __be32 label; atomic_t users; struct in6_addr dst; struct ipv6_txoptions *opt; unsigned long linger; struct rcu_head rcu; u8 share; union { struct pid *pid; kuid_t uid; } owner; unsigned long lastuse; unsigned long expires; struct net *fl_net; }; #define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF) #define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF) #define IPV6_FLOWLABEL_STATELESS_FLAG cpu_to_be32(0x00080000) #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK) #define IPV6_TCLASS_SHIFT 20 struct ipv6_fl_socklist { struct ipv6_fl_socklist __rcu *next; struct ip6_flowlabel *fl; struct rcu_head rcu; }; struct ipcm6_cookie { struct sockcm_cookie sockc; __s16 hlimit; __s16 tclass; __u16 gso_size; __s8 dontfrag; struct ipv6_txoptions *opt; }; static inline void ipcm6_init(struct ipcm6_cookie *ipc6) { *ipc6 = (struct ipcm6_cookie) { .hlimit = -1, .tclass = -1, .dontfrag = -1, }; } static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6, const struct sock *sk) { *ipc6 = (struct ipcm6_cookie) { .hlimit = -1, .tclass = inet6_sk(sk)->tclass, .dontfrag = inet6_test_bit(DONTFRAG, sk), }; } static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np) { struct ipv6_txoptions *opt; rcu_read_lock(); opt = rcu_dereference(np->opt); if (opt) { if (!refcount_inc_not_zero(&opt->refcnt)) opt = NULL; else opt = rcu_pointer_handoff(opt); } rcu_read_unlock(); return opt; } static inline void txopt_put(struct ipv6_txoptions *opt) { if (opt && refcount_dec_and_test(&opt->refcnt)) kfree_rcu(opt, rcu); } #if IS_ENABLED(CONFIG_IPV6) struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label); extern struct static_key_false_deferred ipv6_flowlabel_exclusive; static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label) { if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) && READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl)) return __fl6_sock_lookup(sk, label) ? 
: ERR_PTR(-ENOENT); return NULL; } #endif struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, struct ip6_flowlabel *fl, struct ipv6_txoptions *fopt); void fl6_free_socklist(struct sock *sk); int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen); int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, int flags); int ip6_flowlabel_init(void); void ip6_flowlabel_cleanup(void); bool ip6_autoflowlabel(struct net *net, const struct sock *sk); static inline void fl6_sock_release(struct ip6_flowlabel *fl) { if (fl) atomic_dec(&fl->users); } enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info); void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct icmp6hdr *thdr, int len); int ip6_ra_control(struct sock *sk, int sel); int ipv6_parse_hopopts(struct sk_buff *skb); struct ipv6_txoptions *ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt); struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, int newtype, struct ipv6_opt_hdr *newopt); struct ipv6_txoptions *__ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt); static inline struct ipv6_txoptions * ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt) { if (!opt) return NULL; return __ipv6_fixup_options(opt_space, opt); } bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb, const struct inet6_skb_parm *opt); struct ipv6_txoptions *ipv6_update_options(struct sock *sk, struct ipv6_txoptions *opt); /* This helper is specialized for BIG TCP needs. * It assumes the hop_jumbo_hdr will immediately follow the IPV6 header. * It assumes headers are already in skb->head. * Returns 0, or IPPROTO_TCP if a BIG TCP packet is there. */ static inline int ipv6_has_hopopt_jumbo(const struct sk_buff *skb) { const struct hop_jumbo_hdr *jhdr; const struct ipv6hdr *nhdr; if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) return 0; if (skb->protocol != htons(ETH_P_IPV6)) return 0; if (skb_network_offset(skb) + sizeof(struct ipv6hdr) + sizeof(struct hop_jumbo_hdr) > skb_headlen(skb)) return 0; nhdr = ipv6_hdr(skb); if (nhdr->nexthdr != NEXTHDR_HOP) return 0; jhdr = (const struct hop_jumbo_hdr *) (nhdr + 1); if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || jhdr->nexthdr != IPPROTO_TCP) return 0; return jhdr->nexthdr; } /* Return 0 if HBH header is successfully removed * Or if HBH removal is unnecessary (packet is not big TCP) * Return error to indicate dropping the packet */ static inline int ipv6_hopopt_jumbo_remove(struct sk_buff *skb) { const int hophdr_len = sizeof(struct hop_jumbo_hdr); int nexthdr = ipv6_has_hopopt_jumbo(skb); struct ipv6hdr *h6; if (!nexthdr) return 0; if (skb_cow_head(skb, 0)) return -1; /* Remove the HBH header. * Layout: [Ethernet header][IPv6 header][HBH][L4 Header] */ memmove(skb_mac_header(skb) + hophdr_len, skb_mac_header(skb), skb_network_header(skb) - skb_mac_header(skb) + sizeof(struct ipv6hdr)); __skb_pull(skb, hophdr_len); skb->network_header += hophdr_len; skb->mac_header += hophdr_len; h6 = ipv6_hdr(skb); h6->nexthdr = nexthdr; return 0; } static inline bool ipv6_accept_ra(const struct inet6_dev *idev) { s32 accept_ra = READ_ONCE(idev->cnf.accept_ra); /* If forwarding is enabled, RA are not accepted unless the special * hybrid mode (accept_ra=2) is enabled. */ return READ_ONCE(idev->cnf.forwarding) ? 
accept_ra == 2 : accept_ra; } #define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */ #define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */ #define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */ int __ipv6_addr_type(const struct in6_addr *addr); static inline int ipv6_addr_type(const struct in6_addr *addr) { return __ipv6_addr_type(addr) & 0xffff; } static inline int ipv6_addr_scope(const struct in6_addr *addr) { return __ipv6_addr_type(addr) & IPV6_ADDR_SCOPE_MASK; } static inline int __ipv6_addr_src_scope(int type) { return (type == IPV6_ADDR_ANY) ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16); } static inline int ipv6_addr_src_scope(const struct in6_addr *addr) { return __ipv6_addr_src_scope(__ipv6_addr_type(addr)); } static inline bool __ipv6_addr_needs_scope_id(int type) { return type & IPV6_ADDR_LINKLOCAL || (type & IPV6_ADDR_MULTICAST && (type & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL))); } static inline __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface) { return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0; } static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2) { return memcmp(a1, a2, sizeof(struct in6_addr)); } static inline bool ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m, const struct in6_addr *a2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const unsigned long *ul1 = (const unsigned long *)a1; const unsigned long *ulm = (const unsigned long *)m; const unsigned long *ul2 = (const unsigned long *)a2; return !!(((ul1[0] ^ ul2[0]) & ulm[0]) | ((ul1[1] ^ ul2[1]) & ulm[1])); #else return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) | ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) | ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) | ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3])); #endif } static inline void ipv6_addr_prefix(struct in6_addr *pfx, const struct in6_addr *addr, int plen) { /* caller must guarantee 0 <= plen <= 128 */ int o = plen >> 3, b = plen & 0x7; memset(pfx->s6_addr, 0, sizeof(pfx->s6_addr)); memcpy(pfx->s6_addr, addr, o); if (b != 0) pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b); } static inline void ipv6_addr_prefix_copy(struct in6_addr *addr, const struct in6_addr *pfx, int plen) { /* caller must guarantee 0 <= plen <= 128 */ int o = plen >> 3, b = plen & 0x7; memcpy(addr->s6_addr, pfx, o); if (b != 0) { addr->s6_addr[o] &= ~(0xff00 >> b); addr->s6_addr[o] |= (pfx->s6_addr[o] & (0xff00 >> b)); } } static inline void __ipv6_addr_set_half(__be32 *addr, __be32 wh, __be32 wl) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 #if defined(__BIG_ENDIAN) if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) { *(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl)); return; } #elif defined(__LITTLE_ENDIAN) if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) { *(__force u64 *)addr = ((__force u64)(wl) << 32 | (__force u64)(wh)); return; } #endif #endif addr[0] = wh; addr[1] = wl; } static inline void ipv6_addr_set(struct in6_addr *addr, __be32 w1, __be32 w2, __be32 w3, __be32 w4) { __ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2); __ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4); } static inline bool ipv6_addr_equal(const struct in6_addr *a1, const struct in6_addr *a2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const unsigned long *ul1 = (const unsigned long *)a1; const unsigned long *ul2 = (const unsigned 
long *)a2; return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL; #else return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) | (a1->s6_addr32[1] ^ a2->s6_addr32[1]) | (a1->s6_addr32[2] ^ a2->s6_addr32[2]) | (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0; #endif } #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 static inline bool __ipv6_prefix_equal64_half(const __be64 *a1, const __be64 *a2, unsigned int len) { if (len && ((*a1 ^ *a2) & cpu_to_be64((~0UL) << (64 - len)))) return false; return true; } static inline bool ipv6_prefix_equal(const struct in6_addr *addr1, const struct in6_addr *addr2, unsigned int prefixlen) { const __be64 *a1 = (const __be64 *)addr1; const __be64 *a2 = (const __be64 *)addr2; if (prefixlen >= 64) { if (a1[0] ^ a2[0]) return false; return __ipv6_prefix_equal64_half(a1 + 1, a2 + 1, prefixlen - 64); } return __ipv6_prefix_equal64_half(a1, a2, prefixlen); } #else static inline bool ipv6_prefix_equal(const struct in6_addr *addr1, const struct in6_addr *addr2, unsigned int prefixlen) { const __be32 *a1 = addr1->s6_addr32; const __be32 *a2 = addr2->s6_addr32; unsigned int pdw, pbi; /* check complete u32 in prefix */ pdw = prefixlen >> 5; if (pdw && memcmp(a1, a2, pdw << 2)) return false; /* check incomplete u32 in prefix */ pbi = prefixlen & 0x1f; if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi)))) return false; return true; } #endif static inline bool ipv6_addr_any(const struct in6_addr *a) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const unsigned long *ul = (const unsigned long *)a; return (ul[0] | ul[1]) == 0UL; #else return (a->s6_addr32[0] | a->s6_addr32[1] | a->s6_addr32[2] | a->s6_addr32[3]) == 0; #endif } static inline u32 ipv6_addr_hash(const struct in6_addr *a) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const unsigned long *ul = (const unsigned long *)a; unsigned long x = ul[0] ^ ul[1]; return (u32)(x ^ (x >> 32)); #else return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^ a->s6_addr32[2] ^ a->s6_addr32[3]); #endif } /* more secured version of ipv6_addr_hash() */ static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval) { return jhash2((__force const u32 *)a->s6_addr32, ARRAY_SIZE(a->s6_addr32), initval); } static inline bool ipv6_addr_loopback(const struct in6_addr *a) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const __be64 *be = (const __be64 *)a; return (be[0] | (be[1] ^ cpu_to_be64(1))) == 0UL; #else return (a->s6_addr32[0] | a->s6_addr32[1] | a->s6_addr32[2] | (a->s6_addr32[3] ^ cpu_to_be32(1))) == 0; #endif } /* * Note that we must __force cast these to unsigned long to make sparse happy, * since all of the endian-annotated types are fixed size regardless of arch. 
*/ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a) { return ( #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 *(unsigned long *)a | #else (__force unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) | #endif (__force unsigned long)(a->s6_addr32[2] ^ cpu_to_be32(0x0000ffff))) == 0UL; } static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a) { return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]); } static inline u32 ipv6_portaddr_hash(const struct net *net, const struct in6_addr *addr6, unsigned int port) { unsigned int hash, mix = net_hash_mix(net); if (ipv6_addr_any(addr6)) hash = jhash_1word(0, mix); else if (ipv6_addr_v4mapped(addr6)) hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix); else hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix); return hash ^ port; } /* * Check for a RFC 4843 ORCHID address * (Overlay Routable Cryptographic Hash Identifiers) */ static inline bool ipv6_addr_orchid(const struct in6_addr *a) { return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010); } static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr) { return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000); } static inline void ipv6_addr_set_v4mapped(const __be32 addr, struct in6_addr *v4mapped) { ipv6_addr_set(v4mapped, 0, 0, htonl(0x0000FFFF), addr); } /* * find the first different bit between two addresses * length of address must be a multiple of 32bits */ static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int addrlen) { const __be32 *a1 = token1, *a2 = token2; int i; addrlen >>= 2; for (i = 0; i < addrlen; i++) { __be32 xb = a1[i] ^ a2[i]; if (xb) return i * 32 + 31 - __fls(ntohl(xb)); } /* * we should *never* get to this point since that * would mean the addrs are equal * * However, we do get to it 8) And exactly, when * addresses are equal 8) * * ip route add 1111::/128 via ... * ip route add 1111::/64 via ... * and we are here. * * Ideally, this function should stop comparison * at prefix length. It does not, but it is still OK, * if returned value is greater than prefix length. 
* --ANK (980803) */ return addrlen << 5; } #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 static inline int __ipv6_addr_diff64(const void *token1, const void *token2, int addrlen) { const __be64 *a1 = token1, *a2 = token2; int i; addrlen >>= 3; for (i = 0; i < addrlen; i++) { __be64 xb = a1[i] ^ a2[i]; if (xb) return i * 64 + 63 - __fls(be64_to_cpu(xb)); } return addrlen << 6; } #endif static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 if (__builtin_constant_p(addrlen) && !(addrlen & 7)) return __ipv6_addr_diff64(token1, token2, addrlen); #endif return __ipv6_addr_diff32(token1, token2, addrlen); } static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2) { return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); } __be32 ipv6_select_ident(struct net *net, const struct in6_addr *daddr, const struct in6_addr *saddr); __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb); int ip6_dst_hoplimit(struct dst_entry *dst); static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6, struct dst_entry *dst) { int hlimit; if (ipv6_addr_is_multicast(&fl6->daddr)) hlimit = READ_ONCE(np->mcast_hops); else hlimit = READ_ONCE(np->hop_limit); if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); return hlimit; } /* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store * Equivalent to : flow->v6addrs.src = iph->saddr; * flow->v6addrs.dst = iph->daddr; */ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow, const struct ipv6hdr *iph) { BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) != offsetof(typeof(flow->addrs), v6addrs.src) + sizeof(flow->addrs.v6addrs.src)); memcpy(&flow->addrs.v6addrs, &iph->addrs, sizeof(flow->addrs.v6addrs)); flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; } #if IS_ENABLED(CONFIG_IPV6) static inline bool ipv6_can_nonlocal_bind(struct net *net, struct inet_sock *inet) { return net->ipv6.sysctl.ip_nonlocal_bind || test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) || test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags); } /* Sysctl settings for net ipv6.auto_flowlabels */ #define IP6_AUTO_FLOW_LABEL_OFF 0 #define IP6_AUTO_FLOW_LABEL_OPTOUT 1 #define IP6_AUTO_FLOW_LABEL_OPTIN 2 #define IP6_AUTO_FLOW_LABEL_FORCED 3 #define IP6_AUTO_FLOW_LABEL_MAX IP6_AUTO_FLOW_LABEL_FORCED #define IP6_DEFAULT_AUTO_FLOW_LABELS IP6_AUTO_FLOW_LABEL_OPTOUT static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, __be32 flowlabel, bool autolabel, struct flowi6 *fl6) { u32 hash; /* @flowlabel may include more than a flow label, eg, the traffic class. * Here we want only the flow label value. */ flowlabel &= IPV6_FLOWLABEL_MASK; if (flowlabel || net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF || (!autolabel && net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED)) return flowlabel; hash = skb_get_hash_flowi6(skb, fl6); /* Since this is being sent on the wire obfuscate hash a bit * to minimize possibility that any useful information to an * attacker is leaked. Only lower 20 bits are relevant. 
*/ hash = rol32(hash, 16); flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; if (net->ipv6.sysctl.flowlabel_state_ranges) flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG; return flowlabel; } static inline int ip6_default_np_autolabel(struct net *net) { switch (net->ipv6.sysctl.auto_flowlabels) { case IP6_AUTO_FLOW_LABEL_OFF: case IP6_AUTO_FLOW_LABEL_OPTIN: default: return 0; case IP6_AUTO_FLOW_LABEL_OPTOUT: case IP6_AUTO_FLOW_LABEL_FORCED: return 1; } } #else static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, __be32 flowlabel, bool autolabel, struct flowi6 *fl6) { return flowlabel; } static inline int ip6_default_np_autolabel(struct net *net) { return 0; } #endif #if IS_ENABLED(CONFIG_IPV6) static inline int ip6_multipath_hash_policy(const struct net *net) { return net->ipv6.sysctl.multipath_hash_policy; } static inline u32 ip6_multipath_hash_fields(const struct net *net) { return net->ipv6.sysctl.multipath_hash_fields; } #else static inline int ip6_multipath_hash_policy(const struct net *net) { return 0; } static inline u32 ip6_multipath_hash_fields(const struct net *net) { return 0; } #endif /* * Header manipulation */ static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass, __be32 flowlabel) { *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel; } static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr) { return *(__be32 *)hdr & IPV6_FLOWINFO_MASK; } static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr) { return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK; } static inline u8 ip6_tclass(__be32 flowinfo) { return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT; } static inline dscp_t ip6_dscp(__be32 flowinfo) { return inet_dsfield_to_dscp(ip6_tclass(flowinfo)); } static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel) { return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel; } static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6) { return fl6->flowlabel & IPV6_FLOWLABEL_MASK; } /* * Prototypes exported by ipv6 */ /* * rcv function (called from netdevice level) */ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, struct net_device *orig_dev); int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb); /* * upper-layer output functions */ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority); int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags); int ip6_push_pending_frames(struct sock *sk); void ip6_flush_pending_frames(struct sock *sk); int ip6_send_skb(struct sk_buff *skb); struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue, struct inet_cork_full *cork, struct inet6_cork *v6_cork); struct sk_buff *ip6_make_skb(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct rt6_info *rt, unsigned int flags, struct inet_cork_full *cork); static inline struct sk_buff *ip6_finish_skb(struct sock *sk) { return __ip6_make_skb(sk, &sk->sk_write_queue, 
&inet_sk(sk)->cork, &inet6_sk(sk)->cork); } int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst); struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, bool connected); struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *orig_dst); /* * skb processing functions */ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb); int ip6_forward(struct sk_buff *skb); int ip6_input(struct sk_buff *skb); int ip6_mc_input(struct sk_buff *skb); void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr, bool have_final); int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); /* * Extension header (options) processing */ void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto, struct in6_addr **daddr_p, struct in6_addr *saddr); void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto); int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp, __be16 *frag_offp); bool ipv6_ext_hdr(u8 nexthdr); enum { IP6_FH_F_FRAG = (1 << 0), IP6_FH_F_AUTH = (1 << 1), IP6_FH_F_SKIP_RH = (1 << 2), }; /* find specified header and get offset to it */ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target, unsigned short *fragoff, int *fragflg); int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type); struct in6_addr *fl6_update_dst(struct flowi6 *fl6, const struct ipv6_txoptions *opt, struct in6_addr *orig); /* * socket options (ipv6_sockglue.c) */ DECLARE_STATIC_KEY_FALSE(ip6_min_hopcount); int do_ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int do_ipv6_getsockopt(struct sock *sk, int level, int optname, sockptr_t optval, sockptr_t optlen); int ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr, int addr_len); int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr); void ip6_datagram_release_cb(struct sock *sk); int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len); int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, int *addr_len); void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload); void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu); void inet6_cleanup_sock(struct sock *sk); void inet6_sock_destruct(struct sock *sk); int inet6_release(struct socket *sock); int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); int inet6_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len); int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int peer); int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int inet6_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned 
long arg); int inet6_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk); int inet6_sendmsg(struct socket *sock, struct msghdr *msg, size_t size); int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); /* * reassembly.c */ extern const struct proto_ops inet6_stream_ops; extern const struct proto_ops inet6_dgram_ops; extern const struct proto_ops inet6_sockraw_ops; struct group_source_req; struct group_filter; int ip6_mc_source(int add, int omode, struct sock *sk, struct group_source_req *pgsr); int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf, struct sockaddr_storage *list); int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, sockptr_t optval, size_t ss_offset); #ifdef CONFIG_PROC_FS int ac6_proc_init(struct net *net); void ac6_proc_exit(struct net *net); int raw6_proc_init(void); void raw6_proc_exit(void); int tcp6_proc_init(struct net *net); void tcp6_proc_exit(struct net *net); int udp6_proc_init(struct net *net); void udp6_proc_exit(struct net *net); int udplite6_proc_init(void); void udplite6_proc_exit(void); int ipv6_misc_proc_init(void); void ipv6_misc_proc_exit(void); int snmp6_register_dev(struct inet6_dev *idev); int snmp6_unregister_dev(struct inet6_dev *idev); #else static inline int ac6_proc_init(struct net *net) { return 0; } static inline void ac6_proc_exit(struct net *net) { } static inline int snmp6_register_dev(struct inet6_dev *idev) { return 0; } static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; } #endif #ifdef CONFIG_SYSCTL struct ctl_table *ipv6_icmp_sysctl_init(struct net *net); size_t ipv6_icmp_sysctl_table_size(void); struct ctl_table *ipv6_route_sysctl_init(struct net *net); size_t ipv6_route_sysctl_table_size(struct net *net); int ipv6_sysctl_register(void); void ipv6_sysctl_unregister(void); #endif int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr); int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex, const struct in6_addr *addr, unsigned int mode); int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); static inline int ip6_sock_set_v6only(struct sock *sk) { if (inet_sk(sk)->inet_num) return -EINVAL; lock_sock(sk); sk->sk_ipv6only = true; release_sock(sk); return 0; } static inline void ip6_sock_set_recverr(struct sock *sk) { inet6_set_bit(RECVERR6, sk); } #define IPV6_PREFER_SRC_MASK (IPV6_PREFER_SRC_TMP | IPV6_PREFER_SRC_PUBLIC | \ IPV6_PREFER_SRC_COA) static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val) { unsigned int prefmask = ~IPV6_PREFER_SRC_MASK; unsigned int pref = 0; /* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */ switch (val & (IPV6_PREFER_SRC_PUBLIC | IPV6_PREFER_SRC_TMP | IPV6_PREFER_SRC_PUBTMP_DEFAULT)) { case IPV6_PREFER_SRC_PUBLIC: pref |= IPV6_PREFER_SRC_PUBLIC; prefmask &= ~(IPV6_PREFER_SRC_PUBLIC | IPV6_PREFER_SRC_TMP); break; case IPV6_PREFER_SRC_TMP: pref |= IPV6_PREFER_SRC_TMP; prefmask &= ~(IPV6_PREFER_SRC_PUBLIC | IPV6_PREFER_SRC_TMP); break; case IPV6_PREFER_SRC_PUBTMP_DEFAULT: prefmask &= ~(IPV6_PREFER_SRC_PUBLIC | IPV6_PREFER_SRC_TMP); break; case 0: break; default: return -EINVAL; } /* check HOME/COA conflicts */ switch (val & (IPV6_PREFER_SRC_HOME | IPV6_PREFER_SRC_COA)) { case IPV6_PREFER_SRC_HOME: prefmask &= ~IPV6_PREFER_SRC_COA; break; case IPV6_PREFER_SRC_COA: pref |= IPV6_PREFER_SRC_COA; break; case 0: break; default: return -EINVAL; } /* check CGA/NONCGA conflicts */ switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) { case 
IPV6_PREFER_SRC_CGA: case IPV6_PREFER_SRC_NONCGA: case 0: break; default: return -EINVAL; } WRITE_ONCE(inet6_sk(sk)->srcprefs, (READ_ONCE(inet6_sk(sk)->srcprefs) & prefmask) | pref); return 0; } static inline void ip6_sock_set_recvpktinfo(struct sock *sk) { lock_sock(sk); inet6_sk(sk)->rxopt.bits.rxinfo = true; release_sock(sk); } #define IPV6_ADDR_WORDS 4 static inline void ipv6_addr_cpu_to_be32(__be32 *dst, const u32 *src) { cpu_to_be32_array(dst, src, IPV6_ADDR_WORDS); } static inline void ipv6_addr_be32_to_cpu(u32 *dst, const __be32 *src) { be32_to_cpu_array(dst, src, IPV6_ADDR_WORDS); } #endif /* _NET_IPV6_H */ |
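/*
 * Illustrative sketch, not part of the header above: how a caller can use
 * ipv6_prefix_equal() and ipv6_addr_diff() together.  The helper name is
 * hypothetical; it assumes a kernel build where <net/ipv6.h> is available.
 */
#include <net/ipv6.h>

static bool example_share_slash64(const struct in6_addr *a,
				  const struct in6_addr *b)
{
	/* ipv6_addr_diff() counts from the most significant bit and returns
	 * 128 for identical addresses, so requiring the first differing bit
	 * to lie at or beyond bit 64 is the same test as
	 * ipv6_prefix_equal(a, b, 64).
	 */
	return ipv6_addr_diff(a, b) >= 64;
}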
/* * Linux Security Module interfaces * * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com> * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com> * Copyright (C) 2001 James Morris <jmorris@intercode.com.au> * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group) * Copyright (C) 2015 Intel Corporation. * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com> * Copyright (C) 2016 Mellanox Techonologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Due to this file being licensed under the GPL there is controversy over * whether this permits you to write a module that #includes this file * without placing your module under the GPL. Please consult a lawyer for * advice before doing this. * */ #ifndef __LINUX_LSM_HOOKS_H #define __LINUX_LSM_HOOKS_H #include <uapi/linux/lsm.h> #include <linux/security.h> #include <linux/init.h> #include <linux/rculist.h> #include <linux/xattr.h> #include <linux/static_call.h> #include <linux/unroll.h> #include <linux/jump_label.h> #include <linux/lsm_count.h> union security_list_options { #define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__); #include <linux/lsm_hook_defs.h> #undef LSM_HOOK void *lsm_func_addr; }; /* * @key: static call key as defined by STATIC_CALL_KEY * @trampoline: static call trampoline as defined by STATIC_CALL_TRAMP * @hl: The security_hook_list as initialized by the owning LSM. * @active: Enabled when the static call has an LSM hook associated. */ struct lsm_static_call { struct static_call_key *key; void *trampoline; struct security_hook_list *hl; /* this needs to be true or false based on what the key defaults to */ struct static_key_false *active; } __randomize_layout; /* * Table of the static calls for each LSM hook. * Once the LSMs are initialized, their callbacks will be copied to these * tables such that the calls are filled backwards (from last to first). * This way, we can jump directly to the first used static call, and execute * all of them after. This essentially makes the entry point * dynamic to adapt the number of static calls to the number of callbacks. */ struct lsm_static_calls_table { #define LSM_HOOK(RET, DEFAULT, NAME, ...) \ struct lsm_static_call NAME[MAX_LSM_COUNT]; #include <linux/lsm_hook_defs.h> #undef LSM_HOOK } __packed __randomize_layout; /** * struct lsm_id - Identify a Linux Security Module. * @name: name of the LSM, must be approved by the LSM maintainers * @id: LSM ID number from uapi/linux/lsm.h * * Contains the information that identifies the LSM.
*/ struct lsm_id { const char *name; u64 id; }; /* * Security module hook list structure. * For use with generic list macros for common operations. * * struct security_hook_list - Contents of a cacheable, mappable object. * @scalls: The beginning of the array of static calls assigned to this hook. * @hook: The callback for the hook. * @lsm: The name of the lsm that owns this hook. */ struct security_hook_list { struct lsm_static_call *scalls; union security_list_options hook; const struct lsm_id *lsmid; } __randomize_layout; /* * Security blob size or offset data. */ struct lsm_blob_sizes { int lbs_cred; int lbs_file; int lbs_ib; int lbs_inode; int lbs_sock; int lbs_superblock; int lbs_ipc; int lbs_key; int lbs_msg_msg; int lbs_perf_event; int lbs_task; int lbs_xattr_count; /* number of xattr slots in new_xattrs array */ int lbs_tun_dev; int lbs_bdev; }; /* * LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void * LSM hooks (in include/linux/lsm_hook_defs.h). */ #define LSM_RET_VOID ((void) 0) /* * Initializing a security_hook_list structure takes * up a lot of space in a source file. This macro takes * care of the common case and reduces the amount of * text involved. */ #define LSM_HOOK_INIT(NAME, HOOK) \ { \ .scalls = static_calls_table.NAME, \ .hook = { .NAME = HOOK } \ } extern void security_add_hooks(struct security_hook_list *hooks, int count, const struct lsm_id *lsmid); #define LSM_FLAG_LEGACY_MAJOR BIT(0) #define LSM_FLAG_EXCLUSIVE BIT(1) enum lsm_order { LSM_ORDER_FIRST = -1, /* This is only for capabilities. */ LSM_ORDER_MUTABLE = 0, LSM_ORDER_LAST = 1, /* This is only for integrity. */ }; struct lsm_info { const char *name; /* Required. */ enum lsm_order order; /* Optional: default is LSM_ORDER_MUTABLE */ unsigned long flags; /* Optional: flags describing LSM */ int *enabled; /* Optional: controlled by CONFIG_LSM */ int (*init)(void); /* Required. */ struct lsm_blob_sizes *blobs; /* Optional: for blob sharing. */ }; #define DEFINE_LSM(lsm) \ static struct lsm_info __lsm_##lsm \ __used __section(".lsm_info.init") \ __aligned(sizeof(unsigned long)) #define DEFINE_EARLY_LSM(lsm) \ static struct lsm_info __early_lsm_##lsm \ __used __section(".early_lsm_info.init") \ __aligned(sizeof(unsigned long)) /* DO NOT tamper with these variables outside of the LSM framework */ extern char *lsm_names; extern struct lsm_static_calls_table static_calls_table __ro_after_init; extern struct lsm_info __start_lsm_info[], __end_lsm_info[]; extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[]; /** * lsm_get_xattr_slot - Return the next available slot and increment the index * @xattrs: array storing LSM-provided xattrs * @xattr_count: number of already stored xattrs (updated) * * Retrieve the first available slot in the @xattrs array to fill with an xattr, * and increment @xattr_count. * * Return: The slot to fill in @xattrs if non-NULL, NULL otherwise. */ static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs, int *xattr_count) { if (unlikely(!xattrs)) return NULL; return &xattrs[(*xattr_count)++]; } #endif /* ! __LINUX_LSM_HOOKS_H */ |
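/*
 * Minimal registration sketch, not part of the header above: how an LSM
 * would typically use LSM_HOOK_INIT(), security_add_hooks() and
 * DEFINE_LSM().  The "example" LSM, its hook body and the placeholder ID
 * are hypothetical.
 */
#include <linux/lsm_hooks.h>
#include <uapi/linux/lsm.h>

static int example_file_open(struct file *file)
{
	return 0;			/* always allow */
}

static const struct lsm_id example_lsmid = {
	.name	= "example",
	.id	= LSM_ID_UNDEF,		/* placeholder; a real LSM has its own assigned ID */
};

static struct security_hook_list example_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(file_open, example_file_open),
};

static int __init example_lsm_init(void)
{
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
			   &example_lsmid);
	return 0;
}

DEFINE_LSM(example) = {
	.name	= "example",
	.init	= example_lsm_init,
};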
/* * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin * cleaned up code to current version of sparse and added the slicing-by-8 * algorithm to the closely similar existing slicing-by-4 algorithm. * * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com> * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks! * Code was from the public domain, copyright abandoned. Code was * subsequently included in the kernel, thus was re-licensed under the * GNU GPL v2. * * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com> * Same crc32 function was used in 5 other places in the kernel. * I made one version, and deleted the others. * There are various incantations of crc32(). Some use a seed of 0 or ~0. * Some xor at the end with ~0. The generic crc32() function takes * seed as an argument, and doesn't xor at the end. Then individual * users can do whatever they need. * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0. * fs/jffs2 uses seed 0, doesn't xor with ~0. * fs/partitions/efi.c uses seed ~0, xor's with ~0. * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details.
*/ /* see: Documentation/staging/crc32.rst for a description of algorithms */ #include <linux/crc32.h> #include <linux/crc32poly.h> #include <linux/module.h> #include <linux/types.h> #include <linux/sched.h> #include "crc32defs.h" #if CRC_LE_BITS > 8 # define tole(x) ((__force u32) cpu_to_le32(x)) #else # define tole(x) (x) #endif #if CRC_BE_BITS > 8 # define tobe(x) ((__force u32) cpu_to_be32(x)) #else # define tobe(x) (x) #endif #include "crc32table.h" MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); MODULE_DESCRIPTION("Various CRC32 calculations"); MODULE_LICENSE("GPL"); #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8 /* implements slicing-by-4 or slicing-by-8 algorithm */ static inline u32 __pure crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) { # ifdef __LITTLE_ENDIAN # define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8) # define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \ t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255]) # define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \ t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255]) # else # define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8) # define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \ t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255]) # define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \ t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255]) # endif const u32 *b; size_t rem_len; # ifdef CONFIG_X86 size_t i; # endif const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3]; # if CRC_LE_BITS != 32 const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7]; # endif u32 q; /* Align it */ if (unlikely((long)buf & 3 && len)) { do { DO_CRC(*buf++); } while ((--len) && ((long)buf)&3); } # if CRC_LE_BITS == 32 rem_len = len & 3; len = len >> 2; # else rem_len = len & 7; len = len >> 3; # endif b = (const u32 *)buf; # ifdef CONFIG_X86 --b; for (i = 0; i < len; i++) { # else for (--b; len; --len) { # endif q = crc ^ *++b; /* use pre increment for speed */ # if CRC_LE_BITS == 32 crc = DO_CRC4; # else crc = DO_CRC8; q = *++b; crc ^= DO_CRC4; # endif } len = rem_len; /* And the last few bytes */ if (len) { u8 *p = (u8 *)(b + 1) - 1; # ifdef CONFIG_X86 for (i = 0; i < len; i++) DO_CRC(*++p); /* use pre increment for speed */ # else do { DO_CRC(*++p); /* use pre increment for speed */ } while (--len); # endif } return crc; #undef DO_CRC #undef DO_CRC4 #undef DO_CRC8 } #endif /** * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II * CRC32/CRC32C * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for other * uses, or the previous crc32/crc32c value if computing incrementally. * @p: pointer to buffer over which CRC32/CRC32C is run * @len: length of buffer @p * @tab: little-endian Ethernet table * @polynomial: CRC32/CRC32c LE polynomial */ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p, size_t len, const u32 (*tab)[256], u32 polynomial) { #if CRC_LE_BITS == 1 int i; while (len--) { crc ^= *p++; for (i = 0; i < 8; i++) crc = (crc >> 1) ^ ((crc & 1) ? 
polynomial : 0); } # elif CRC_LE_BITS == 2 while (len--) { crc ^= *p++; crc = (crc >> 2) ^ tab[0][crc & 3]; crc = (crc >> 2) ^ tab[0][crc & 3]; crc = (crc >> 2) ^ tab[0][crc & 3]; crc = (crc >> 2) ^ tab[0][crc & 3]; } # elif CRC_LE_BITS == 4 while (len--) { crc ^= *p++; crc = (crc >> 4) ^ tab[0][crc & 15]; crc = (crc >> 4) ^ tab[0][crc & 15]; } # elif CRC_LE_BITS == 8 /* aka Sarwate algorithm */ while (len--) { crc ^= *p++; crc = (crc >> 8) ^ tab[0][crc & 255]; } # else crc = (__force u32) __cpu_to_le32(crc); crc = crc32_body(crc, p, len, tab); crc = __le32_to_cpu((__force __le32)crc); #endif return crc; } #if CRC_LE_BITS == 1 u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE); } u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE); } #else u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, crc32table_le, CRC32_POLY_LE); } u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE); } #endif EXPORT_SYMBOL(crc32_le); EXPORT_SYMBOL(__crc32c_le); u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); u32 __pure crc32_be_base(u32, unsigned char const *, size_t) __alias(crc32_be); /* * This multiplies the polynomials x and y modulo the given modulus. * This follows the "little-endian" CRC convention that the lsbit * represents the highest power of x, and the msbit represents x^0. */ static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus) { u32 product = x & 1 ? y : 0; int i; for (i = 0; i < 31; i++) { product = (product >> 1) ^ (product & 1 ? modulus : 0); x >>= 1; product ^= x & 1 ? y : 0; } return product; } /** * crc32_generic_shift - Append @len 0 bytes to crc, in logarithmic time * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient) * @len: The number of bytes. @crc is multiplied by x^(8*@len) * @polynomial: The modulus used to reduce the result to 32 bits. * * It's possible to parallelize CRC computations by computing a CRC * over separate ranges of a buffer, then summing them. * This shifts the given CRC by 8*len bits (i.e. produces the same effect * as appending len bytes of zero to the data), in time proportional * to log(len). */ static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len, u32 polynomial) { u32 power = polynomial; /* CRC of x^32 */ int i; /* Shift up to 32 bits in the simple linear way */ for (i = 0; i < 8 * (int)(len & 3); i++) crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0); len >>= 2; if (!len) return crc; for (;;) { /* "power" is x^(2^i), modulo the polynomial */ if (len & 1) crc = gf2_multiply(crc, power, polynomial); len >>= 1; if (!len) break; /* Square power, advancing to x^(2^(i+1)) */ power = gf2_multiply(power, power, polynomial); } return crc; } u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len) { return crc32_generic_shift(crc, len, CRC32_POLY_LE); } u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len) { return crc32_generic_shift(crc, len, CRC32C_POLY_LE); } EXPORT_SYMBOL(crc32_le_shift); EXPORT_SYMBOL(__crc32c_le_shift); /** * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 * @crc: seed value for computation. 
~0 for Ethernet, sometimes 0 for * other uses, or the previous crc32 value if computing incrementally. * @p: pointer to buffer over which CRC32 is run * @len: length of buffer @p * @tab: big-endian Ethernet table * @polynomial: CRC32 BE polynomial */ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p, size_t len, const u32 (*tab)[256], u32 polynomial) { #if CRC_BE_BITS == 1 int i; while (len--) { crc ^= *p++ << 24; for (i = 0; i < 8; i++) crc = (crc << 1) ^ ((crc & 0x80000000) ? polynomial : 0); } # elif CRC_BE_BITS == 2 while (len--) { crc ^= *p++ << 24; crc = (crc << 2) ^ tab[0][crc >> 30]; crc = (crc << 2) ^ tab[0][crc >> 30]; crc = (crc << 2) ^ tab[0][crc >> 30]; crc = (crc << 2) ^ tab[0][crc >> 30]; } # elif CRC_BE_BITS == 4 while (len--) { crc ^= *p++ << 24; crc = (crc << 4) ^ tab[0][crc >> 28]; crc = (crc << 4) ^ tab[0][crc >> 28]; } # elif CRC_BE_BITS == 8 while (len--) { crc ^= *p++ << 24; crc = (crc << 8) ^ tab[0][crc >> 24]; } # else crc = (__force u32) __cpu_to_be32(crc); crc = crc32_body(crc, p, len, tab); crc = __be32_to_cpu((__force __be32)crc); # endif return crc; } #if CRC_BE_BITS == 1 u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len) { return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE); } #else u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len) { return crc32_be_generic(crc, p, len, crc32table_be, CRC32_POLY_BE); } #endif EXPORT_SYMBOL(crc32_be); |
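/*
 * Illustrative sketch, not part of crc32.c above: combining CRCs computed
 * over two halves of a buffer, as described in the crc32_generic_shift()
 * comment.  The helper name is hypothetical; it relies only on crc32_le()
 * and crc32_le_shift() declared in <linux/crc32.h> (this is essentially
 * what the crc32_le_combine() helper there does).
 */
#include <linux/crc32.h>

static u32 example_crc32_split(const u8 *buf, size_t len1, size_t len2)
{
	u32 crc1 = crc32_le(~0U, buf, len1);		/* first range, Ethernet-style ~0 seed */
	u32 crc2 = crc32_le(0, buf + len1, len2);	/* second range, zero seed */

	/* shift crc1 over len2 zero bytes, then fold in crc2; the result
	 * equals crc32_le(~0U, buf, len1 + len2)
	 */
	return crc32_le_shift(crc1, len2) ^ crc2;
}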
//
SPDX-License-Identifier: GPL-2.0-only /* * proc/fs/generic.c --- generic routines for the proc-fs * * This file contains generic proc-fs routines for handling * directories and files. * * Copyright (C) 1991, 1992 Linus Torvalds. * Copyright (C) 1997 Theodore Ts'o */ #include <linux/cache.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/namei.h> #include <linux/slab.h> #include <linux/printk.h> #include <linux/mount.h> #include <linux/init.h> #include <linux/idr.h> #include <linux/bitops.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/uaccess.h> #include <linux/seq_file.h> #include "internal.h" static DEFINE_RWLOCK(proc_subdir_lock); struct kmem_cache *proc_dir_entry_cache __ro_after_init; void pde_free(struct proc_dir_entry *pde) { if (S_ISLNK(pde->mode)) kfree(pde->data); if (pde->name != pde->inline_name) kfree(pde->name); kmem_cache_free(proc_dir_entry_cache, pde); } static int proc_match(const char *name, struct proc_dir_entry *de, unsigned int len) { if (len < de->namelen) return -1; if (len > de->namelen) return 1; return memcmp(name, de->name, len); } static struct proc_dir_entry *pde_subdir_first(struct proc_dir_entry *dir) { return rb_entry_safe(rb_first(&dir->subdir), struct proc_dir_entry, subdir_node); } static struct proc_dir_entry *pde_subdir_next(struct proc_dir_entry *dir) { return rb_entry_safe(rb_next(&dir->subdir_node), struct proc_dir_entry, subdir_node); } static struct proc_dir_entry *pde_subdir_find(struct proc_dir_entry *dir, const char *name, unsigned int len) { struct rb_node *node = dir->subdir.rb_node; while (node) { struct proc_dir_entry *de = rb_entry(node, struct proc_dir_entry, subdir_node); int result = proc_match(name, de, len); if (result < 0) node = node->rb_left; else if (result > 0) node = node->rb_right; else return de; } return NULL; } static bool pde_subdir_insert(struct proc_dir_entry *dir, struct proc_dir_entry *de) { struct rb_root *root = &dir->subdir; struct rb_node **new = &root->rb_node, *parent = NULL; /* Figure out where to put new node */ while (*new) { struct proc_dir_entry *this = rb_entry(*new, struct proc_dir_entry, subdir_node); int result = proc_match(de->name, this, de->namelen); parent = *new; if (result < 0) new = &(*new)->rb_left; else if (result > 0) new = &(*new)->rb_right; else return false; } /* Add new node and rebalance tree. 
*/ rb_link_node(&de->subdir_node, parent, new); rb_insert_color(&de->subdir_node, root); return true; } static int proc_notify_change(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr) { struct inode *inode = d_inode(dentry); struct proc_dir_entry *de = PDE(inode); int error; error = setattr_prepare(&nop_mnt_idmap, dentry, iattr); if (error) return error; setattr_copy(&nop_mnt_idmap, inode, iattr); proc_set_user(de, inode->i_uid, inode->i_gid); de->mode = inode->i_mode; return 0; } static int proc_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); struct proc_dir_entry *de = PDE(inode); if (de) { nlink_t nlink = READ_ONCE(de->nlink); if (nlink > 0) { set_nlink(inode, nlink); } } generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); return 0; } static const struct inode_operations proc_file_inode_operations = { .setattr = proc_notify_change, }; /* * This function parses a name such as "tty/driver/serial", and * returns the struct proc_dir_entry for "/proc/tty/driver", and * returns "serial" in residual. */ static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret, const char **residual) { const char *cp = name, *next; struct proc_dir_entry *de; de = *ret ?: &proc_root; while ((next = strchr(cp, '/')) != NULL) { de = pde_subdir_find(de, cp, next - cp); if (!de) { WARN(1, "name '%s'\n", name); return -ENOENT; } cp = next + 1; } *residual = cp; *ret = de; return 0; } static int xlate_proc_name(const char *name, struct proc_dir_entry **ret, const char **residual) { int rv; read_lock(&proc_subdir_lock); rv = __xlate_proc_name(name, ret, residual); read_unlock(&proc_subdir_lock); return rv; } static DEFINE_IDA(proc_inum_ida); #define PROC_DYNAMIC_FIRST 0xF0000000U /* * Return an inode number between PROC_DYNAMIC_FIRST and * 0xffffffff, or zero on failure. */ int proc_alloc_inum(unsigned int *inum) { int i; i = ida_alloc_max(&proc_inum_ida, UINT_MAX - PROC_DYNAMIC_FIRST, GFP_KERNEL); if (i < 0) return i; *inum = PROC_DYNAMIC_FIRST + (unsigned int)i; return 0; } void proc_free_inum(unsigned int inum) { ida_free(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST); } static int proc_misc_d_revalidate(struct dentry *dentry, unsigned int flags) { if (flags & LOOKUP_RCU) return -ECHILD; if (atomic_read(&PDE(d_inode(dentry))->in_use) < 0) return 0; /* revalidate */ return 1; } static int proc_misc_d_delete(const struct dentry *dentry) { return atomic_read(&PDE(d_inode(dentry))->in_use) < 0; } static const struct dentry_operations proc_misc_dentry_ops = { .d_revalidate = proc_misc_d_revalidate, .d_delete = proc_misc_d_delete, }; /* * Don't create negative dentries here, return -ENOENT by hand * instead. 
*/ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry, struct proc_dir_entry *de) { struct inode *inode; read_lock(&proc_subdir_lock); de = pde_subdir_find(de, dentry->d_name.name, dentry->d_name.len); if (de) { pde_get(de); read_unlock(&proc_subdir_lock); inode = proc_get_inode(dir->i_sb, de); if (!inode) return ERR_PTR(-ENOMEM); d_set_d_op(dentry, de->proc_dops); return d_splice_alias(inode, dentry); } read_unlock(&proc_subdir_lock); return ERR_PTR(-ENOENT); } struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct proc_fs_info *fs_info = proc_sb_info(dir->i_sb); if (fs_info->pidonly == PROC_PIDONLY_ON) return ERR_PTR(-ENOENT); return proc_lookup_de(dir, dentry, PDE(dir)); } /* * This returns non-zero if at EOF, so that the /proc * root directory can use this and check if it should * continue with the <pid> entries.. * * Note that the VFS-layer doesn't care about the return * value of the readdir() call, as long as it's non-negative * for success.. */ int proc_readdir_de(struct file *file, struct dir_context *ctx, struct proc_dir_entry *de) { int i; if (!dir_emit_dots(file, ctx)) return 0; i = ctx->pos - 2; read_lock(&proc_subdir_lock); de = pde_subdir_first(de); for (;;) { if (!de) { read_unlock(&proc_subdir_lock); return 0; } if (!i) break; de = pde_subdir_next(de); i--; } do { struct proc_dir_entry *next; pde_get(de); read_unlock(&proc_subdir_lock); if (!dir_emit(ctx, de->name, de->namelen, de->low_ino, de->mode >> 12)) { pde_put(de); return 0; } ctx->pos++; read_lock(&proc_subdir_lock); next = pde_subdir_next(de); pde_put(de); de = next; } while (de); read_unlock(&proc_subdir_lock); return 1; } int proc_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb); if (fs_info->pidonly == PROC_PIDONLY_ON) return 1; return proc_readdir_de(file, ctx, PDE(inode)); } /* * These are the generic /proc directory operations. They * use the in-memory "struct proc_dir_entry" tree to parse * the /proc directory. */ static const struct file_operations proc_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = proc_readdir, }; static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags) { return 0; } const struct dentry_operations proc_net_dentry_ops = { .d_revalidate = proc_net_d_revalidate, .d_delete = always_delete_dentry, }; /* * proc directories can do almost nothing.. 
*/ static const struct inode_operations proc_dir_inode_operations = { .lookup = proc_lookup, .getattr = proc_getattr, .setattr = proc_notify_change, }; /* returns the registered entry, or frees dp and returns NULL on failure */ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir, struct proc_dir_entry *dp) { if (proc_alloc_inum(&dp->low_ino)) goto out_free_entry; write_lock(&proc_subdir_lock); dp->parent = dir; if (pde_subdir_insert(dir, dp) == false) { WARN(1, "proc_dir_entry '%s/%s' already registered\n", dir->name, dp->name); write_unlock(&proc_subdir_lock); goto out_free_inum; } dir->nlink++; write_unlock(&proc_subdir_lock); return dp; out_free_inum: proc_free_inum(dp->low_ino); out_free_entry: pde_free(dp); return NULL; } static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent, const char *name, umode_t mode, nlink_t nlink) { struct proc_dir_entry *ent = NULL; const char *fn; struct qstr qstr; if (xlate_proc_name(name, parent, &fn) != 0) goto out; qstr.name = fn; qstr.len = strlen(fn); if (qstr.len == 0 || qstr.len >= 256) { WARN(1, "name len %u\n", qstr.len); return NULL; } if (qstr.len == 1 && fn[0] == '.') { WARN(1, "name '.'\n"); return NULL; } if (qstr.len == 2 && fn[0] == '.' && fn[1] == '.') { WARN(1, "name '..'\n"); return NULL; } if (*parent == &proc_root && name_to_int(&qstr) != ~0U) { WARN(1, "create '/proc/%s' by hand\n", qstr.name); return NULL; } if (is_empty_pde(*parent)) { WARN(1, "attempt to add to permanently empty directory"); return NULL; } ent = kmem_cache_zalloc(proc_dir_entry_cache, GFP_KERNEL); if (!ent) goto out; if (qstr.len + 1 <= SIZEOF_PDE_INLINE_NAME) { ent->name = ent->inline_name; } else { ent->name = kmalloc(qstr.len + 1, GFP_KERNEL); if (!ent->name) { pde_free(ent); return NULL; } } memcpy(ent->name, fn, qstr.len + 1); ent->namelen = qstr.len; ent->mode = mode; ent->nlink = nlink; ent->subdir = RB_ROOT; refcount_set(&ent->refcnt, 1); spin_lock_init(&ent->pde_unload_lock); INIT_LIST_HEAD(&ent->pde_openers); proc_set_user(ent, (*parent)->uid, (*parent)->gid); ent->proc_dops = &proc_misc_dentry_ops; /* Revalidate everything under /proc/${pid}/net */ if ((*parent)->proc_dops == &proc_net_dentry_ops) pde_force_lookup(ent); out: return ent; } struct proc_dir_entry *proc_symlink(const char *name, struct proc_dir_entry *parent, const char *dest) { struct proc_dir_entry *ent; ent = __proc_create(&parent, name, (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1); if (ent) { ent->size = strlen(dest); ent->data = kmemdup(dest, ent->size + 1, GFP_KERNEL); if (ent->data) { ent->proc_iops = &proc_link_inode_operations; ent = proc_register(parent, ent); } else { pde_free(ent); ent = NULL; } } return ent; } EXPORT_SYMBOL(proc_symlink); struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode, struct proc_dir_entry *parent, void *data, bool force_lookup) { struct proc_dir_entry *ent; if (mode == 0) mode = S_IRUGO | S_IXUGO; ent = __proc_create(&parent, name, S_IFDIR | mode, 2); if (ent) { ent->data = data; ent->proc_dir_ops = &proc_dir_operations; ent->proc_iops = &proc_dir_inode_operations; if (force_lookup) { pde_force_lookup(ent); } ent = proc_register(parent, ent); } return ent; } EXPORT_SYMBOL_GPL(_proc_mkdir); struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, struct proc_dir_entry *parent, void *data) { return _proc_mkdir(name, mode, parent, data, false); } EXPORT_SYMBOL_GPL(proc_mkdir_data); struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode, struct proc_dir_entry *parent) { return 
proc_mkdir_data(name, mode, parent, NULL); } EXPORT_SYMBOL(proc_mkdir_mode); struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent) { return proc_mkdir_data(name, 0, parent, NULL); } EXPORT_SYMBOL(proc_mkdir); struct proc_dir_entry *proc_create_mount_point(const char *name) { umode_t mode = S_IFDIR | S_IRUGO | S_IXUGO; struct proc_dir_entry *ent, *parent = NULL; ent = __proc_create(&parent, name, mode, 2); if (ent) { ent->data = NULL; ent->proc_dir_ops = NULL; ent->proc_iops = NULL; ent = proc_register(parent, ent); } return ent; } EXPORT_SYMBOL(proc_create_mount_point); struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode, struct proc_dir_entry **parent, void *data) { struct proc_dir_entry *p; if ((mode & S_IFMT) == 0) mode |= S_IFREG; if ((mode & S_IALLUGO) == 0) mode |= S_IRUGO; if (WARN_ON_ONCE(!S_ISREG(mode))) return NULL; p = __proc_create(parent, name, mode, 1); if (p) { p->proc_iops = &proc_file_inode_operations; p->data = data; } return p; } static inline void pde_set_flags(struct proc_dir_entry *pde) { if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT) pde->flags |= PROC_ENTRY_PERMANENT; } struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops, void *data) { struct proc_dir_entry *p; p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; p->proc_ops = proc_ops; pde_set_flags(p); return proc_register(parent, p); } EXPORT_SYMBOL(proc_create_data); struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops) { return proc_create_data(name, mode, parent, proc_ops, NULL); } EXPORT_SYMBOL(proc_create); static int proc_seq_open(struct inode *inode, struct file *file) { struct proc_dir_entry *de = PDE(inode); if (de->state_size) return seq_open_private(file, de->seq_ops, de->state_size); return seq_open(file, de->seq_ops); } static int proc_seq_release(struct inode *inode, struct file *file) { struct proc_dir_entry *de = PDE(inode); if (de->state_size) return seq_release_private(inode, file); return seq_release(inode, file); } static const struct proc_ops proc_seq_ops = { /* not permanent -- can call into arbitrary seq_operations */ .proc_open = proc_seq_open, .proc_read_iter = seq_read_iter, .proc_lseek = seq_lseek, .proc_release = proc_seq_release, }; struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct seq_operations *ops, unsigned int state_size, void *data) { struct proc_dir_entry *p; p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; p->proc_ops = &proc_seq_ops; p->seq_ops = ops; p->state_size = state_size; return proc_register(parent, p); } EXPORT_SYMBOL(proc_create_seq_private); static int proc_single_open(struct inode *inode, struct file *file) { struct proc_dir_entry *de = PDE(inode); return single_open(file, de->single_show, de->data); } static const struct proc_ops proc_single_ops = { /* not permanent -- can call into arbitrary ->single_show */ .proc_open = proc_single_open, .proc_read_iter = seq_read_iter, .proc_lseek = seq_lseek, .proc_release = single_release, }; struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode, struct proc_dir_entry *parent, int (*show)(struct seq_file *, void *), void *data) { struct proc_dir_entry *p; p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; p->proc_ops = &proc_single_ops; p->single_show = show; return 
proc_register(parent, p); } EXPORT_SYMBOL(proc_create_single_data); void proc_set_size(struct proc_dir_entry *de, loff_t size) { de->size = size; } EXPORT_SYMBOL(proc_set_size); void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) { de->uid = uid; de->gid = gid; } EXPORT_SYMBOL(proc_set_user); void pde_put(struct proc_dir_entry *pde) { if (refcount_dec_and_test(&pde->refcnt)) { proc_free_inum(pde->low_ino); pde_free(pde); } } /* * Remove a /proc entry and free it if it's not currently in use. */ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) { struct proc_dir_entry *de = NULL; const char *fn = name; unsigned int len; write_lock(&proc_subdir_lock); if (__xlate_proc_name(name, &parent, &fn) != 0) { write_unlock(&proc_subdir_lock); return; } len = strlen(fn); de = pde_subdir_find(parent, fn, len); if (de) { if (unlikely(pde_is_permanent(de))) { WARN(1, "removing permanent /proc entry '%s'", de->name); de = NULL; } else { rb_erase(&de->subdir_node, &parent->subdir); if (S_ISDIR(de->mode)) parent->nlink--; } } write_unlock(&proc_subdir_lock); if (!de) { WARN(1, "name '%s'\n", name); return; } proc_entry_rundown(de); WARN(pde_subdir_first(de), "%s: removing non-empty directory '%s/%s', leaking at least '%s'\n", __func__, de->parent->name, de->name, pde_subdir_first(de)->name); pde_put(de); } EXPORT_SYMBOL(remove_proc_entry); int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { struct proc_dir_entry *root = NULL, *de, *next; const char *fn = name; unsigned int len; write_lock(&proc_subdir_lock); if (__xlate_proc_name(name, &parent, &fn) != 0) { write_unlock(&proc_subdir_lock); return -ENOENT; } len = strlen(fn); root = pde_subdir_find(parent, fn, len); if (!root) { write_unlock(&proc_subdir_lock); return -ENOENT; } if (unlikely(pde_is_permanent(root))) { write_unlock(&proc_subdir_lock); WARN(1, "removing permanent /proc entry '%s/%s'", root->parent->name, root->name); return -EINVAL; } rb_erase(&root->subdir_node, &parent->subdir); de = root; while (1) { next = pde_subdir_first(de); if (next) { if (unlikely(pde_is_permanent(next))) { write_unlock(&proc_subdir_lock); WARN(1, "removing permanent /proc entry '%s/%s'", next->parent->name, next->name); return -EINVAL; } rb_erase(&next->subdir_node, &de->subdir); de = next; continue; } next = de->parent; if (S_ISDIR(de->mode)) next->nlink--; write_unlock(&proc_subdir_lock); proc_entry_rundown(de); if (de == root) break; pde_put(de); write_lock(&proc_subdir_lock); de = next; } pde_put(root); return 0; } EXPORT_SYMBOL(remove_proc_subtree); void *proc_get_parent_data(const struct inode *inode) { struct proc_dir_entry *de = PDE(inode); return de->parent->data; } EXPORT_SYMBOL_GPL(proc_get_parent_data); void proc_remove(struct proc_dir_entry *de) { if (de) remove_proc_subtree(de->name, de->parent); } EXPORT_SYMBOL(proc_remove); /* * Pull a user buffer into memory and pass it to the file's write handler if * one is supplied. The ->write() method is permitted to modify the * kernel-side buffer. */ ssize_t proc_simple_write(struct file *f, const char __user *ubuf, size_t size, loff_t *_pos) { struct proc_dir_entry *pde = PDE(file_inode(f)); char *buf; int ret; if (!pde->write) return -EACCES; if (size == 0 || size > PAGE_SIZE - 1) return -EINVAL; buf = memdup_user_nul(ubuf, size); if (IS_ERR(buf)) return PTR_ERR(buf); ret = pde->write(f, buf, size); kfree(buf); return ret == 0 ? size : ret; } |
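/*
 * Illustrative sketch, not part of generic.c above: how a module might use
 * the creators implemented in this file.  The directory name, show routine
 * and data string are hypothetical; assumes <linux/proc_fs.h> and
 * <linux/seq_file.h> are available in the build.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char example_status[] = "ok";

static int example_status_show(struct seq_file *m, void *v)
{
	/* ->private carries the data pointer passed to proc_create_single_data() */
	seq_printf(m, "status: %s\n", (char *)m->private);
	return 0;
}

static int __init example_proc_init(void)
{
	struct proc_dir_entry *dir;

	/* create /proc/example/ and a single_open()-backed /proc/example/status */
	dir = proc_mkdir("example", NULL);
	if (!dir)
		return -ENOMEM;
	if (!proc_create_single_data("status", 0444, dir,
				     example_status_show, example_status)) {
		remove_proc_subtree("example", NULL);
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_proc_exit(void)
{
	remove_proc_subtree("example", NULL);
}

module_init(example_proc_init);
module_exit(example_proc_exit);
MODULE_LICENSE("GPL");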
6 6 5 6 6 6 6 6 6 6 6 6 6 6 6 6 9 9 9 9 9 6 6 6 6 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 
// SPDX-License-Identifier: GPL-2.0 /****************************************************************************** * rtl871x_xmit.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com> * Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _RTL871X_XMIT_C_ #include "osdep_service.h" #include "drv_types.h" #include "osdep_intf.h" #include "usb_ops.h" #include <linux/usb.h> #include <linux/ieee80211.h> static const u8 P802_1H_OUI[P80211_OUI_LEN] = {0x00, 0x00, 0xf8}; static const u8 RFC1042_OUI[P80211_OUI_LEN] = {0x00, 0x00, 0x00}; static void init_hwxmits(struct hw_xmit *phwxmit, sint entry); static void alloc_hwxmits(struct _adapter *padapter); static void free_hwxmits(struct _adapter *padapter); static void _init_txservq(struct tx_servq *ptxservq) { INIT_LIST_HEAD(&ptxservq->tx_pending); _init_queue(&ptxservq->sta_pending); ptxservq->qcnt = 0; } void _r8712_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv) { memset((unsigned char *)psta_xmitpriv, 0, sizeof(struct sta_xmit_priv)); spin_lock_init(&psta_xmitpriv->lock); _init_txservq(&psta_xmitpriv->be_q); _init_txservq(&psta_xmitpriv->bk_q); _init_txservq(&psta_xmitpriv->vi_q); _init_txservq(&psta_xmitpriv->vo_q); INIT_LIST_HEAD(&psta_xmitpriv->legacy_dz); INIT_LIST_HEAD(&psta_xmitpriv->apsd); } int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, struct _adapter *padapter) { sint i; struct xmit_buf *pxmitbuf; struct xmit_frame *pxframe; int j; memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv)); spin_lock_init(&pxmitpriv->lock); /* *Please insert all the queue initialization using _init_queue below */ pxmitpriv->adapter = padapter; _init_queue(&pxmitpriv->be_pending); _init_queue(&pxmitpriv->bk_pending); _init_queue(&pxmitpriv->vi_pending); _init_queue(&pxmitpriv->vo_pending); _init_queue(&pxmitpriv->bm_pending); _init_queue(&pxmitpriv->legacy_dz_queue); _init_queue(&pxmitpriv->apsd_queue); _init_queue(&pxmitpriv->free_xmit_queue); /* * Please allocate memory with sz = (struct xmit_frame) * NR_XMITFRAME, * and initialize free_xmit_frame below. * Please also apply free_txobj to link_up all the xmit_frames...
*/ pxmitpriv->pallocated_frame_buf = kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4, GFP_ATOMIC); if (!pxmitpriv->pallocated_frame_buf) { pxmitpriv->pxmit_frame_buf = NULL; return -ENOMEM; } pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - ((addr_t) (pxmitpriv->pallocated_frame_buf) & 3); pxframe = (struct xmit_frame *) pxmitpriv->pxmit_frame_buf; for (i = 0; i < NR_XMITFRAME; i++) { INIT_LIST_HEAD(&(pxframe->list)); pxframe->padapter = padapter; pxframe->frame_tag = DATA_FRAMETAG; pxframe->pkt = NULL; pxframe->buf_addr = NULL; pxframe->pxmitbuf = NULL; list_add_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue)); pxframe++; } pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME; /* * init xmit hw_txqueue */ _r8712_init_hw_txqueue(&pxmitpriv->be_txqueue, BE_QUEUE_INX); _r8712_init_hw_txqueue(&pxmitpriv->bk_txqueue, BK_QUEUE_INX); _r8712_init_hw_txqueue(&pxmitpriv->vi_txqueue, VI_QUEUE_INX); _r8712_init_hw_txqueue(&pxmitpriv->vo_txqueue, VO_QUEUE_INX); _r8712_init_hw_txqueue(&pxmitpriv->bmc_txqueue, BMC_QUEUE_INX); pxmitpriv->frag_len = MAX_FRAG_THRESHOLD; pxmitpriv->txirp_cnt = 1; /*per AC pending irp*/ pxmitpriv->beq_cnt = 0; pxmitpriv->bkq_cnt = 0; pxmitpriv->viq_cnt = 0; pxmitpriv->voq_cnt = 0; /*init xmit_buf*/ _init_queue(&pxmitpriv->free_xmitbuf_queue); _init_queue(&pxmitpriv->pending_xmitbuf_queue); pxmitpriv->pxmitbuf = kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf), GFP_ATOMIC); if (!pxmitpriv->pxmitbuf) goto clean_up_frame_buf; pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf; for (i = 0; i < NR_XMITBUFF; i++) { INIT_LIST_HEAD(&pxmitbuf->list); pxmitbuf->pallocated_buf = kmalloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ, GFP_ATOMIC); if (!pxmitbuf->pallocated_buf) { j = 0; goto clean_up_alloc_buf; } pxmitbuf->pbuf = pxmitbuf->pallocated_buf + XMITBUF_ALIGN_SZ - ((addr_t) (pxmitbuf->pallocated_buf) & (XMITBUF_ALIGN_SZ - 1)); if (r8712_xmit_resource_alloc(padapter, pxmitbuf)) { j = 1; goto clean_up_alloc_buf; } list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue)); pxmitbuf++; } pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF; INIT_WORK(&padapter->wk_filter_rx_ff0, r8712_SetFilter); alloc_hwxmits(padapter); init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); tasklet_setup(&pxmitpriv->xmit_tasklet, r8712_xmit_bh); return 0; clean_up_alloc_buf: if (j) { /* failure happened in r8712_xmit_resource_alloc() * delete extra pxmitbuf->pallocated_buf */ kfree(pxmitbuf->pallocated_buf); } for (j = 0; j < i; j++) { int k; pxmitbuf--; /* reset pointer */ kfree(pxmitbuf->pallocated_buf); for (k = 0; k < 8; k++) /* delete xmit urb's */ usb_free_urb(pxmitbuf->pxmit_urb[k]); } kfree(pxmitpriv->pxmitbuf); pxmitpriv->pxmitbuf = NULL; clean_up_frame_buf: kfree(pxmitpriv->pallocated_frame_buf); pxmitpriv->pallocated_frame_buf = NULL; return -ENOMEM; } void _free_xmit_priv(struct xmit_priv *pxmitpriv) { int i; struct _adapter *padapter = pxmitpriv->adapter; struct xmit_frame *pxmitframe = (struct xmit_frame *) pxmitpriv->pxmit_frame_buf; struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf; if (!pxmitpriv->pxmit_frame_buf) return; for (i = 0; i < NR_XMITFRAME; i++) { r8712_xmit_complete(padapter, pxmitframe); pxmitframe++; } for (i = 0; i < NR_XMITBUFF; i++) { r8712_xmit_resource_free(padapter, pxmitbuf); kfree(pxmitbuf->pallocated_buf); pxmitbuf++; } kfree(pxmitpriv->pallocated_frame_buf); kfree(pxmitpriv->pxmitbuf); free_hwxmits(padapter); } int r8712_update_attrib(struct _adapter *padapter, _pkt *pkt, struct pkt_attrib *pattrib) { struct pkt_file 
pktfile; struct sta_info *psta = NULL; struct ethhdr etherhdr; struct tx_cmd txdesc; bool bmcast; struct sta_priv *pstapriv = &padapter->stapriv; struct security_priv *psecuritypriv = &padapter->securitypriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct qos_priv *pqospriv = &pmlmepriv->qospriv; _r8712_open_pktfile(pkt, &pktfile); _r8712_pktfile_read(&pktfile, (unsigned char *)&etherhdr, ETH_HLEN); pattrib->ether_type = ntohs(etherhdr.h_proto); /* * If the driver transmits an ARP packet, it can restore the initial * power-save mode; this covers getting a DHCP lease or a fixed IP. */ if (pattrib->ether_type == 0x0806) { if (padapter->pwrctrlpriv.pwr_mode != padapter->registrypriv.power_mgnt) { del_timer_sync(&pmlmepriv->dhcp_timer); r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt, padapter->registrypriv.smart_ps); } } memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN); memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN); pattrib->pctrl = 0; if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) { memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); memcpy(pattrib->ta, pattrib->src, ETH_ALEN); } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) { memcpy(pattrib->ra, get_bssid(pmlmepriv), ETH_ALEN); memcpy(pattrib->ta, pattrib->src, ETH_ALEN); } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); memcpy(pattrib->ta, get_bssid(pmlmepriv), ETH_ALEN); } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) { /* first, filter out packets that do not belong to MP mode */ if (pattrib->ether_type != 0x8712) return -EINVAL; /* for mp storing the txcmd per packet, * according to the info of txcmd to update pattrib */ /*get MP_TXDESC_SIZE bytes txcmd per packet*/ _r8712_pktfile_read(&pktfile, (u8 *)&txdesc, TXDESC_SIZE); memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); memcpy(pattrib->ta, pattrib->src, ETH_ALEN); pattrib->pctrl = 1; } /* r8712_xmitframe_coalesce() overwrites this! */ pattrib->pktlen = pktfile.pkt_len; if (pattrib->ether_type == ETH_P_IP) { /* The following is for DHCP and ARP packets: transmit them at * CCK 1M and keep LPS awake for a while so the DHCP exchange * does not fail. */ u8 tmp[24]; _r8712_pktfile_read(&pktfile, &tmp[0], 24); pattrib->dhcp_pkt = 0; if (pktfile.pkt_len > 282) {/*MINIMUM_DHCP_PACKET_SIZE)*/ if (pattrib->ether_type == ETH_P_IP) {/* IP header*/ if (((tmp[21] == 68) && (tmp[23] == 67)) || ((tmp[21] == 67) && (tmp[23] == 68))) { /* 68 : UDP BOOTP client * 67 : UDP BOOTP server * Use low rate to send DHCP packet.
*/ pattrib->dhcp_pkt = 1; } } } } bmcast = is_multicast_ether_addr(pattrib->ra); /* get sta_info*/ if (bmcast) { psta = r8712_get_bcmc_stainfo(padapter); pattrib->mac_id = 4; } else { if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) { psta = r8712_get_stainfo(pstapriv, get_bssid(pmlmepriv)); pattrib->mac_id = 5; } else { psta = r8712_get_stainfo(pstapriv, pattrib->ra); if (!psta) /* drop the pkt */ return -ENOMEM; if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) pattrib->mac_id = 5; else pattrib->mac_id = psta->mac_id; } } if (psta) { pattrib->psta = psta; } else { /* if we cannot get psta => drop the pkt */ return -ENOMEM; } pattrib->ack_policy = 0; /* get ether_hdr_len */ pattrib->pkt_hdrlen = ETH_HLEN; if (pqospriv->qos_option) { r8712_set_qos(&pktfile, pattrib); } else { pattrib->hdrlen = WLAN_HDR_A3_LEN; pattrib->subtype = IEEE80211_FTYPE_DATA; pattrib->priority = 0; } if (psta->ieee8021x_blocked) { pattrib->encrypt = 0; if ((pattrib->ether_type != 0x888e) && !check_fwstate(pmlmepriv, WIFI_MP_STATE)) return -EINVAL; } else { GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, bmcast); } switch (pattrib->encrypt) { case _WEP40_: case _WEP104_: pattrib->iv_len = 4; pattrib->icv_len = 4; break; case _TKIP_: pattrib->iv_len = 8; pattrib->icv_len = 4; if (padapter->securitypriv.busetkipkey == _FAIL) return -EINVAL; break; case _AES_: pattrib->iv_len = 8; pattrib->icv_len = 8; break; default: pattrib->iv_len = 0; pattrib->icv_len = 0; break; } if (pattrib->encrypt && (padapter->securitypriv.sw_encrypt || !psecuritypriv->hw_decrypted)) pattrib->bswenc = true; else pattrib->bswenc = false; /* if in MP_STATE, update pkt_attrib from mp_txcmd, and overwrite * some settings above. */ if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) pattrib->priority = (le32_to_cpu(txdesc.txdw1) >> QSEL_SHT) & 0x1f; return 0; } static int xmitframe_addmic(struct _adapter *padapter, struct xmit_frame *pxmitframe) { u32 curfragnum, length; u8 *pframe, *payload, mic[8]; struct mic_data micdata; struct sta_info *stainfo; struct qos_priv *pqospriv = &(padapter->mlmepriv.qospriv); struct pkt_attrib *pattrib = &pxmitframe->attrib; struct security_priv *psecpriv = &padapter->securitypriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; u8 priority[4] = {}; bool bmcst = is_multicast_ether_addr(pattrib->ra); if (pattrib->psta) stainfo = pattrib->psta; else stainfo = r8712_get_stainfo(&padapter->stapriv, &pattrib->ra[0]); if (pattrib->encrypt == _TKIP_) { /*encode mic code*/ if (stainfo) { u8 null_key[16] = {}; pframe = pxmitframe->buf_addr + TXDESC_OFFSET; if (bmcst) { if (!memcmp(psecpriv->XGrptxmickey [psecpriv->XGrpKeyid].skey, null_key, 16)) return -ENOMEM; /*start to calculate the mic code*/ r8712_secmicsetkey(&micdata, psecpriv->XGrptxmickey [psecpriv->XGrpKeyid].skey); } else { if (!memcmp(&stainfo->tkiptxmickey.skey[0], null_key, 16)) return -ENOMEM; /* start to calculate the mic code */ r8712_secmicsetkey(&micdata, &stainfo->tkiptxmickey.skey[0]); } if (pframe[1] & 1) { /* ToDS==1 */ r8712_secmicappend(&micdata, &pframe[16], 6); /*DA*/ if (pframe[1] & 2) /* From Ds==1 */ r8712_secmicappend(&micdata, &pframe[24], 6); else r8712_secmicappend(&micdata, &pframe[10], 6); } else { /* ToDS==0 */ r8712_secmicappend(&micdata, &pframe[4], 6); /* DA */ if (pframe[1] & 2) /* From Ds==1 */ r8712_secmicappend(&micdata, &pframe[16], 6); else r8712_secmicappend(&micdata, &pframe[10], 6); } if (pqospriv->qos_option == 1) priority[0] = (u8)pxmitframe->attrib.priority; r8712_secmicappend(&micdata, &priority[0], 4); payload = pframe;
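/*
 * Editor's note on the loop below: it walks every fragment of the MSDU. For
 * each fragment it skips the 802.11 header and IV (the MIC covers only the
 * payload), and it leaves out the ICV bytes whenever software encryption
 * (psecpriv->sw_encrypt) will append them later.
 */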
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) { payload = (u8 *)RND4((addr_t)(payload)); payload += pattrib->hdrlen + pattrib->iv_len; if ((curfragnum + 1) == pattrib->nr_frags) { length = pattrib->last_txcmdsz - pattrib->hdrlen - pattrib->iv_len - ((psecpriv->sw_encrypt) ? pattrib->icv_len : 0); r8712_secmicappend(&micdata, payload, length); payload = payload + length; } else { length = pxmitpriv->frag_len - pattrib->hdrlen - pattrib->iv_len - ((psecpriv->sw_encrypt) ? pattrib->icv_len : 0); r8712_secmicappend(&micdata, payload, length); payload = payload + length + pattrib->icv_len; } } r8712_secgetmic(&micdata, &(mic[0])); /* add mic code and add the mic code length in * last_txcmdsz */ memcpy(payload, &(mic[0]), 8); pattrib->last_txcmdsz += 8; payload = payload - pattrib->last_txcmdsz + 8; } } return 0; } static sint xmitframe_swencrypt(struct _adapter *padapter, struct xmit_frame *pxmitframe) { struct pkt_attrib *pattrib = &pxmitframe->attrib; if (pattrib->bswenc) { switch (pattrib->encrypt) { case _WEP40_: case _WEP104_: r8712_wep_encrypt(padapter, (u8 *)pxmitframe); break; case _TKIP_: r8712_tkip_encrypt(padapter, (u8 *)pxmitframe); break; case _AES_: r8712_aes_encrypt(padapter, (u8 *)pxmitframe); break; default: break; } } return _SUCCESS; } static int make_wlanhdr(struct _adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib) { u16 *qc; struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct qos_priv *pqospriv = &pmlmepriv->qospriv; __le16 *fctrl = &pwlanhdr->frame_control; u8 *bssid; memset(hdr, 0, WLANHDR_OFFSET); SetFrameSubType(fctrl, pattrib->subtype); if (!(pattrib->subtype & IEEE80211_FTYPE_DATA)) return 0; bssid = get_bssid(pmlmepriv); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) { /* to_ds = 1, fr_ds = 0; */ SetToDs(fctrl); ether_addr_copy(pwlanhdr->addr1, bssid); ether_addr_copy(pwlanhdr->addr2, pattrib->src); ether_addr_copy(pwlanhdr->addr3, pattrib->dst); } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { /* to_ds = 0, fr_ds = 1; */ SetFrDs(fctrl); ether_addr_copy(pwlanhdr->addr1, pattrib->dst); ether_addr_copy(pwlanhdr->addr2, bssid); ether_addr_copy(pwlanhdr->addr3, pattrib->src); } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) { ether_addr_copy(pwlanhdr->addr1, pattrib->dst); ether_addr_copy(pwlanhdr->addr2, pattrib->src); ether_addr_copy(pwlanhdr->addr3, bssid); } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) { ether_addr_copy(pwlanhdr->addr1, pattrib->dst); ether_addr_copy(pwlanhdr->addr2, pattrib->src); ether_addr_copy(pwlanhdr->addr3, bssid); } else { return -EINVAL; } if (pattrib->encrypt) SetPrivacy(fctrl); if (pqospriv->qos_option) { qc = (unsigned short *)(hdr + pattrib->hdrlen - 2); if (pattrib->priority) SetPriority(qc, pattrib->priority); SetAckpolicy(qc, pattrib->ack_policy); } /* TODO: fill HT Control Field */ /* Update Seq Num will be handled by f/w */ { struct sta_info *psta; bool bmcst = is_multicast_ether_addr(pattrib->ra); if (pattrib->psta) psta = pattrib->psta; else if (bmcst) psta = r8712_get_bcmc_stainfo(padapter); else psta = r8712_get_stainfo(&padapter->stapriv, pattrib->ra); if (psta) { u16 *txtid = psta->sta_xmitpriv.txseq_tid; txtid[pattrib->priority]++; txtid[pattrib->priority] &= 0xFFF; pattrib->seqnum = txtid[pattrib->priority]; SetSeqNum(hdr, pattrib->seqnum); } } return 0; } static sint r8712_put_snap(u8 *data, u16 h_proto) { struct ieee80211_snap_hdr *snap; const u8 *oui; snap = 
(struct ieee80211_snap_hdr *)data; snap->dsap = 0xaa; snap->ssap = 0xaa; snap->ctrl = 0x03; if (h_proto == 0x8137 || h_proto == 0x80f3) oui = P802_1H_OUI; else oui = RFC1042_OUI; snap->oui[0] = oui[0]; snap->oui[1] = oui[1]; snap->oui[2] = oui[2]; *(__be16 *)(data + SNAP_SIZE) = htons(h_proto); return SNAP_SIZE + sizeof(u16); } /* * This sub-routine will perform all the following: * 1. remove 802.3 header. * 2. create wlan_header, based on the info in pxmitframe * 3. append sta's iv/ext-iv * 4. append LLC * 5. move frag chunk from pframe to pxmitframe->mem * 6. apply sw-encrypt, if necessary. */ sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt, struct xmit_frame *pxmitframe) { struct pkt_file pktfile; sint frg_len, mpdu_len, llc_sz; u32 mem_sz; u8 frg_inx; addr_t addr; u8 *pframe, *mem_start, *ptxdesc; struct sta_info *psta; struct security_priv *psecpriv = &padapter->securitypriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; u8 *pbuf_start; bool bmcst = is_multicast_ether_addr(pattrib->ra); if (!pattrib->psta) return _FAIL; psta = pattrib->psta; if (!pxmitframe->buf_addr) return _FAIL; pbuf_start = pxmitframe->buf_addr; ptxdesc = pbuf_start; mem_start = pbuf_start + TXDESC_OFFSET; if (make_wlanhdr(padapter, mem_start, pattrib)) return _FAIL; _r8712_open_pktfile(pkt, &pktfile); _r8712_pktfile_read(&pktfile, NULL, (uint) pattrib->pkt_hdrlen); if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) { /* truncate TXDESC_SIZE bytes txcmd if at mp mode for 871x */ if (pattrib->ether_type == 0x8712) { /* take care - update_txdesc overwrite this */ _r8712_pktfile_read(&pktfile, ptxdesc, TXDESC_SIZE); } } pattrib->pktlen = pktfile.pkt_len; frg_inx = 0; frg_len = pxmitpriv->frag_len - 4; while (1) { llc_sz = 0; mpdu_len = frg_len; pframe = mem_start; SetMFrag(mem_start); pframe += pattrib->hdrlen; mpdu_len -= pattrib->hdrlen; /* adding icv, if necessary...*/ if (pattrib->iv_len) { if (psta) { switch (pattrib->encrypt) { case _WEP40_: case _WEP104_: WEP_IV(pattrib->iv, psta->txpn, (u8)psecpriv->PrivacyKeyIndex); break; case _TKIP_: if (bmcst) TKIP_IV(pattrib->iv, psta->txpn, (u8)psecpriv->XGrpKeyid); else TKIP_IV(pattrib->iv, psta->txpn, 0); break; case _AES_: if (bmcst) AES_IV(pattrib->iv, psta->txpn, (u8)psecpriv->XGrpKeyid); else AES_IV(pattrib->iv, psta->txpn, 0); break; } } memcpy(pframe, pattrib->iv, pattrib->iv_len); pframe += pattrib->iv_len; mpdu_len -= pattrib->iv_len; } if (frg_inx == 0) { llc_sz = r8712_put_snap(pframe, pattrib->ether_type); pframe += llc_sz; mpdu_len -= llc_sz; } if ((pattrib->icv_len > 0) && (pattrib->bswenc)) mpdu_len -= pattrib->icv_len; if (bmcst) mem_sz = _r8712_pktfile_read(&pktfile, pframe, pattrib->pktlen); else mem_sz = _r8712_pktfile_read(&pktfile, pframe, mpdu_len); pframe += mem_sz; if ((pattrib->icv_len > 0) && (pattrib->bswenc)) { memcpy(pframe, pattrib->icv, pattrib->icv_len); pframe += pattrib->icv_len; } frg_inx++; if (bmcst || r8712_endofpktfile(&pktfile)) { pattrib->nr_frags = frg_inx; pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? llc_sz : 0) + ((pattrib->bswenc) ? 
pattrib->icv_len : 0) + mem_sz; ClearMFrag(mem_start); break; } addr = (addr_t)(pframe); mem_start = (unsigned char *)RND4(addr) + TXDESC_OFFSET; memcpy(mem_start, pbuf_start + TXDESC_OFFSET, pattrib->hdrlen); } if (xmitframe_addmic(padapter, pxmitframe)) return _FAIL; xmitframe_swencrypt(padapter, pxmitframe); return _SUCCESS; } void r8712_update_protection(struct _adapter *padapter, u8 *ie, uint ie_len) { uint protection; u8 *perp; uint erp_len; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct registry_priv *pregistrypriv = &padapter->registrypriv; switch (pxmitpriv->vcs_setting) { case DISABLE_VCS: pxmitpriv->vcs = NONE_VCS; break; case ENABLE_VCS: break; case AUTO_VCS: default: perp = r8712_get_ie(ie, WLAN_EID_ERP_INFO, &erp_len, ie_len); if (!perp) { pxmitpriv->vcs = NONE_VCS; } else { protection = (*(perp + 2)) & BIT(1); if (protection) { if (pregistrypriv->vcs_type == RTS_CTS) pxmitpriv->vcs = RTS_CTS; else pxmitpriv->vcs = CTS_TO_SELF; } else { pxmitpriv->vcs = NONE_VCS; } } break; } } struct xmit_buf *r8712_alloc_xmitbuf(struct xmit_priv *pxmitpriv) { unsigned long irqL; struct xmit_buf *pxmitbuf; struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue; spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL); pxmitbuf = list_first_entry_or_null(&pfree_xmitbuf_queue->queue, struct xmit_buf, list); if (pxmitbuf) { list_del_init(&pxmitbuf->list); pxmitpriv->free_xmitbuf_cnt--; } spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irqL); return pxmitbuf; } void r8712_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) { unsigned long irqL; struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue; if (!pxmitbuf) return; spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL); list_del_init(&pxmitbuf->list); list_add_tail(&(pxmitbuf->list), &pfree_xmitbuf_queue->queue); pxmitpriv->free_xmitbuf_cnt++; spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irqL); } /* * Calling context: * 1. OS_TXENTRY * 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack) * * If we turn on USE_RXTHREAD, then, no need for critical section. * Otherwise, we must use _enter/_exit critical to protect free_xmit_queue... * * Must be very very cautious... 
* */ struct xmit_frame *r8712_alloc_xmitframe(struct xmit_priv *pxmitpriv) { /* * Please remember to use all the osdep_service api, * and lock/unlock or _enter/_exit critical to protect * pfree_xmit_queue */ unsigned long irqL; struct xmit_frame *pxframe; struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue; spin_lock_irqsave(&pfree_xmit_queue->lock, irqL); pxframe = list_first_entry_or_null(&pfree_xmit_queue->queue, struct xmit_frame, list); if (pxframe) { list_del_init(&pxframe->list); pxmitpriv->free_xmitframe_cnt--; pxframe->buf_addr = NULL; pxframe->pxmitbuf = NULL; pxframe->attrib.psta = NULL; pxframe->pkt = NULL; } spin_unlock_irqrestore(&pfree_xmit_queue->lock, irqL); return pxframe; } void r8712_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe) { unsigned long irqL; struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue; struct _adapter *padapter = pxmitpriv->adapter; if (!pxmitframe) return; spin_lock_irqsave(&pfree_xmit_queue->lock, irqL); list_del_init(&pxmitframe->list); if (pxmitframe->pkt) pxmitframe->pkt = NULL; list_add_tail(&pxmitframe->list, &pfree_xmit_queue->queue); pxmitpriv->free_xmitframe_cnt++; spin_unlock_irqrestore(&pfree_xmit_queue->lock, irqL); if (netif_queue_stopped(padapter->pnetdev)) netif_wake_queue(padapter->pnetdev); } void r8712_free_xmitframe_ex(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe) { if (!pxmitframe) return; if (pxmitframe->frame_tag == DATA_FRAMETAG) r8712_free_xmitframe(pxmitpriv, pxmitframe); } void r8712_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue) { unsigned long irqL; struct list_head *plist, *phead; struct xmit_frame *pxmitframe; spin_lock_irqsave(&(pframequeue->lock), irqL); phead = &pframequeue->queue; plist = phead->next; while (!end_of_queue_search(phead, plist)) { pxmitframe = container_of(plist, struct xmit_frame, list); plist = plist->next; r8712_free_xmitframe(pxmitpriv, pxmitframe); } spin_unlock_irqrestore(&(pframequeue->lock), irqL); } static inline struct tx_servq *get_sta_pending(struct _adapter *padapter, struct __queue **ppstapending, struct sta_info *psta, sint up) { struct tx_servq *ptxservq; struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits; switch (up) { case 1: case 2: ptxservq = &(psta->sta_xmitpriv.bk_q); *ppstapending = &padapter->xmitpriv.bk_pending; (phwxmits + 3)->accnt++; break; case 4: case 5: ptxservq = &(psta->sta_xmitpriv.vi_q); *ppstapending = &padapter->xmitpriv.vi_pending; (phwxmits + 1)->accnt++; break; case 6: case 7: ptxservq = &(psta->sta_xmitpriv.vo_q); *ppstapending = &padapter->xmitpriv.vo_pending; (phwxmits + 0)->accnt++; break; case 0: case 3: default: ptxservq = &(psta->sta_xmitpriv.be_q); *ppstapending = &padapter->xmitpriv.be_pending; (phwxmits + 2)->accnt++; break; } return ptxservq; } /* * Will enqueue pxmitframe to the proper queue, and indicate it * to xx_pending list..... 
*/ int r8712_xmit_classifier(struct _adapter *padapter, struct xmit_frame *pxmitframe) { unsigned long irqL0; struct __queue *pstapending; struct sta_info *psta; struct tx_servq *ptxservq; struct pkt_attrib *pattrib = &pxmitframe->attrib; struct sta_priv *pstapriv = &padapter->stapriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; bool bmcst = is_multicast_ether_addr(pattrib->ra); if (pattrib->psta) { psta = pattrib->psta; } else { if (bmcst) { psta = r8712_get_bcmc_stainfo(padapter); } else { if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) psta = r8712_get_stainfo(pstapriv, get_bssid(pmlmepriv)); else psta = r8712_get_stainfo(pstapriv, pattrib->ra); } } if (!psta) return -EINVAL; ptxservq = get_sta_pending(padapter, &pstapending, psta, pattrib->priority); spin_lock_irqsave(&pstapending->lock, irqL0); if (list_empty(&ptxservq->tx_pending)) list_add_tail(&ptxservq->tx_pending, &pstapending->queue); list_add_tail(&pxmitframe->list, &ptxservq->sta_pending.queue); ptxservq->qcnt++; spin_unlock_irqrestore(&pstapending->lock, irqL0); return 0; } static void alloc_hwxmits(struct _adapter *padapter) { struct hw_xmit *hwxmits; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; pxmitpriv->hwxmit_entry = HWXMIT_ENTRY; pxmitpriv->hwxmits = kmalloc_array(pxmitpriv->hwxmit_entry, sizeof(struct hw_xmit), GFP_ATOMIC); if (!pxmitpriv->hwxmits) return; hwxmits = pxmitpriv->hwxmits; if (pxmitpriv->hwxmit_entry == 5) { pxmitpriv->bmc_txqueue.head = 0; hwxmits[0] .phwtxqueue = &pxmitpriv->bmc_txqueue; hwxmits[0] .sta_queue = &pxmitpriv->bm_pending; pxmitpriv->vo_txqueue.head = 0; hwxmits[1] .phwtxqueue = &pxmitpriv->vo_txqueue; hwxmits[1] .sta_queue = &pxmitpriv->vo_pending; pxmitpriv->vi_txqueue.head = 0; hwxmits[2] .phwtxqueue = &pxmitpriv->vi_txqueue; hwxmits[2] .sta_queue = &pxmitpriv->vi_pending; pxmitpriv->bk_txqueue.head = 0; hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; pxmitpriv->be_txqueue.head = 0; hwxmits[4] .phwtxqueue = &pxmitpriv->be_txqueue; hwxmits[4] .sta_queue = &pxmitpriv->be_pending; } else if (pxmitpriv->hwxmit_entry == 4) { pxmitpriv->vo_txqueue.head = 0; hwxmits[0] .phwtxqueue = &pxmitpriv->vo_txqueue; hwxmits[0] .sta_queue = &pxmitpriv->vo_pending; pxmitpriv->vi_txqueue.head = 0; hwxmits[1] .phwtxqueue = &pxmitpriv->vi_txqueue; hwxmits[1] .sta_queue = &pxmitpriv->vi_pending; pxmitpriv->be_txqueue.head = 0; hwxmits[2] .phwtxqueue = &pxmitpriv->be_txqueue; hwxmits[2] .sta_queue = &pxmitpriv->be_pending; pxmitpriv->bk_txqueue.head = 0; hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; } } static void free_hwxmits(struct _adapter *padapter) { struct xmit_priv *pxmitpriv = &padapter->xmitpriv; kfree(pxmitpriv->hwxmits); } static void init_hwxmits(struct hw_xmit *phwxmit, sint entry) { sint i; for (i = 0; i < entry; i++, phwxmit++) { spin_lock_init(&phwxmit->xmit_lock); INIT_LIST_HEAD(&phwxmit->pending); phwxmit->txcmdcnt = 0; phwxmit->accnt = 0; } } void xmitframe_xmitbuf_attach(struct xmit_frame *pxmitframe, struct xmit_buf *pxmitbuf) { /* pxmitbuf attach to pxmitframe */ pxmitframe->pxmitbuf = pxmitbuf; /* urb and irp connection */ pxmitframe->pxmit_urb[0] = pxmitbuf->pxmit_urb[0]; /* buffer addr assoc */ pxmitframe->buf_addr = pxmitbuf->pbuf; /* pxmitframe attach to pxmitbuf */ pxmitbuf->priv_data = pxmitframe; } /* * tx_action == 0 == no frames to transmit * tx_action > 0 ==> we have frames to transmit * tx_action < 0 ==> we have frames to transmit, but TXFF is not even enough * to 
transmit 1 frame. */ int r8712_pre_xmit(struct _adapter *padapter, struct xmit_frame *pxmitframe) { unsigned long irqL; int ret; struct xmit_buf *pxmitbuf = NULL; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; r8712_do_queue_select(padapter, pattrib); spin_lock_irqsave(&pxmitpriv->lock, irqL); if (r8712_txframes_sta_ac_pending(padapter, pattrib) > 0) { ret = false; r8712_xmit_enqueue(padapter, pxmitframe); spin_unlock_irqrestore(&pxmitpriv->lock, irqL); return ret; } pxmitbuf = r8712_alloc_xmitbuf(pxmitpriv); if (!pxmitbuf) { /*enqueue packet*/ ret = false; r8712_xmit_enqueue(padapter, pxmitframe); spin_unlock_irqrestore(&pxmitpriv->lock, irqL); } else { /*dump packet directly*/ spin_unlock_irqrestore(&pxmitpriv->lock, irqL); ret = true; xmitframe_xmitbuf_attach(pxmitframe, pxmitbuf); r8712_xmit_direct(padapter, pxmitframe); } return ret; }
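/*
 * Editor's illustrative sketch (not part of the original driver): the 802.1d
 * user-priority to WMM access-category mapping that get_sta_pending() above
 * applies when it picks a tx service queue. The enum and function names below
 * are invented for illustration only and kept under "#if 0".
 */
#if 0
enum example_wmm_ac { EXAMPLE_AC_BK, EXAMPLE_AC_BE, EXAMPLE_AC_VI, EXAMPLE_AC_VO };

static enum example_wmm_ac example_up_to_ac(unsigned int up)
{
	switch (up) {
	case 1:
	case 2:
		return EXAMPLE_AC_BK;	/* background  */
	case 4:
	case 5:
		return EXAMPLE_AC_VI;	/* video       */
	case 6:
	case 7:
		return EXAMPLE_AC_VO;	/* voice       */
	case 0:
	case 3:
	default:
		return EXAMPLE_AC_BE;	/* best effort */
	}
}
#endif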
// SPDX-License-Identifier: GPL-2.0-only /* * RTL8XXXU mac80211 USB driver - 8710bu aka 8188gu specific subdriver * * Copyright (c) 2023 Bitterblue Smith <rtl8821cerfe2@gmail.com> * * Portions copied from existing rtl8xxxu code: * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com> * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. */ #include "regs.h" #include "rtl8xxxu.h" static const struct rtl8xxxu_reg8val rtl8710b_mac_init_table[] = { {0x421, 0x0F}, {0x428, 0x0A}, {0x429, 0x10}, {0x430, 0x00}, {0x431, 0x00}, {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05}, {0x436, 0x07}, {0x437, 0x08}, {0x43C, 0x04}, {0x43D, 0x05}, {0x43E, 0x07}, {0x43F, 0x08}, {0x440, 0x5D}, {0x441, 0x01}, {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00}, {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xF0}, {0x44A, 0x0F}, {0x44B, 0x3E}, {0x44C, 0x10}, {0x44D, 0x00}, {0x44E, 0x00}, {0x44F, 0x00}, {0x450, 0x00}, {0x451, 0xF0}, {0x452, 0x0F}, {0x453, 0x00}, {0x456, 0x5E}, {0x460, 0x66}, {0x461, 0x66}, {0x4C8, 0xFF}, {0x4C9, 0x08}, {0x4CC, 0xFF}, {0x4CD, 0xFF}, {0x4CE, 0x01}, {0x500, 0x26}, {0x501, 0xA2}, {0x502, 0x2F}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xA3}, {0x506, 0x5E}, {0x507, 0x00}, {0x508, 0x2B}, {0x509, 0xA4}, {0x50A, 0x5E}, {0x50B, 0x00}, {0x50C, 0x4F}, {0x50D, 0xA4}, {0x50E, 0x00}, {0x50F, 0x00}, {0x512, 0x1C}, {0x514, 0x0A}, {0x516, 0x0A}, {0x525, 0x4F}, {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55C, 0x28}, {0x55D, 0xFF}, {0x605, 0x30}, {0x608, 0x0E}, {0x609, 0x2A}, {0x620, 0xFF}, {0x621, 0xFF}, {0x622, 0xFF}, {0x623, 0xFF}, {0x624, 0xFF}, {0x625, 0xFF}, {0x626, 0xFF}, {0x627, 0xFF}, {0x638, 0x28}, {0x63C, 0x0A}, {0x63D, 0x0A}, {0x63E, 0x0C}, {0x63F, 0x0C}, {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00}, {0x652, 0xC8}, {0x66A, 0xB0}, {0x66E, 0x05}, {0x700, 0x21}, {0x701, 0x43}, {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, {0x70A, 0x65}, {0x70B, 0x87}, {0xffff, 0xff}, }; /* If updating the phy init tables, also update rtl8710b_revise_cck_tx_psf().
*/ static const struct rtl8xxxu_reg32val rtl8710bu_qfn48m_u_phy_init_table[] = { {0x800, 0x80045700}, {0x804, 0x00000001}, {0x808, 0x00FC8000}, {0x80C, 0x0000000A}, {0x810, 0x10001331}, {0x814, 0x020C3D10}, {0x818, 0x00200385}, {0x81C, 0x00000000}, {0x820, 0x01000100}, {0x824, 0x00390204}, {0x828, 0x00000000}, {0x82C, 0x00000000}, {0x830, 0x00000000}, {0x834, 0x00000000}, {0x838, 0x00000000}, {0x83C, 0x00000000}, {0x840, 0x00010000}, {0x844, 0x00000000}, {0x848, 0x00000000}, {0x84C, 0x00000000}, {0x850, 0x00030000}, {0x854, 0x00000000}, {0x858, 0x7E1A569A}, {0x85C, 0x569A569A}, {0x860, 0x00000130}, {0x864, 0x20000000}, {0x868, 0x00000000}, {0x86C, 0x27272700}, {0x870, 0x00050000}, {0x874, 0x25005000}, {0x878, 0x00000808}, {0x87C, 0x004F0201}, {0x880, 0xB0000B1E}, {0x884, 0x00000007}, {0x888, 0x00000000}, {0x88C, 0xCCC400C0}, {0x890, 0x00000800}, {0x894, 0xFFFFFFFE}, {0x898, 0x40302010}, {0x89C, 0x00706050}, {0x900, 0x00000000}, {0x904, 0x00000023}, {0x908, 0x00000000}, {0x90C, 0x81121111}, {0x910, 0x00000402}, {0x914, 0x00000201}, {0x920, 0x18C6318C}, {0x924, 0x0000018C}, {0x948, 0x99000000}, {0x94C, 0x00000010}, {0x950, 0x00003000}, {0x954, 0x5A880000}, {0x958, 0x4BC6D87A}, {0x95C, 0x04EB9B79}, {0x96C, 0x00000003}, {0x970, 0x00000000}, {0x974, 0x00000000}, {0x978, 0x00000000}, {0x97C, 0x13000000}, {0x980, 0x00000000}, {0xA00, 0x00D046C8}, {0xA04, 0x80FF800C}, {0xA08, 0x84838300}, {0xA0C, 0x2E20100F}, {0xA10, 0x9500BB78}, {0xA14, 0x1114D028}, {0xA18, 0x00881117}, {0xA1C, 0x89140F00}, {0xA20, 0xE82C0001}, {0xA24, 0x64B80C1C}, {0xA28, 0x00008810}, {0xA2C, 0x00D30000}, {0xA70, 0x101FBF00}, {0xA74, 0x00000007}, {0xA78, 0x00000900}, {0xA7C, 0x225B0606}, {0xA80, 0x218075B1}, {0xA84, 0x00200000}, {0xA88, 0x040C0000}, {0xA8C, 0x12345678}, {0xA90, 0xABCDEF00}, {0xA94, 0x001B1B89}, {0xA98, 0x00000000}, {0xA9C, 0x80020000}, {0xAA0, 0x00000000}, {0xAA4, 0x0000000C}, {0xAA8, 0xCA110058}, {0xAAC, 0x01235667}, {0xAB0, 0x00000000}, {0xAB4, 0x20201402}, {0xB2C, 0x00000000}, {0xC00, 0x48071D40}, {0xC04, 0x03A05611}, {0xC08, 0x000000E4}, {0xC0C, 0x6C6C6C6C}, {0xC10, 0x18800000}, {0xC14, 0x40000100}, {0xC18, 0x08800000}, {0xC1C, 0x40000100}, {0xC20, 0x00000000}, {0xC24, 0x00000000}, {0xC28, 0x00000000}, {0xC2C, 0x00000000}, {0xC30, 0x69E9AC4A}, {0xC34, 0x31000040}, {0xC38, 0x21688080}, {0xC3C, 0x0000170C}, {0xC40, 0x1F78403F}, {0xC44, 0x00010036}, {0xC48, 0xEC020107}, {0xC4C, 0x007F037F}, {0xC50, 0x69553420}, {0xC54, 0x43BC0094}, {0xC58, 0x00013169}, {0xC5C, 0x00250492}, {0xC60, 0x00280A00}, {0xC64, 0x7112848B}, {0xC68, 0x47C074FF}, {0xC6C, 0x00000036}, {0xC70, 0x2C7F000D}, {0xC74, 0x020600DB}, {0xC78, 0x0000001F}, {0xC7C, 0x00B91612}, {0xC80, 0x390000E4}, {0xC84, 0x11F60000}, {0xC88, 0x1051B75F}, {0xC8C, 0x20200109}, {0xC90, 0x00091521}, {0xC94, 0x00000000}, {0xC98, 0x00121820}, {0xC9C, 0x00007F7F}, {0xCA0, 0x00011000}, {0xCA4, 0x800000A0}, {0xCA8, 0x84E6C606}, {0xCAC, 0x00000060}, {0xCB0, 0x00000000}, {0xCB4, 0x00000000}, {0xCB8, 0x00000000}, {0xCBC, 0x28000000}, {0xCC0, 0x1051B75F}, {0xCC4, 0x00000109}, {0xCC8, 0x000442D6}, {0xCCC, 0x00000000}, {0xCD0, 0x000001C8}, {0xCD4, 0x001C8000}, {0xCD8, 0x00000100}, {0xCDC, 0x40100000}, {0xCE0, 0x00222220}, {0xCE4, 0x10000000}, {0xCE8, 0x37644302}, {0xCEC, 0x2F97D40C}, {0xD00, 0x04030740}, {0xD04, 0x40020401}, {0xD08, 0x0000907F}, {0xD0C, 0x20010201}, {0xD10, 0xA0633333}, {0xD14, 0x3333BC53}, {0xD18, 0x7A8F5B6F}, {0xD2C, 0xCB979975}, {0xD30, 0x00000000}, {0xD34, 0x40608000}, {0xD38, 0x88000000}, {0xD3C, 0xC0127353}, {0xD40, 0x00000000}, {0xD44, 0x00000000}, {0xD48, 
0x00000000}, {0xD4C, 0x00000000}, {0xD50, 0x00006528}, {0xD54, 0x00000000}, {0xD58, 0x00000282}, {0xD5C, 0x30032064}, {0xD60, 0x4653DE68}, {0xD64, 0x04518A3C}, {0xD68, 0x00002101}, {0xE00, 0x2D2D2D2D}, {0xE04, 0x2D2D2D2D}, {0xE08, 0x0390272D}, {0xE10, 0x2D2D2D2D}, {0xE14, 0x2D2D2D2D}, {0xE18, 0x2D2D2D2D}, {0xE1C, 0x2D2D2D2D}, {0xE28, 0x00000000}, {0xE30, 0x1000DC1F}, {0xE34, 0x10008C1F}, {0xE38, 0x02140102}, {0xE3C, 0x681604C2}, {0xE40, 0x01007C00}, {0xE44, 0x01004800}, {0xE48, 0xFB000000}, {0xE4C, 0x000028D1}, {0xE50, 0x1000DC1F}, {0xE54, 0x10008C1F}, {0xE58, 0x02140102}, {0xE5C, 0x28160D05}, {0xE60, 0x0000C008}, {0xE68, 0x001B25A4}, {0xE64, 0x281600A0}, {0xE6C, 0x01C00010}, {0xE70, 0x01C00010}, {0xE74, 0x02000010}, {0xE78, 0x02000010}, {0xE7C, 0x02000010}, {0xE80, 0x02000010}, {0xE84, 0x01C00010}, {0xE88, 0x02000010}, {0xE8C, 0x01C00010}, {0xED0, 0x01C00010}, {0xED4, 0x01C00010}, {0xED8, 0x01C00010}, {0xEDC, 0x00000010}, {0xEE0, 0x00000010}, {0xEEC, 0x03C00010}, {0xF14, 0x00000003}, {0xF00, 0x00100300}, {0xF08, 0x0000800B}, {0xF0C, 0x0000F007}, {0xF10, 0x0000A487}, {0xF1C, 0x80000064}, {0xF38, 0x00030155}, {0xF3C, 0x0000003A}, {0xF4C, 0x13000000}, {0xF50, 0x00000000}, {0xF18, 0x00000000}, {0xffff, 0xffffffff}, }; /* If updating the phy init tables, also update rtl8710b_revise_cck_tx_psf(). */ static const struct rtl8xxxu_reg32val rtl8710bu_qfn48m_s_phy_init_table[] = { {0x800, 0x80045700}, {0x804, 0x00000001}, {0x808, 0x00FC8000}, {0x80C, 0x0000000A}, {0x810, 0x10001331}, {0x814, 0x020C3D10}, {0x818, 0x00200385}, {0x81C, 0x00000000}, {0x820, 0x01000100}, {0x824, 0x00390204}, {0x828, 0x00000000}, {0x82C, 0x00000000}, {0x830, 0x00000000}, {0x834, 0x00000000}, {0x838, 0x00000000}, {0x83C, 0x00000000}, {0x840, 0x00010000}, {0x844, 0x00000000}, {0x848, 0x00000000}, {0x84C, 0x00000000}, {0x850, 0x00030000}, {0x854, 0x00000000}, {0x858, 0x7E1A569A}, {0x85C, 0x569A569A}, {0x860, 0x00000130}, {0x864, 0x20000000}, {0x868, 0x00000000}, {0x86C, 0x27272700}, {0x870, 0x00050000}, {0x874, 0x25005000}, {0x878, 0x00000808}, {0x87C, 0x004F0201}, {0x880, 0xB0000B1E}, {0x884, 0x00000007}, {0x888, 0x00000000}, {0x88C, 0xCCC400C0}, {0x890, 0x00000800}, {0x894, 0xFFFFFFFE}, {0x898, 0x40302010}, {0x89C, 0x00706050}, {0x900, 0x00000000}, {0x904, 0x00000023}, {0x908, 0x00000000}, {0x90C, 0x81121111}, {0x910, 0x00000402}, {0x914, 0x00000201}, {0x920, 0x18C6318C}, {0x924, 0x0000018C}, {0x948, 0x99000000}, {0x94C, 0x00000010}, {0x950, 0x00003000}, {0x954, 0x5A880000}, {0x958, 0x4BC6D87A}, {0x95C, 0x04EB9B79}, {0x96C, 0x00000003}, {0x970, 0x00000000}, {0x974, 0x00000000}, {0x978, 0x00000000}, {0x97C, 0x13000000}, {0x980, 0x00000000}, {0xA00, 0x00D046C8}, {0xA04, 0x80FF800C}, {0xA08, 0x84838300}, {0xA0C, 0x2A20100F}, {0xA10, 0x9500BB78}, {0xA14, 0x1114D028}, {0xA18, 0x00881117}, {0xA1C, 0x89140F00}, {0xA20, 0xE82C0001}, {0xA24, 0x64B80C1C}, {0xA28, 0x00008810}, {0xA2C, 0x00D30000}, {0xA70, 0x101FBF00}, {0xA74, 0x00000007}, {0xA78, 0x00000900}, {0xA7C, 0x225B0606}, {0xA80, 0x218075B1}, {0xA84, 0x00200000}, {0xA88, 0x040C0000}, {0xA8C, 0x12345678}, {0xA90, 0xABCDEF00}, {0xA94, 0x001B1B89}, {0xA98, 0x00000000}, {0xA9C, 0x80020000}, {0xAA0, 0x00000000}, {0xAA4, 0x0000000C}, {0xAA8, 0xCA110058}, {0xAAC, 0x01235667}, {0xAB0, 0x00000000}, {0xAB4, 0x20201402}, {0xB2C, 0x00000000}, {0xC00, 0x48071D40}, {0xC04, 0x03A05611}, {0xC08, 0x000000E4}, {0xC0C, 0x6C6C6C6C}, {0xC10, 0x18800000}, {0xC14, 0x40000100}, {0xC18, 0x08800000}, {0xC1C, 0x40000100}, {0xC20, 0x00000000}, {0xC24, 0x00000000}, {0xC28, 0x00000000}, {0xC2C, 
0x00000000}, {0xC30, 0x69E9AC4A}, {0xC34, 0x31000040}, {0xC38, 0x21688080}, {0xC3C, 0x0000170C}, {0xC40, 0x1F78403F}, {0xC44, 0x00010036}, {0xC48, 0xEC020107}, {0xC4C, 0x007F037F}, {0xC50, 0x69553420}, {0xC54, 0x43BC0094}, {0xC58, 0x00013169}, {0xC5C, 0x00250492}, {0xC60, 0x00280A00}, {0xC64, 0x7112848B}, {0xC68, 0x47C074FF}, {0xC6C, 0x00000036}, {0xC70, 0x2C7F000D}, {0xC74, 0x020600DB}, {0xC78, 0x0000001F}, {0xC7C, 0x00B91612}, {0xC80, 0x390000E4}, {0xC84, 0x11F60000}, {0xC88, 0x1051B75F}, {0xC8C, 0x20200109}, {0xC90, 0x00091521}, {0xC94, 0x00000000}, {0xC98, 0x00121820}, {0xC9C, 0x00007F7F}, {0xCA0, 0x00011000}, {0xCA4, 0x800000A0}, {0xCA8, 0x84E6C606}, {0xCAC, 0x00000060}, {0xCB0, 0x00000000}, {0xCB4, 0x00000000}, {0xCB8, 0x00000000}, {0xCBC, 0x28000000}, {0xCC0, 0x1051B75F}, {0xCC4, 0x00000109}, {0xCC8, 0x000442D6}, {0xCCC, 0x00000000}, {0xCD0, 0x000001C8}, {0xCD4, 0x001C8000}, {0xCD8, 0x00000100}, {0xCDC, 0x40100000}, {0xCE0, 0x00222220}, {0xCE4, 0x10000000}, {0xCE8, 0x37644302}, {0xCEC, 0x2F97D40C}, {0xD00, 0x04030740}, {0xD04, 0x40020401}, {0xD08, 0x0000907F}, {0xD0C, 0x20010201}, {0xD10, 0xA0633333}, {0xD14, 0x3333BC53}, {0xD18, 0x7A8F5B6F}, {0xD2C, 0xCB979975}, {0xD30, 0x00000000}, {0xD34, 0x40608000}, {0xD38, 0x88000000}, {0xD3C, 0xC0127353}, {0xD40, 0x00000000}, {0xD44, 0x00000000}, {0xD48, 0x00000000}, {0xD4C, 0x00000000}, {0xD50, 0x00006528}, {0xD54, 0x00000000}, {0xD58, 0x00000282}, {0xD5C, 0x30032064}, {0xD60, 0x4653DE68}, {0xD64, 0x04518A3C}, {0xD68, 0x00002101}, {0xE00, 0x2D2D2D2D}, {0xE04, 0x2D2D2D2D}, {0xE08, 0x0390272D}, {0xE10, 0x2D2D2D2D}, {0xE14, 0x2D2D2D2D}, {0xE18, 0x2D2D2D2D}, {0xE1C, 0x2D2D2D2D}, {0xE28, 0x00000000}, {0xE30, 0x1000DC1F}, {0xE34, 0x10008C1F}, {0xE38, 0x02140102}, {0xE3C, 0x681604C2}, {0xE40, 0x01007C00}, {0xE44, 0x01004800}, {0xE48, 0xFB000000}, {0xE4C, 0x000028D1}, {0xE50, 0x1000DC1F}, {0xE54, 0x10008C1F}, {0xE58, 0x02140102}, {0xE5C, 0x28160D05}, {0xE60, 0x0000C008}, {0xE68, 0x001B25A4}, {0xE64, 0x281600A0}, {0xE6C, 0x01C00010}, {0xE70, 0x01C00010}, {0xE74, 0x02000010}, {0xE78, 0x02000010}, {0xE7C, 0x02000010}, {0xE80, 0x02000010}, {0xE84, 0x01C00010}, {0xE88, 0x02000010}, {0xE8C, 0x01C00010}, {0xED0, 0x01C00010}, {0xED4, 0x01C00010}, {0xED8, 0x01C00010}, {0xEDC, 0x00000010}, {0xEE0, 0x00000010}, {0xEEC, 0x03C00010}, {0xF14, 0x00000003}, {0xF00, 0x00100300}, {0xF08, 0x0000800B}, {0xF0C, 0x0000F007}, {0xF10, 0x0000A487}, {0xF1C, 0x80000064}, {0xF38, 0x00030155}, {0xF3C, 0x0000003A}, {0xF4C, 0x13000000}, {0xF50, 0x00000000}, {0xF18, 0x00000000}, {0xffff, 0xffffffff}, }; static const struct rtl8xxxu_reg32val rtl8710b_agc_table[] = { {0xC78, 0xFC000001}, {0xC78, 0xFB010001}, {0xC78, 0xFA020001}, {0xC78, 0xF9030001}, {0xC78, 0xF8040001}, {0xC78, 0xF7050001}, {0xC78, 0xF6060001}, {0xC78, 0xF5070001}, {0xC78, 0xF4080001}, {0xC78, 0xF3090001}, {0xC78, 0xF20A0001}, {0xC78, 0xF10B0001}, {0xC78, 0xF00C0001}, {0xC78, 0xEF0D0001}, {0xC78, 0xEE0E0001}, {0xC78, 0xED0F0001}, {0xC78, 0xEC100001}, {0xC78, 0xEB110001}, {0xC78, 0xEA120001}, {0xC78, 0xE9130001}, {0xC78, 0xE8140001}, {0xC78, 0xE7150001}, {0xC78, 0xE6160001}, {0xC78, 0xE5170001}, {0xC78, 0xE4180001}, {0xC78, 0xE3190001}, {0xC78, 0xE21A0001}, {0xC78, 0xE11B0001}, {0xC78, 0xE01C0001}, {0xC78, 0xC31D0001}, {0xC78, 0xC21E0001}, {0xC78, 0xC11F0001}, {0xC78, 0xC0200001}, {0xC78, 0xA3210001}, {0xC78, 0xA2220001}, {0xC78, 0xA1230001}, {0xC78, 0xA0240001}, {0xC78, 0x86250001}, {0xC78, 0x85260001}, {0xC78, 0x84270001}, {0xC78, 0x83280001}, {0xC78, 0x82290001}, {0xC78, 0x812A0001}, {0xC78, 0x802B0001}, {0xC78, 
0x632C0001}, {0xC78, 0x622D0001}, {0xC78, 0x612E0001}, {0xC78, 0x602F0001}, {0xC78, 0x42300001}, {0xC78, 0x41310001}, {0xC78, 0x40320001}, {0xC78, 0x23330001}, {0xC78, 0x22340001}, {0xC78, 0x21350001}, {0xC78, 0x20360001}, {0xC78, 0x02370001}, {0xC78, 0x01380001}, {0xC78, 0x00390001}, {0xC78, 0x003A0001}, {0xC78, 0x003B0001}, {0xC78, 0x003C0001}, {0xC78, 0x003D0001}, {0xC78, 0x003E0001}, {0xC78, 0x003F0001}, {0xC78, 0xF7400001}, {0xC78, 0xF7410001}, {0xC78, 0xF7420001}, {0xC78, 0xF7430001}, {0xC78, 0xF7440001}, {0xC78, 0xF7450001}, {0xC78, 0xF7460001}, {0xC78, 0xF7470001}, {0xC78, 0xF7480001}, {0xC78, 0xF6490001}, {0xC78, 0xF34A0001}, {0xC78, 0xF24B0001}, {0xC78, 0xF14C0001}, {0xC78, 0xF04D0001}, {0xC78, 0xD14E0001}, {0xC78, 0xD04F0001}, {0xC78, 0xB5500001}, {0xC78, 0xB4510001}, {0xC78, 0xB3520001}, {0xC78, 0xB2530001}, {0xC78, 0xB1540001}, {0xC78, 0xB0550001}, {0xC78, 0xAF560001}, {0xC78, 0xAE570001}, {0xC78, 0xAD580001}, {0xC78, 0xAC590001}, {0xC78, 0xAB5A0001}, {0xC78, 0xAA5B0001}, {0xC78, 0xA95C0001}, {0xC78, 0xA85D0001}, {0xC78, 0xA75E0001}, {0xC78, 0xA65F0001}, {0xC78, 0xA5600001}, {0xC78, 0xA4610001}, {0xC78, 0xA3620001}, {0xC78, 0xA2630001}, {0xC78, 0xA1640001}, {0xC78, 0xA0650001}, {0xC78, 0x87660001}, {0xC78, 0x86670001}, {0xC78, 0x85680001}, {0xC78, 0x84690001}, {0xC78, 0x836A0001}, {0xC78, 0x826B0001}, {0xC78, 0x816C0001}, {0xC78, 0x806D0001}, {0xC78, 0x636E0001}, {0xC78, 0x626F0001}, {0xC78, 0x61700001}, {0xC78, 0x60710001}, {0xC78, 0x42720001}, {0xC78, 0x41730001}, {0xC78, 0x40740001}, {0xC78, 0x23750001}, {0xC78, 0x22760001}, {0xC78, 0x21770001}, {0xC78, 0x20780001}, {0xC78, 0x03790001}, {0xC78, 0x027A0001}, {0xC78, 0x017B0001}, {0xC78, 0x007C0001}, {0xC78, 0x007D0001}, {0xC78, 0x007E0001}, {0xC78, 0x007F0001}, {0xC50, 0x69553422}, {0xC50, 0x69553420}, {0xffff, 0xffffffff} }; static const struct rtl8xxxu_rfregval rtl8710bu_qfn48m_u_radioa_init_table[] = { {0x00, 0x00030000}, {0x08, 0x00008400}, {0x17, 0x00000000}, {0x18, 0x00000C01}, {0x19, 0x000739D2}, {0x1C, 0x00000C4C}, {0x1B, 0x00000C6C}, {0x1E, 0x00080009}, {0x1F, 0x00000880}, {0x2F, 0x0001A060}, {0x3F, 0x00015000}, {0x42, 0x000060C0}, {0x57, 0x000D0000}, {0x58, 0x000C0160}, {0x67, 0x00001552}, {0x83, 0x00000000}, {0xB0, 0x000FF9F0}, {0xB1, 0x00010018}, {0xB2, 0x00054C00}, {0xB4, 0x0004486B}, {0xB5, 0x0000112A}, {0xB6, 0x0000053E}, {0xB7, 0x00014408}, {0xB8, 0x00010200}, {0xB9, 0x00080801}, {0xBA, 0x00040001}, {0xBB, 0x00000400}, {0xBF, 0x000C0000}, {0xC2, 0x00002400}, {0xC3, 0x00000009}, {0xC4, 0x00040C91}, {0xC5, 0x00099999}, {0xC6, 0x000000A3}, {0xC7, 0x00088820}, {0xC8, 0x00076C06}, {0xC9, 0x00000000}, {0xCA, 0x00080000}, {0xDF, 0x00000180}, {0xEF, 0x000001A8}, {0x3D, 0x00000003}, {0x3D, 0x00080003}, {0x51, 0x000F1E69}, {0x52, 0x000FBF6C}, {0x53, 0x0000032F}, {0x54, 0x00055007}, {0x56, 0x000517F0}, {0x35, 0x000000F4}, {0x35, 0x00000179}, {0x35, 0x000002F4}, {0x36, 0x00000BF8}, {0x36, 0x00008BF8}, {0x36, 0x00010BF8}, {0x36, 0x00018BF8}, {0x18, 0x00000C01}, {0x5A, 0x00048000}, {0x5A, 0x00048000}, {0x34, 0x0000ADF5}, {0x34, 0x00009DF2}, {0x34, 0x00008DEF}, {0x34, 0x00007DEC}, {0x34, 0x00006DE9}, {0x34, 0x00005CEC}, {0x34, 0x00004CE9}, {0x34, 0x00003C6C}, {0x34, 0x00002C69}, {0x34, 0x0000106E}, {0x34, 0x0000006B}, {0x84, 0x00048000}, {0x87, 0x00000065}, {0x8E, 0x00065540}, {0xDF, 0x00000110}, {0x86, 0x0000002A}, {0x8F, 0x00088000}, {0x81, 0x0003FD80}, {0xEF, 0x00082000}, {0x3B, 0x000F0F00}, {0x3B, 0x000E0E00}, {0x3B, 0x000DFE00}, {0x3B, 0x000C0D00}, {0x3B, 0x000B0C00}, {0x3B, 0x000A0500}, {0x3B, 0x00090400}, {0x3B, 
0x00080000}, {0x3B, 0x00070F00}, {0x3B, 0x00060E00}, {0x3B, 0x00050A00}, {0x3B, 0x00040D00}, {0x3B, 0x00030C00}, {0x3B, 0x00020500}, {0x3B, 0x00010400}, {0x3B, 0x00000000}, {0xEF, 0x00080000}, {0xEF, 0x00088000}, {0x3B, 0x00000170}, {0x3B, 0x000C0030}, {0xEF, 0x00080000}, {0xEF, 0x00080000}, {0x30, 0x00010000}, {0x31, 0x0000000F}, {0x32, 0x00047EFE}, {0xEF, 0x00000000}, {0x00, 0x00010159}, {0x18, 0x0000FC01}, {0xFE, 0x00000000}, {0x00, 0x00033D95}, {0xff, 0xffffffff} }; static const struct rtl8xxxu_rfregval rtl8710bu_qfn48m_s_radioa_init_table[] = { {0x00, 0x00030000}, {0x08, 0x00008400}, {0x17, 0x00000000}, {0x18, 0x00000C01}, {0x19, 0x000739D2}, {0x1C, 0x00000C4C}, {0x1B, 0x00000C6C}, {0x1E, 0x00080009}, {0x1F, 0x00000880}, {0x2F, 0x0001A060}, {0x3F, 0x00015000}, {0x42, 0x000060C0}, {0x57, 0x000D0000}, {0x58, 0x000C0160}, {0x67, 0x00001552}, {0x83, 0x00000000}, {0xB0, 0x000FF9F0}, {0xB1, 0x00010018}, {0xB2, 0x00054C00}, {0xB4, 0x0004486B}, {0xB5, 0x0000112A}, {0xB6, 0x0000053E}, {0xB7, 0x00014408}, {0xB8, 0x00010200}, {0xB9, 0x00080801}, {0xBA, 0x00040001}, {0xBB, 0x00000400}, {0xBF, 0x000C0000}, {0xC2, 0x00002400}, {0xC3, 0x00000009}, {0xC4, 0x00040C91}, {0xC5, 0x00099999}, {0xC6, 0x000000A3}, {0xC7, 0x00088820}, {0xC8, 0x00076C06}, {0xC9, 0x00000000}, {0xCA, 0x00080000}, {0xDF, 0x00000180}, {0xEF, 0x000001A8}, {0x3D, 0x00000003}, {0x3D, 0x00080003}, {0x51, 0x000F1E69}, {0x52, 0x000FBF6C}, {0x53, 0x0000032F}, {0x54, 0x00055007}, {0x56, 0x000517F0}, {0x35, 0x000000F4}, {0x35, 0x00000179}, {0x35, 0x000002F4}, {0x36, 0x00000BF8}, {0x36, 0x00008BF8}, {0x36, 0x00010BF8}, {0x36, 0x00018BF8}, {0x18, 0x00000C01}, {0x5A, 0x00048000}, {0x5A, 0x00048000}, {0x34, 0x0000ADF5}, {0x34, 0x00009DF2}, {0x34, 0x00008DEF}, {0x34, 0x00007DEC}, {0x34, 0x00006DE9}, {0x34, 0x00005CEC}, {0x34, 0x00004CE9}, {0x34, 0x00003C6C}, {0x34, 0x00002C69}, {0x34, 0x0000106E}, {0x34, 0x0000006B}, {0x84, 0x00048000}, {0x87, 0x00000065}, {0x8E, 0x00065540}, {0xDF, 0x00000110}, {0x86, 0x0000002A}, {0x8F, 0x00088000}, {0x81, 0x0003FD80}, {0xEF, 0x00082000}, {0x3B, 0x000F0F00}, {0x3B, 0x000E0E00}, {0x3B, 0x000DFE00}, {0x3B, 0x000C0D00}, {0x3B, 0x000B0C00}, {0x3B, 0x000A0500}, {0x3B, 0x00090400}, {0x3B, 0x00080000}, {0x3B, 0x00070F00}, {0x3B, 0x00060E00}, {0x3B, 0x00050A00}, {0x3B, 0x00040D00}, {0x3B, 0x00030C00}, {0x3B, 0x00020500}, {0x3B, 0x00010400}, {0x3B, 0x00000000}, {0xEF, 0x00080000}, {0xEF, 0x00088000}, {0x3B, 0x000000B0}, {0x3B, 0x000C0030}, {0xEF, 0x00080000}, {0xEF, 0x00080000}, {0x30, 0x00010000}, {0x31, 0x0000000F}, {0x32, 0x00047EFE}, {0xEF, 0x00000000}, {0x00, 0x00010159}, {0x18, 0x0000FC01}, {0xFE, 0x00000000}, {0x00, 0x00033D95}, {0xff, 0xffffffff} }; static u32 rtl8710b_indirect_read32(struct rtl8xxxu_priv *priv, u32 addr) { struct device *dev = &priv->udev->dev; u32 val32, value = 0xffffffff; u8 polling_count = 0xff; if (!IS_ALIGNED(addr, 4)) { dev_warn(dev, "%s: Aborting because 0x%x is not a multiple of 4.\n", __func__, addr); return value; } mutex_lock(&priv->syson_indirect_access_mutex); rtl8xxxu_write32(priv, REG_USB_HOST_INDIRECT_ADDR_8710B, addr); rtl8xxxu_write32(priv, REG_EFUSE_INDIRECT_CTRL_8710B, NORMAL_REG_READ_OFFSET); do val32 = rtl8xxxu_read32(priv, REG_EFUSE_INDIRECT_CTRL_8710B); while ((val32 & BIT(31)) && (--polling_count > 0)); if (polling_count == 0) dev_warn(dev, "%s: Failed to read from 0x%x, 0x806c = 0x%x\n", __func__, addr, val32); else value = rtl8xxxu_read32(priv, REG_USB_HOST_INDIRECT_DATA_8710B); mutex_unlock(&priv->syson_indirect_access_mutex); if (rtl8xxxu_debug & 
RTL8XXXU_DEBUG_REG_READ) dev_info(dev, "%s(%04x) = 0x%08x\n", __func__, addr, value); return value; } static void rtl8710b_indirect_write32(struct rtl8xxxu_priv *priv, u32 addr, u32 val) { struct device *dev = &priv->udev->dev; u8 polling_count = 0xff; u32 val32; if (!IS_ALIGNED(addr, 4)) { dev_warn(dev, "%s: Aborting because 0x%x is not a multiple of 4.\n", __func__, addr); return; } mutex_lock(&priv->syson_indirect_access_mutex); rtl8xxxu_write32(priv, REG_USB_HOST_INDIRECT_ADDR_8710B, addr); rtl8xxxu_write32(priv, REG_USB_HOST_INDIRECT_DATA_8710B, val); rtl8xxxu_write32(priv, REG_EFUSE_INDIRECT_CTRL_8710B, NORMAL_REG_WRITE_OFFSET); do val32 = rtl8xxxu_read32(priv, REG_EFUSE_INDIRECT_CTRL_8710B); while ((val32 & BIT(31)) && (--polling_count > 0)); if (polling_count == 0) dev_warn(dev, "%s: Failed to write 0x%x to 0x%x, 0x806c = 0x%x\n", __func__, val, addr, val32); mutex_unlock(&priv->syson_indirect_access_mutex); if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE) dev_info(dev, "%s(%04x) = 0x%08x\n", __func__, addr, val); } static u32 rtl8710b_read_syson_reg(struct rtl8xxxu_priv *priv, u32 addr) { return rtl8710b_indirect_read32(priv, addr | SYSON_REG_BASE_ADDR_8710B); } static void rtl8710b_write_syson_reg(struct rtl8xxxu_priv *priv, u32 addr, u32 val) { rtl8710b_indirect_write32(priv, addr | SYSON_REG_BASE_ADDR_8710B, val); } static int rtl8710b_read_efuse8(struct rtl8xxxu_priv *priv, u16 offset, u8 *data) { u32 val32; int i; /* Write Address */ rtl8xxxu_write32(priv, REG_USB_HOST_INDIRECT_ADDR_8710B, offset); rtl8xxxu_write32(priv, REG_EFUSE_INDIRECT_CTRL_8710B, EFUSE_READ_OFFSET); /* Poll for data read */ val32 = rtl8xxxu_read32(priv, REG_EFUSE_INDIRECT_CTRL_8710B); for (i = 0; i < RTL8XXXU_MAX_REG_POLL; i++) { val32 = rtl8xxxu_read32(priv, REG_EFUSE_INDIRECT_CTRL_8710B); if (!(val32 & BIT(31))) break; } if (i == RTL8XXXU_MAX_REG_POLL) return -EIO; val32 = rtl8xxxu_read32(priv, REG_USB_HOST_INDIRECT_DATA_8710B); *data = val32 & 0xff; return 0; } #define EEPROM_PACKAGE_TYPE_8710B 0xF8 #define PACKAGE_QFN48M_U 0xee #define PACKAGE_QFN48M_S 0xfe static int rtl8710bu_identify_chip(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; u32 cfg0, cfg2, vendor; u8 package_type = 0x7; /* a nonsense value */ sprintf(priv->chip_name, "8710BU"); priv->rtl_chip = RTL8710B; priv->rf_paths = 1; priv->rx_paths = 1; priv->tx_paths = 1; priv->has_wifi = 1; cfg0 = rtl8710b_read_syson_reg(priv, REG_SYS_SYSTEM_CFG0_8710B); priv->chip_cut = cfg0 & 0xf; if (cfg0 & BIT(16)) { dev_info(dev, "%s: Unsupported test chip\n", __func__); return -EOPNOTSUPP; } vendor = u32_get_bits(cfg0, 0xc0); /* SMIC and TSMC are swapped compared to rtl8xxxu_identify_vendor_2bits */ switch (vendor) { case 0: sprintf(priv->chip_vendor, "SMIC"); priv->vendor_smic = 1; break; case 1: sprintf(priv->chip_vendor, "TSMC"); break; case 2: sprintf(priv->chip_vendor, "UMC"); priv->vendor_umc = 1; break; default: sprintf(priv->chip_vendor, "unknown"); break; } rtl8710b_read_efuse8(priv, EEPROM_PACKAGE_TYPE_8710B, &package_type); if (package_type == 0xff) { dev_warn(dev, "Package type is undefined. Assuming it based on the vendor.\n"); if (priv->vendor_umc) { package_type = PACKAGE_QFN48M_U; } else if (priv->vendor_smic) { package_type = PACKAGE_QFN48M_S; } else { dev_warn(dev, "The vendor is neither UMC nor SMIC. Assuming the package type is QFN48M_U.\n"); /* * In this case the vendor driver doesn't set * the package type to anything, which is the * same as setting it to PACKAGE_DEFAULT (0). 
*/ package_type = PACKAGE_QFN48M_U; } } else if (package_type != PACKAGE_QFN48M_S && package_type != PACKAGE_QFN48M_U) { dev_warn(dev, "Failed to read the package type. Assuming it's the default QFN48M_U.\n"); /* * In this case the vendor driver actually sets it to * PACKAGE_DEFAULT, but that selects the same values * from the init tables as PACKAGE_QFN48M_U. */ package_type = PACKAGE_QFN48M_U; } priv->package_type = package_type; dev_dbg(dev, "Package type: 0x%x\n", package_type); cfg2 = rtl8710b_read_syson_reg(priv, REG_SYS_SYSTEM_CFG2_8710B); priv->rom_rev = cfg2 & 0xf; return rtl8xxxu_config_endpoints_no_sie(priv); } static void rtl8710b_revise_cck_tx_psf(struct rtl8xxxu_priv *priv, u8 channel) { if (channel == 13) { /* Normal values */ rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER2, 0x64B80C1C); rtl8xxxu_write32(priv, REG_CCK0_DEBUG_PORT, 0x00008810); rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER3, 0x01235667); /* Special value for channel 13 */ rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER1, 0xd1d80001); } else if (channel == 14) { /* Special values for channel 14 */ rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER2, 0x0000B81C); rtl8xxxu_write32(priv, REG_CCK0_DEBUG_PORT, 0x00000000); rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER3, 0x00003667); /* Normal value */ rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER1, 0xE82C0001); } else { /* Restore normal values from the phy init table */ rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER2, 0x64B80C1C); rtl8xxxu_write32(priv, REG_CCK0_DEBUG_PORT, 0x00008810); rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER3, 0x01235667); rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER1, 0xE82C0001); } } static void rtl8710bu_config_channel(struct ieee80211_hw *hw) { struct rtl8xxxu_priv *priv = hw->priv; bool ht40 = conf_is_ht40(&hw->conf); u8 channel, subchannel = 0; bool sec_ch_above = 0; u32 val32; u16 val16; channel = (u8)hw->conf.chandef.chan->hw_value; if (conf_is_ht40_plus(&hw->conf)) { sec_ch_above = 1; channel += 2; subchannel = 2; } else if (conf_is_ht40_minus(&hw->conf)) { sec_ch_above = 0; channel -= 2; subchannel = 1; } /* Set channel */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG); u32p_replace_bits(&val32, channel, MODE_AG_CHANNEL_MASK); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32); rtl8710b_revise_cck_tx_psf(priv, channel); /* Set bandwidth mode */ val16 = rtl8xxxu_read16(priv, REG_WMAC_TRXPTCL_CTL); val16 &= ~WMAC_TRXPTCL_CTL_BW_MASK; if (ht40) val16 |= WMAC_TRXPTCL_CTL_BW_40; rtl8xxxu_write16(priv, REG_WMAC_TRXPTCL_CTL, val16); rtl8xxxu_write8(priv, REG_DATA_SUBCHANNEL, subchannel); val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); u32p_replace_bits(&val32, ht40, FPGA_RF_MODE); rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE); u32p_replace_bits(&val32, ht40, FPGA_RF_MODE); rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32); if (ht40) { /* Set Control channel to upper or lower. 
*/ val32 = rtl8xxxu_read32(priv, REG_CCK0_SYSTEM); u32p_replace_bits(&val32, !sec_ch_above, CCK0_SIDEBAND); rtl8xxxu_write32(priv, REG_CCK0_SYSTEM, val32); } /* RXADC CLK */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); val32 |= GENMASK(10, 8); rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); /* TXDAC CLK */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); val32 |= BIT(14) | BIT(12); val32 &= ~BIT(13); rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); /* small BW */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT); val32 &= ~GENMASK(31, 30); rtl8xxxu_write32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT, val32); /* adc buffer clk */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT); val32 &= ~BIT(29); val32 |= BIT(28); rtl8xxxu_write32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT, val32); /* adc buffer clk */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_AFE); val32 &= ~BIT(29); val32 |= BIT(28); rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_AFE, val32); val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE); val32 &= ~BIT(30); val32 |= BIT(29); rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32); if (ht40) { val32 = rtl8xxxu_read32(priv, REG_OFDM_RX_DFIR); val32 &= ~BIT(19); rtl8xxxu_write32(priv, REG_OFDM_RX_DFIR, val32); val32 = rtl8xxxu_read32(priv, REG_OFDM_RX_DFIR); val32 &= ~GENMASK(23, 20); rtl8xxxu_write32(priv, REG_OFDM_RX_DFIR, val32); val32 = rtl8xxxu_read32(priv, REG_OFDM_RX_DFIR); val32 &= ~GENMASK(27, 24); rtl8xxxu_write32(priv, REG_OFDM_RX_DFIR, val32); /* RF TRX_BW */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG); val32 &= ~MODE_AG_BW_MASK; val32 |= MODE_AG_BW_40MHZ_8723B; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32); } else { val32 = rtl8xxxu_read32(priv, REG_OFDM_RX_DFIR); val32 |= BIT(19); rtl8xxxu_write32(priv, REG_OFDM_RX_DFIR, val32); val32 = rtl8xxxu_read32(priv, REG_OFDM_RX_DFIR); val32 &= ~GENMASK(23, 20); val32 |= BIT(23); rtl8xxxu_write32(priv, REG_OFDM_RX_DFIR, val32); val32 = rtl8xxxu_read32(priv, REG_OFDM_RX_DFIR); val32 &= ~GENMASK(27, 24); val32 |= BIT(27) | BIT(25); rtl8xxxu_write32(priv, REG_OFDM_RX_DFIR, val32); /* RF TRX_BW */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG); val32 &= ~MODE_AG_BW_MASK; val32 |= MODE_AG_BW_20MHZ_8723B; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32); } } static void rtl8710bu_init_aggregation(struct rtl8xxxu_priv *priv) { u32 agg_rx; u8 agg_ctrl; /* RX aggregation */ agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL); agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN; agg_rx = rtl8xxxu_read32(priv, REG_RXDMA_AGG_PG_TH); agg_rx &= ~RXDMA_USB_AGG_ENABLE; agg_rx &= ~0xFF0F; /* reset agg size and timeout */ rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl); rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, agg_rx); } static void rtl8710bu_init_statistics(struct rtl8xxxu_priv *priv) { u32 val32; /* Time duration for NHM unit: 4us, 0xc350=200ms */ rtl8xxxu_write16(priv, REG_NHM_TIMER_8723B + 2, 0xc350); rtl8xxxu_write16(priv, REG_NHM_TH9_TH10_8723B + 2, 0xffff); rtl8xxxu_write32(priv, REG_NHM_TH3_TO_TH0_8723B, 0xffffff50); rtl8xxxu_write32(priv, REG_NHM_TH7_TO_TH4_8723B, 0xffffffff); /* TH8 */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 |= 0xff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* Enable CCK */ val32 = rtl8xxxu_read32(priv, REG_NHM_TH9_TH10_8723B); val32 &= ~(BIT(8) | BIT(9) | BIT(10)); val32 |= BIT(8); rtl8xxxu_write32(priv, REG_NHM_TH9_TH10_8723B, val32); /* Max power amongst all RX antennas */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_FA_RSTC); val32 |= BIT(7); 
rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32); } static int rtl8710b_read_efuse(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; u8 val8, word_mask, header, extheader; u16 efuse_addr, offset; int i, ret = 0; u32 val32; val32 = rtl8710b_read_syson_reg(priv, REG_SYS_EEPROM_CTRL0_8710B); priv->boot_eeprom = u32_get_bits(val32, EEPROM_BOOT); priv->has_eeprom = u32_get_bits(val32, EEPROM_ENABLE); /* Default value is 0xff */ memset(priv->efuse_wifi.raw, 0xff, EFUSE_MAP_LEN); efuse_addr = 0; while (efuse_addr < EFUSE_REAL_CONTENT_LEN_8723A) { u16 map_addr; ret = rtl8710b_read_efuse8(priv, efuse_addr++, &header); if (ret || header == 0xff) goto exit; if ((header & 0x1f) == 0x0f) { /* extended header */ offset = (header & 0xe0) >> 5; ret = rtl8710b_read_efuse8(priv, efuse_addr++, &extheader); if (ret) goto exit; /* All words disabled */ if ((extheader & 0x0f) == 0x0f) continue; offset |= ((extheader & 0xf0) >> 1); word_mask = extheader & 0x0f; } else { offset = (header >> 4) & 0x0f; word_mask = header & 0x0f; } /* Get word enable value from PG header */ /* We have 8 bits to indicate validity */ map_addr = offset * 8; for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { /* Check word enable condition in the section */ if (word_mask & BIT(i)) { map_addr += 2; continue; } ret = rtl8710b_read_efuse8(priv, efuse_addr++, &val8); if (ret) goto exit; if (map_addr >= EFUSE_MAP_LEN - 1) { dev_warn(dev, "%s: Illegal map_addr (%04x), efuse corrupt!\n", __func__, map_addr); ret = -EINVAL; goto exit; } priv->efuse_wifi.raw[map_addr++] = val8; ret = rtl8710b_read_efuse8(priv, efuse_addr++, &val8); if (ret) goto exit; priv->efuse_wifi.raw[map_addr++] = val8; } } exit: return ret; } static int rtl8710bu_parse_efuse(struct rtl8xxxu_priv *priv) { struct rtl8710bu_efuse *efuse = &priv->efuse_wifi.efuse8710bu; if (efuse->rtl_id != cpu_to_le16(0x8195)) return -EINVAL; ether_addr_copy(priv->mac_addr, efuse->mac_addr); memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base, sizeof(efuse->tx_power_index_A.cck_base)); memcpy(priv->ht40_1s_tx_power_index_A, efuse->tx_power_index_A.ht40_base, sizeof(efuse->tx_power_index_A.ht40_base)); priv->ofdm_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.a; priv->ht20_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.b; priv->default_crystal_cap = efuse->xtal_k & 0x3f; return 0; } static int rtl8710bu_load_firmware(struct rtl8xxxu_priv *priv) { if (priv->vendor_smic) { return rtl8xxxu_load_firmware(priv, "rtlwifi/rtl8710bufw_SMIC.bin"); } else if (priv->vendor_umc) { return rtl8xxxu_load_firmware(priv, "rtlwifi/rtl8710bufw_UMC.bin"); } else { dev_err(&priv->udev->dev, "We have no suitable firmware for this chip.\n"); return -1; } } static void rtl8710bu_init_phy_bb(struct rtl8xxxu_priv *priv) { const struct rtl8xxxu_reg32val *phy_init_table; u32 val32; /* Enable BB and RF */ val32 = rtl8xxxu_read32(priv, REG_SYS_FUNC_8710B); val32 |= GENMASK(17, 16) | GENMASK(26, 24); rtl8xxxu_write32(priv, REG_SYS_FUNC_8710B, val32); if (priv->package_type == PACKAGE_QFN48M_U) phy_init_table = rtl8710bu_qfn48m_u_phy_init_table; else phy_init_table = rtl8710bu_qfn48m_s_phy_init_table; rtl8xxxu_init_phy_regs(priv, phy_init_table); rtl8xxxu_init_phy_regs(priv, rtl8710b_agc_table); } static int rtl8710bu_init_phy_rf(struct rtl8xxxu_priv *priv) { const struct rtl8xxxu_rfregval *radioa_init_table; if (priv->package_type == PACKAGE_QFN48M_U) radioa_init_table = rtl8710bu_qfn48m_u_radioa_init_table; else radioa_init_table = 
rtl8710bu_qfn48m_s_radioa_init_table; return rtl8xxxu_init_phy_rf(priv, radioa_init_table, RF_A); } static int rtl8710bu_iqk_path_a(struct rtl8xxxu_priv *priv, u32 *lok_result) { u32 reg_eac, reg_e94, reg_e9c, val32, path_sel_bb; int result = 0; path_sel_bb = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x99000000); /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); u32p_replace_bits(&val32, 0, 0xffffff00); rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * Enable path A PA in TX IQK mode */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x20000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0x07ff7); /* PA,PAD gain adjust */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA); val32 |= BIT(11); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, val32); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_PAD_TXG); u32p_replace_bits(&val32, 0x1ed, 0x00fff); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_PAD_TXG, val32); /* enter IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); u32p_replace_bits(&val32, 0x808000, 0xffffff00); rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x821403ff); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c06); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x02002911); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); mdelay(10); rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel_bb); /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); u32p_replace_bits(&val32, 0, 0xffffff00); rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA); val32 &= ~BIT(11); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, val32); /* save LOK result */ *lok_result = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_TXM_IDAC); /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); if (!(reg_eac & BIT(28)) && ((reg_e94 & 0x03ff0000) != 0x01420000) && ((reg_e9c & 0x03ff0000) != 0x00420000)) result |= 0x01; return result; } static int rtl8710bu_rx_iqk_path_a(struct rtl8xxxu_priv *priv, u32 lok_result) { u32 reg_ea4, reg_eac, reg_e94, reg_e9c, val32, path_sel_bb, tmp; int result = 0; path_sel_bb = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x99000000); /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); u32p_replace_bits(&val32, 0, 0xffffff00); rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* modify RXIQK mode table */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf1173); /* PA,PAD gain adjust */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA); val32 |= BIT(11); 
rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, val32); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_PAD_TXG); u32p_replace_bits(&val32, 0xf, 0x003e0); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_PAD_TXG, val32); /* * Enter IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); u32p_replace_bits(&val32, 0x808000, 0xffffff00); rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x8216129f); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c00); /* * Tx IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); mdelay(10); /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); if (!(reg_eac & BIT(28)) && ((reg_e94 & 0x03ff0000) != 0x01420000) && ((reg_e9c & 0x03ff0000) != 0x00420000)) { result |= 0x01; } else { /* If TX not OK, ignore RX */ /* reload RF path */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel_bb); /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); u32p_replace_bits(&val32, 0, 0xffffff00); rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA); val32 &= ~BIT(11); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, val32); return result; } val32 = 0x80007c00 | (reg_e94 & 0x3ff0000) | ((reg_e9c & 0x3ff0000) >> 16); rtl8xxxu_write32(priv, REG_TX_IQK, val32); /* * Modify RX IQK mode table */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); u32p_replace_bits(&val32, 0, 0xffffff00); rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7ff2); /* * PA, PAD setting */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA); val32 |= BIT(11); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, val32); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_PAD_TXG); u32p_replace_bits(&val32, 0x2a, 0x00fff); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_PAD_TXG, val32); /* * Enter IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); u32p_replace_bits(&val32, 0x808000, 0xffffff00); rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * RX IQK setting */ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x2816169f); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); mdelay(10); /* reload RF path */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel_bb); /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); u32p_replace_bits(&val32, 0, 0xffffff00); 
rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA); val32 &= ~BIT(11); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, val32); /* reload LOK value */ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXM_IDAC, lok_result); /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2); tmp = (reg_eac & 0x03ff0000) >> 16; if ((tmp & 0x200) > 0) tmp = 0x400 - tmp; if (!(reg_eac & BIT(27)) && ((reg_ea4 & 0x03ff0000) != 0x01320000) && ((reg_eac & 0x03ff0000) != 0x00360000) && (((reg_ea4 & 0x03ff0000) >> 16) < 0x11a) && (((reg_ea4 & 0x03ff0000) >> 16) > 0xe6) && (tmp < 0x1a)) result |= 0x02; return result; } static void rtl8710bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, int result[][8], int t) { struct device *dev = &priv->udev->dev; u32 i, val32, rx_initial_gain, lok_result; u32 path_sel_bb, path_sel_rf; int path_a_ok; int retry = 2; static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = { REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH, REG_RX_WAIT_CCA, REG_TX_CCK_RFON, REG_TX_CCK_BBON, REG_TX_OFDM_RFON, REG_TX_OFDM_BBON, REG_TX_TO_RX, REG_TX_TO_TX, REG_RX_CCK, REG_RX_OFDM, REG_RX_WAIT_RIFS, REG_RX_TO_RX, REG_STANDBY, REG_SLEEP, REG_PMPD_ANAEN }; static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = { REG_TXPAUSE, REG_BEACON_CTRL, REG_BEACON_CTRL_1, REG_GPIO_MUXCFG }; static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = { REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR, REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B, REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE, REG_FPGA0_XB_RF_INT_OE, REG_CCK0_AFE_SETTING }; /* * Note: IQ calibration must be performed after loading * PHY_REG.txt , and radio_a, radio_b.txt */ rx_initial_gain = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); if (t == 0) { /* Save ADDA parameters, turn Path A ADDA on */ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup, RTL8XXXU_ADDA_REGS); rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup); rtl8xxxu_save_regs(priv, iqk_bb_regs, priv->bb_backup, RTL8XXXU_BB_REGS); } rtl8xxxu_path_adda_on(priv, adda_regs, true); if (t == 0) { val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1); priv->pi_enabled = u32_get_bits(val32, FPGA0_HSSI_PARM1_PI); } if (!priv->pi_enabled) { /* Switch BB to PI mode to do IQ Calibration */ rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100); rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, 0x01000100); } /* MAC settings */ val32 = rtl8xxxu_read32(priv, REG_TX_PTCL_CTRL); val32 |= 0x00ff0000; rtl8xxxu_write32(priv, REG_TX_PTCL_CTRL, val32); /* save RF path */ path_sel_bb = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); path_sel_rf = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_S0S1); /* BB setting */ val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING); val32 |= 0x0f000000; rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32); rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x03c00010); rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05601); rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x25204000); /* IQ calibration setting */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); u32p_replace_bits(&val32, 0x808000, 0xffffff00); rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); for (i = 0; i < retry; i++) { path_a_ok = rtl8710bu_iqk_path_a(priv, &lok_result); if (path_a_ok == 0x01) { val32 = rtl8xxxu_read32(priv, 
REG_TX_POWER_BEFORE_IQK_A); result[t][0] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); result[t][1] = (val32 >> 16) & 0x3ff; break; } else { result[t][0] = 0x100; result[t][1] = 0x0; } } for (i = 0; i < retry; i++) { path_a_ok = rtl8710bu_rx_iqk_path_a(priv, lok_result); if (path_a_ok == 0x03) { val32 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2); result[t][2] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); result[t][3] = (val32 >> 16) & 0x3ff; break; } else { result[t][2] = 0x100; result[t][3] = 0x0; } } if (!path_a_ok) dev_warn(dev, "%s: Path A IQK failed!\n", __func__); /* Back to BB mode, load original value */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); u32p_replace_bits(&val32, 0, 0xffffff00); rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); if (t == 0) return; /* Reload ADDA power saving parameters */ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup, RTL8XXXU_ADDA_REGS); /* Reload MAC parameters */ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup); /* Reload BB parameters */ rtl8xxxu_restore_regs(priv, iqk_bb_regs, priv->bb_backup, RTL8XXXU_BB_REGS); /* Reload RF path */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel_bb); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_S0S1, path_sel_rf); /* Restore RX initial gain */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); u32p_replace_bits(&val32, 0x50, 0x000000ff); rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32); val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); u32p_replace_bits(&val32, rx_initial_gain & 0xff, 0x000000ff); rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32); /* Load 0xe30 IQC default value */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00); } static void rtl8710bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; int result[4][8]; /* last is final result */ int i, candidate; bool path_a_ok; s32 reg_e94, reg_e9c, reg_ea4, reg_eac; s32 reg_tmp = 0; bool simu; u32 path_sel_bb; /* Save RF path */ path_sel_bb = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); memset(result, 0, sizeof(result)); candidate = -1; path_a_ok = false; for (i = 0; i < 3; i++) { rtl8710bu_phy_iqcalibrate(priv, result, i); if (i == 1) { simu = rtl8xxxu_gen2_simularity_compare(priv, result, 0, 1); if (simu) { candidate = 0; break; } } if (i == 2) { simu = rtl8xxxu_gen2_simularity_compare(priv, result, 0, 2); if (simu) { candidate = 0; break; } simu = rtl8xxxu_gen2_simularity_compare(priv, result, 1, 2); if (simu) { candidate = 1; } else { for (i = 0; i < 8; i++) reg_tmp += result[3][i]; if (reg_tmp) candidate = 3; else candidate = -1; } } } if (candidate >= 0) { reg_e94 = result[candidate][0]; reg_e9c = result[candidate][1]; reg_ea4 = result[candidate][2]; reg_eac = result[candidate][3]; dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); dev_dbg(dev, "%s: e94=%x e9c=%x ea4=%x eac=%x\n", __func__, reg_e94, reg_e9c, reg_ea4, reg_eac); path_a_ok = true; if (reg_e94) rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result, candidate, (reg_ea4 == 0)); } rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg, priv->bb_recovery_backup, RTL8XXXU_BB_REGS); rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel_bb); } static int rtl8710b_emu_to_active(struct rtl8xxxu_priv *priv) { u8 val8; int count, ret = 0; /* AFE power mode selection: 1: LDO mode, 0: Power-cut mode */ val8 = rtl8xxxu_read8(priv, 0x5d); val8 &= ~BIT(0); rtl8xxxu_write8(priv, 0x5d, 
val8); val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC_8710B); val8 |= BIT(0); rtl8xxxu_write8(priv, REG_SYS_FUNC_8710B, val8); rtl8xxxu_write8(priv, 0x56, 0x0e); val8 = rtl8xxxu_read8(priv, 0x20); val8 |= BIT(0); rtl8xxxu_write8(priv, 0x20, val8); for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val8 = rtl8xxxu_read8(priv, 0x20); if (!(val8 & BIT(0))) break; udelay(10); } if (!count) ret = -EBUSY; return ret; } static int rtl8710bu_active_to_emu(struct rtl8xxxu_priv *priv) { u8 val8; u32 val32; int count, ret = 0; /* Turn off RF */ val32 = rtl8xxxu_read32(priv, REG_SYS_FUNC_8710B); val32 &= ~GENMASK(26, 24); rtl8xxxu_write32(priv, REG_SYS_FUNC_8710B, val32); /* BB reset */ val32 = rtl8xxxu_read32(priv, REG_SYS_FUNC_8710B); val32 &= ~GENMASK(17, 16); rtl8xxxu_write32(priv, REG_SYS_FUNC_8710B, val32); /* Turn off MAC by HW state machine */ val8 = rtl8xxxu_read8(priv, 0x20); val8 |= BIT(1); rtl8xxxu_write8(priv, 0x20, val8); for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val8 = rtl8xxxu_read8(priv, 0x20); if ((val8 & BIT(1)) == 0) { ret = 0; break; } udelay(10); } if (!count) ret = -EBUSY; return ret; } static int rtl8710bu_active_to_lps(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; u8 val8; u16 val16; u32 val32; int retry, retval; /* Tx Pause */ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); retry = 100; retval = -EBUSY; /* * Poll 32 bit wide REG_SCH_TX_CMD for 0x00000000 to ensure no TX is pending. */ do { val32 = rtl8xxxu_read32(priv, REG_SCH_TX_CMD); if (!val32) { retval = 0; break; } udelay(10); } while (retry--); if (!retry) { dev_warn(dev, "Failed to flush TX queue\n"); retval = -EBUSY; return retval; } /* Disable CCK and OFDM, clock gated */ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC); val8 &= ~SYS_FUNC_BBRSTB; rtl8xxxu_write8(priv, REG_SYS_FUNC, val8); udelay(2); /* Whole BB is reset */ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC); val8 &= ~SYS_FUNC_BB_GLB_RSTN; rtl8xxxu_write8(priv, REG_SYS_FUNC, val8); /* Reset MAC TRX */ val16 = rtl8xxxu_read16(priv, REG_CR); val16 &= 0xff00; val16 |= CR_HCI_RXDMA_ENABLE | CR_HCI_TXDMA_ENABLE; val16 &= ~CR_SECURITY_ENABLE; rtl8xxxu_write16(priv, REG_CR, val16); /* Respond TxOK to scheduler */ val8 = rtl8xxxu_read8(priv, REG_DUAL_TSF_RST); val8 |= DUAL_TSF_TX_OK; rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, val8); return retval; } static int rtl8710bu_power_on(struct rtl8xxxu_priv *priv) { u32 val32; u16 val16; u8 val8; int ret; rtl8xxxu_write8(priv, REG_USB_ACCESS_TIMEOUT, 0x80); val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL); val8 &= ~BIT(5); rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8); val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC_8710B); val8 |= BIT(0); rtl8xxxu_write8(priv, REG_SYS_FUNC_8710B, val8); val8 = rtl8xxxu_read8(priv, 0x20); val8 |= BIT(0); rtl8xxxu_write8(priv, 0x20, val8); rtl8xxxu_write8(priv, REG_AFE_CTRL_8710B, 0); val8 = rtl8xxxu_read8(priv, REG_WL_STATUS_8710B); val8 |= BIT(1); rtl8xxxu_write8(priv, REG_WL_STATUS_8710B, val8); ret = rtl8710b_emu_to_active(priv); if (ret) return ret; rtl8xxxu_write16(priv, REG_CR, 0); val16 = rtl8xxxu_read16(priv, REG_CR); val16 |= CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE | CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE | CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE; rtl8xxxu_write16(priv, REG_CR, val16); /* Enable hardware sequence number. 
*/ val8 = rtl8xxxu_read8(priv, REG_HWSEQ_CTRL); val8 |= 0x7f; rtl8xxxu_write8(priv, REG_HWSEQ_CTRL, val8); udelay(2); /* * Technically the rest was in the rtl8710bu_hal_init function, * not the power_on function, but it's fine because we only * call power_on from init_device. */ val8 = rtl8xxxu_read8(priv, 0xfef9); val8 &= ~BIT(0); rtl8xxxu_write8(priv, 0xfef9, val8); /* Clear the 0x40000138[5] to prevent CM4 Suspend */ val32 = rtl8710b_read_syson_reg(priv, 0x138); val32 &= ~BIT(5); rtl8710b_write_syson_reg(priv, 0x138, val32); return ret; } static void rtl8710bu_power_off(struct rtl8xxxu_priv *priv) { u32 val32; u8 val8; rtl8xxxu_flush_fifo(priv); rtl8xxxu_write32(priv, REG_HISR0_8710B, 0xffffffff); rtl8xxxu_write32(priv, REG_HIMR0_8710B, 0x0); /* Set the 0x40000138[5] to allow CM4 Suspend */ val32 = rtl8710b_read_syson_reg(priv, 0x138); val32 |= BIT(5); rtl8710b_write_syson_reg(priv, 0x138, val32); /* Stop rx */ rtl8xxxu_write8(priv, REG_CR, 0x00); rtl8710bu_active_to_lps(priv); /* Reset MCU ? */ val8 = rtl8xxxu_read8(priv, REG_8051FW_CTRL_V1_8710B + 3); val8 &= ~BIT(0); rtl8xxxu_write8(priv, REG_8051FW_CTRL_V1_8710B + 3, val8); /* Reset MCU ready status */ rtl8xxxu_write8(priv, REG_8051FW_CTRL_V1_8710B, 0x00); rtl8710bu_active_to_emu(priv); } static void rtl8710b_reset_8051(struct rtl8xxxu_priv *priv) { u8 val8; val8 = rtl8xxxu_read8(priv, REG_8051FW_CTRL_V1_8710B + 3); val8 &= ~BIT(0); rtl8xxxu_write8(priv, REG_8051FW_CTRL_V1_8710B + 3, val8); udelay(50); val8 = rtl8xxxu_read8(priv, REG_8051FW_CTRL_V1_8710B + 3); val8 |= BIT(0); rtl8xxxu_write8(priv, REG_8051FW_CTRL_V1_8710B + 3, val8); } static void rtl8710b_enable_rf(struct rtl8xxxu_priv *priv) { u32 val32; rtl8xxxu_write8(priv, REG_RF_CTRL, RF_ENABLE | RF_RSTB | RF_SDMRSTB); val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE); val32 &= ~(OFDM_RF_PATH_RX_MASK | OFDM_RF_PATH_TX_MASK); val32 |= OFDM_RF_PATH_RX_A | OFDM_RF_PATH_TX_A; rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32); rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00); } static void rtl8710b_disable_rf(struct rtl8xxxu_priv *priv) { u32 val32; val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE); val32 &= ~OFDM_RF_PATH_TX_MASK; rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32); /* Power down RF module */ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0); } static void rtl8710b_usb_quirks(struct rtl8xxxu_priv *priv) { u16 val16; rtl8xxxu_gen2_usb_quirks(priv); val16 = rtl8xxxu_read16(priv, REG_CR); val16 |= (CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE); rtl8xxxu_write16(priv, REG_CR, val16); } #define XTAL1 GENMASK(29, 24) #define XTAL0 GENMASK(23, 18) static void rtl8710b_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap) { struct rtl8xxxu_cfo_tracking *cfo = &priv->cfo_tracking; u32 val32; if (crystal_cap == cfo->crystal_cap) return; val32 = rtl8710b_read_syson_reg(priv, REG_SYS_XTAL_CTRL0_8710B); dev_dbg(&priv->udev->dev, "%s: Adjusting crystal cap from 0x%x (actually 0x%x 0x%x) to 0x%x\n", __func__, cfo->crystal_cap, u32_get_bits(val32, XTAL1), u32_get_bits(val32, XTAL0), crystal_cap); u32p_replace_bits(&val32, crystal_cap, XTAL1); u32p_replace_bits(&val32, crystal_cap, XTAL0); rtl8710b_write_syson_reg(priv, REG_SYS_XTAL_CTRL0_8710B, val32); cfo->crystal_cap = crystal_cap; } static s8 rtl8710b_cck_rssi(struct rtl8xxxu_priv *priv, struct rtl8723au_phy_stats *phy_stats) { struct jaguar2_phy_stats_type0 *phy_stats0 = (struct jaguar2_phy_stats_type0 *)phy_stats; u8 lna_idx = (phy_stats0->lna_h << 3) | phy_stats0->lna_l; u8 vga_idx = phy_stats0->vga; s8 
rx_pwr_all = 0x00;

	switch (lna_idx) {
	case 7:
		rx_pwr_all = -52 - (2 * vga_idx);
		break;
	case 6:
		rx_pwr_all = -42 - (2 * vga_idx);
		break;
	case 5:
		rx_pwr_all = -36 - (2 * vga_idx);
		break;
	case 3:
		rx_pwr_all = -12 - (2 * vga_idx);
		break;
	case 2:
		rx_pwr_all = 0 - (2 * vga_idx);
		break;
	default:
		rx_pwr_all = 0;
		break;
	}

	return rx_pwr_all;
}

struct rtl8xxxu_fileops rtl8710bu_fops = {
	.identify_chip = rtl8710bu_identify_chip,
	.parse_efuse = rtl8710bu_parse_efuse,
	.load_firmware = rtl8710bu_load_firmware,
	.power_on = rtl8710bu_power_on,
	.power_off = rtl8710bu_power_off,
	.read_efuse = rtl8710b_read_efuse,
	.reset_8051 = rtl8710b_reset_8051,
	.llt_init = rtl8xxxu_auto_llt_table,
	.init_phy_bb = rtl8710bu_init_phy_bb,
	.init_phy_rf = rtl8710bu_init_phy_rf,
	.phy_lc_calibrate = rtl8188f_phy_lc_calibrate,
	.phy_iq_calibrate = rtl8710bu_phy_iq_calibrate,
	.config_channel = rtl8710bu_config_channel,
	.parse_rx_desc = rtl8xxxu_parse_rxdesc24,
	.parse_phystats = jaguar2_rx_parse_phystats,
	.init_aggregation = rtl8710bu_init_aggregation,
	.init_statistics = rtl8710bu_init_statistics,
	.init_burst = rtl8xxxu_init_burst,
	.enable_rf = rtl8710b_enable_rf,
	.disable_rf = rtl8710b_disable_rf,
	.usb_quirks = rtl8710b_usb_quirks,
	.set_tx_power = rtl8188f_set_tx_power,
	.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
	.report_connect = rtl8xxxu_gen2_report_connect,
	.report_rssi = rtl8xxxu_gen2_report_rssi,
	.fill_txdesc = rtl8xxxu_fill_txdesc_v2,
	.set_crystal_cap = rtl8710b_set_crystal_cap,
	.cck_rssi = rtl8710b_cck_rssi,
	.writeN_block_size = 4,
	.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
	.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
	.has_tx_report = 1,
	.gen2_thermal_meter = 1,
	.needs_full_init = 1,
	.init_reg_rxfltmap = 1,
	.init_reg_pkt_life_time = 1,
	.init_reg_hmtfr = 1,
	.ampdu_max_time = 0x5e,
	/*
	 * The RTL8710BU vendor driver uses 0x50 here and it works fine,
	 * but in rtl8xxxu 0x50 causes slow upload and random packet loss. Why?
	 */
	.ustime_tsf_edca = 0x28,
	.max_aggr_num = 0x0c14,
	.supports_ap = 1,
	.max_macid_num = 16,
	.max_sec_cam_num = 32,
	.adda_1t_init = 0x03c00016,
	.adda_1t_path_on = 0x03c00016,
	.trxff_boundary = 0x3f7f,
	.pbp_rx = PBP_PAGE_SIZE_256,
	.pbp_tx = PBP_PAGE_SIZE_256,
	.mactable = rtl8710b_mac_init_table,
	.total_page_num = TX_TOTAL_PAGE_NUM_8723B,
	.page_num_hi = TX_PAGE_NUM_HI_PQ_8723B,
	.page_num_lo = TX_PAGE_NUM_LO_PQ_8723B,
	.page_num_norm = TX_PAGE_NUM_NORM_PQ_8723B,
};
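/*
 * Illustrative sketch (not part of the upstream driver): the SYSON registers
 * touched above (e.g. the CM4-suspend bit in power_on/power_off and the
 * crystal cap in set_crystal_cap) are always updated with a read-modify-write
 * cycle through the indirect accessors. A hypothetical helper wrapping that
 * pattern could look like this; the function name and its use are assumptions
 * made purely for illustration.
 */
static void __maybe_unused
rtl8710b_syson_update_bits(struct rtl8xxxu_priv *priv, u32 addr,
			   u32 mask, u32 bits)
{
	u32 val32;

	/* Read the current value through the indirect SYSON window ... */
	val32 = rtl8710b_read_syson_reg(priv, addr);

	/* ... change only the requested bits ... */
	val32 &= ~mask;
	val32 |= bits & mask;

	/* ... and write the result back through the same window. */
	rtl8710b_write_syson_reg(priv, addr, val32);
}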
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  HID driver for some sunplus "special" devices
 *
 *  Copyright (c) 1999 Andreas Gal
 *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 *  Copyright (c) 2006-2007 Jiri Kosina
 *  Copyright (c) 2008 Jiri Slaby
 */

/*
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

static const __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				   unsigned int *rsize)
{
	if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
	    rdesc[106] == 0x03) {
		hid_info(hdev,
			 "fixing up Sunplus Wireless Desktop report descriptor\n");
		rdesc[105] = rdesc[110] = 0x03;
		rdesc[106] = rdesc[111] = 0x21;
	}
	return rdesc;
}

#define sp_map_key_clear(c)	hid_map_usage_clear(hi, usage, bit, max, \
					EV_KEY, (c))

static int sp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
		return 0;

	switch (usage->hid & HID_USAGE) {
	case 0x2003:
		sp_map_key_clear(KEY_ZOOMIN);
		break;
	case 0x2103:
		sp_map_key_clear(KEY_ZOOMOUT);
		break;
	default:
		return 0;
	}
	return 1;
}

static const struct hid_device_id sp_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
	{ }
};
MODULE_DEVICE_TABLE(hid, sp_devices);

static struct hid_driver sp_driver = {
	.name = "sunplus",
	.id_table = sp_devices,
	.report_fixup = sp_report_fixup,
	.input_mapping = sp_input_mapping,
};
module_hid_driver(sp_driver);

MODULE_DESCRIPTION("HID driver for some sunplus \"special\" devices");
MODULE_LICENSE("GPL");
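/*
 * Illustrative sketch (not part of the upstream driver): mapping a further
 * consumer-page usage would only need one more case in sp_input_mapping(),
 * following the existing pattern. The usage value and keycode below are
 * hypothetical examples, not values reported by real Sunplus hardware:
 *
 *	case 0x2203:
 *		sp_map_key_clear(KEY_VOLUMEUP);
 *		break;
 */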
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
* cleanup tasks, when readers have no access to the list being initialized. * However, if the list being initialized is visible to readers, you * need to keep the compiler from being too mischievous. */ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) { WRITE_ONCE(list->next, list); WRITE_ONCE(list->prev, list); } /* * return the ->next pointer of a list_head in an rcu safe * way, we must not access it directly */ #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) /** * list_tail_rcu - returns the prev pointer of the head of the list * @head: the head of the list * * Note: This should only be used with the list header, and even then * only if list_del() and similar primitives are not also used on the * list header. */ #define list_tail_rcu(head) (*((struct list_head __rcu **)(&(head)->prev))) /* * Check during list traversal that we are within an RCU reader */ #define check_arg_count_one(dummy) #ifdef CONFIG_PROVE_RCU_LIST #define __list_check_rcu(dummy, cond, extra...) \ ({ \ check_arg_count_one(extra); \ RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(), \ "RCU-list traversed in non-reader section!"); \ }) #define __list_check_srcu(cond) \ ({ \ RCU_LOCKDEP_WARN(!(cond), \ "RCU-list traversed without holding the required lock!");\ }) #else #define __list_check_rcu(dummy, cond, extra...) \ ({ check_arg_count_one(extra); }) #define __list_check_srcu(cond) ({ }) #endif /* * Insert a new entry between two known consecutive entries. * * This is only for internal list manipulation where we know * the prev/next entries already! */ static inline void __list_add_rcu(struct list_head *new, struct list_head *prev, struct list_head *next) { if (!__list_add_valid(new, prev, next)) return; new->next = next; new->prev = prev; rcu_assign_pointer(list_next_rcu(prev), new); next->prev = new; } /** * list_add_rcu - add a new entry to rcu-protected list * @new: new entry to be added * @head: list head to add it after * * Insert a new entry after the specified head. * This is good for implementing stacks. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as list_add_rcu() * or list_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * list_for_each_entry_rcu(). */ static inline void list_add_rcu(struct list_head *new, struct list_head *head) { __list_add_rcu(new, head, head->next); } /** * list_add_tail_rcu - add a new entry to rcu-protected list * @new: new entry to be added * @head: list head to add it before * * Insert a new entry before the specified head. * This is useful for implementing queues. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as list_add_tail_rcu() * or list_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * list_for_each_entry_rcu(). */ static inline void list_add_tail_rcu(struct list_head *new, struct list_head *head) { __list_add_rcu(new, head->prev, head); } /** * list_del_rcu - deletes entry from list without re-initialization * @entry: the element to delete from the list. * * Note: list_empty() on entry does not return true after this, * the entry is in an undefined state. It is useful for RCU based * lockfree traversal. 
* * In particular, it means that we can not poison the forward * pointers that may still be used for walking the list. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as list_del_rcu() * or list_add_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * list_for_each_entry_rcu(). * * Note that the caller is not permitted to immediately free * the newly deleted entry. Instead, either synchronize_rcu() * or call_rcu() must be used to defer freeing until an RCU * grace period has elapsed. */ static inline void list_del_rcu(struct list_head *entry) { __list_del_entry(entry); entry->prev = LIST_POISON2; } /** * hlist_del_init_rcu - deletes entry from hash list with re-initialization * @n: the element to delete from the hash list. * * Note: list_unhashed() on the node return true after this. It is * useful for RCU based read lockfree traversal if the writer side * must know if the list entry is still hashed or already unhashed. * * In particular, it means that we can not poison the forward pointers * that may still be used for walking the hash list and we can only * zero the pprev pointer so list_unhashed() will return true after * this. * * The caller must take whatever precautions are necessary (such as * holding appropriate locks) to avoid racing with another * list-mutation primitive, such as hlist_add_head_rcu() or * hlist_del_rcu(), running on this same list. However, it is * perfectly legal to run concurrently with the _rcu list-traversal * primitives, such as hlist_for_each_entry_rcu(). */ static inline void hlist_del_init_rcu(struct hlist_node *n) { if (!hlist_unhashed(n)) { __hlist_del(n); WRITE_ONCE(n->pprev, NULL); } } /** * list_replace_rcu - replace old entry by new one * @old : the element to be replaced * @new : the new element to insert * * The @old entry will be replaced with the @new entry atomically from * the perspective of concurrent readers. It is the caller's responsibility * to synchronize with concurrent updaters, if any. * * Note: @old should not be empty. */ static inline void list_replace_rcu(struct list_head *old, struct list_head *new) { new->next = old->next; new->prev = old->prev; rcu_assign_pointer(list_next_rcu(new->prev), new); new->next->prev = new; old->prev = LIST_POISON2; } /** * __list_splice_init_rcu - join an RCU-protected list into an existing list. * @list: the RCU-protected list to splice * @prev: points to the last element of the existing list * @next: points to the first element of the existing list * @sync: synchronize_rcu, synchronize_rcu_expedited, ... * * The list pointed to by @prev and @next can be RCU-read traversed * concurrently with this function. * * Note that this function blocks. * * Important note: the caller must take whatever action is necessary to prevent * any other updates to the existing list. In principle, it is possible to * modify the list as soon as sync() begins execution. If this sort of thing * becomes necessary, an alternative version based on call_rcu() could be * created. But only if -really- needed -- there is no shortage of RCU API * members. */ static inline void __list_splice_init_rcu(struct list_head *list, struct list_head *prev, struct list_head *next, void (*sync)(void)) { struct list_head *first = list->next; struct list_head *last = list->prev; /* * "first" and "last" tracking list, so initialize it. 
RCU readers * have access to this list, so we must use INIT_LIST_HEAD_RCU() * instead of INIT_LIST_HEAD(). */ INIT_LIST_HEAD_RCU(list); /* * At this point, the list body still points to the source list. * Wait for any readers to finish using the list before splicing * the list body into the new list. Any new readers will see * an empty list. */ sync(); ASSERT_EXCLUSIVE_ACCESS(*first); ASSERT_EXCLUSIVE_ACCESS(*last); /* * Readers are finished with the source list, so perform splice. * The order is important if the new list is global and accessible * to concurrent RCU readers. Note that RCU readers are not * permitted to traverse the prev pointers without excluding * this function. */ last->next = next; rcu_assign_pointer(list_next_rcu(prev), first); first->prev = prev; next->prev = last; } /** * list_splice_init_rcu - splice an RCU-protected list into an existing list, * designed for stacks. * @list: the RCU-protected list to splice * @head: the place in the existing list to splice the first list into * @sync: synchronize_rcu, synchronize_rcu_expedited, ... */ static inline void list_splice_init_rcu(struct list_head *list, struct list_head *head, void (*sync)(void)) { if (!list_empty(list)) __list_splice_init_rcu(list, head, head->next, sync); } /** * list_splice_tail_init_rcu - splice an RCU-protected list into an existing * list, designed for queues. * @list: the RCU-protected list to splice * @head: the place in the existing list to splice the first list into * @sync: synchronize_rcu, synchronize_rcu_expedited, ... */ static inline void list_splice_tail_init_rcu(struct list_head *list, struct list_head *head, void (*sync)(void)) { if (!list_empty(list)) __list_splice_init_rcu(list, head->prev, head, sync); } /** * list_entry_rcu - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * This primitive may safely run concurrently with the _rcu list-mutation * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). */ #define list_entry_rcu(ptr, type, member) \ container_of(READ_ONCE(ptr), type, member) /* * Where are list_empty_rcu() and list_first_entry_rcu()? * * They do not exist because they would lead to subtle race conditions: * * if (!list_empty_rcu(mylist)) { * struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member); * do_something(bar); * } * * The list might be non-empty when list_empty_rcu() checks it, but it * might have become empty by the time that list_first_entry_rcu() rereads * the ->next pointer, which would result in a SEGV. * * When not using RCU, it is OK for list_first_entry() to re-read that * pointer because both functions should be protected by some lock that * blocks writers. * * When using RCU, list_empty() uses READ_ONCE() to fetch the * RCU-protected ->next pointer and then compares it to the address of the * list head. However, it neither dereferences this pointer nor provides * this pointer to its caller. Thus, READ_ONCE() suffices (that is, * rcu_dereference() is not needed), which means that list_empty() can be * used anywhere you would want to use list_empty_rcu(). Just don't * expect anything useful to happen if you do a subsequent lockless * call to list_first_entry_rcu()!!! * * See list_first_or_null_rcu for an alternative. */ /** * list_first_or_null_rcu - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. 
* @member: the name of the list_head within the struct. * * Note that if the list is empty, it returns NULL. * * This primitive may safely run concurrently with the _rcu list-mutation * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). */ #define list_first_or_null_rcu(ptr, type, member) \ ({ \ struct list_head *__ptr = (ptr); \ struct list_head *__next = READ_ONCE(__ptr->next); \ likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \ }) /** * list_next_or_null_rcu - get the next element from a list * @head: the head for the list. * @ptr: the list head to take the next element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * Note that if the ptr is at the end of the list, NULL is returned. * * This primitive may safely run concurrently with the _rcu list-mutation * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). */ #define list_next_or_null_rcu(head, ptr, type, member) \ ({ \ struct list_head *__head = (head); \ struct list_head *__ptr = (ptr); \ struct list_head *__next = READ_ONCE(__ptr->next); \ likely(__next != __head) ? list_entry_rcu(__next, type, \ member) : NULL; \ }) /** * list_for_each_entry_rcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as list_add_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ #define list_for_each_entry_rcu(pos, head, member, cond...) \ for (__list_check_rcu(dummy, ## cond, 0), \ pos = list_entry_rcu((head)->next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** * list_for_each_entry_srcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * @cond: lockdep expression for the lock required to traverse the list. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as list_add_rcu() * as long as the traversal is guarded by srcu_read_lock(). * The lockdep expression srcu_read_lock_held() can be passed as the * cond argument from read side. */ #define list_for_each_entry_srcu(pos, head, member, cond) \ for (__list_check_srcu(cond), \ pos = list_entry_rcu((head)->next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** * list_entry_lockless - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * This primitive may safely run concurrently with the _rcu * list-mutation primitives such as list_add_rcu(), but requires some * implicit RCU read-side guarding. One example is running within a special * exception-time environment where preemption is disabled and where lockdep * cannot be invoked. Another example is when items are added to the list, * but never deleted. 
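 *
 * A minimal illustrative sketch (not part of the original documentation),
 * assuming a caller-defined struct foo that embeds a list_head named "list"
 * and a plain "struct list_head head" whose list is only ever appended to:
 *
 *	struct foo *first;
 *
 *	first = list_entry_lockless(head.next, struct foo, list);
 *	if (&first->list != &head)
 *		do_something_with(first);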
*/ #define list_entry_lockless(ptr, type, member) \ container_of((typeof(ptr))READ_ONCE(ptr), type, member) /** * list_for_each_entry_lockless - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_struct within the struct. * * This primitive may safely run concurrently with the _rcu * list-mutation primitives such as list_add_rcu(), but requires some * implicit RCU read-side guarding. One example is running within a special * exception-time environment where preemption is disabled and where lockdep * cannot be invoked. Another example is when items are added to the list, * but never deleted. */ #define list_for_each_entry_lockless(pos, head, member) \ for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry_lockless(pos->member.next, typeof(*pos), member)) /** * list_for_each_entry_continue_rcu - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after * the current position which must have been in the list when the RCU read * lock was taken. * This would typically require either that you obtained the node from a * previous walk of the list in the same RCU read-side critical section, or * that you held some sort of non-RCU reference (such as a reference count) * to keep the node alive *and* in the list. * * This iterator is similar to list_for_each_entry_from_rcu() except * this starts after the given position and that one starts at the given * position. */ #define list_for_each_entry_continue_rcu(pos, head, member) \ for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** * list_for_each_entry_from_rcu - iterate over a list from current point * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_node within the struct. * * Iterate over the tail of a list starting from a given position, * which must have been in the list when the RCU read lock was taken. * This would typically require either that you obtained the node from a * previous walk of the list in the same RCU read-side critical section, or * that you held some sort of non-RCU reference (such as a reference count) * to keep the node alive *and* in the list. * * This iterator is similar to list_for_each_entry_continue_rcu() except * this starts from the given position and that one starts from the position * after the given position. */ #define list_for_each_entry_from_rcu(pos, head, member) \ for (; &(pos)->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member)) /** * hlist_del_rcu - deletes entry from hash list without re-initialization * @n: the element to delete from the hash list. * * Note: list_unhashed() on entry does not return true after this, * the entry is in an undefined state. It is useful for RCU based * lockfree traversal. * * In particular, it means that we can not poison the forward * pointers that may still be used for walking the hash list. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. 
* However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry(). */ static inline void hlist_del_rcu(struct hlist_node *n) { __hlist_del(n); WRITE_ONCE(n->pprev, LIST_POISON2); } /** * hlist_replace_rcu - replace old entry by new one * @old : the element to be replaced * @new : the new element to insert * * The @old entry will be replaced with the @new entry atomically from * the perspective of concurrent readers. It is the caller's responsibility * to synchronize with concurrent updaters, if any. */ static inline void hlist_replace_rcu(struct hlist_node *old, struct hlist_node *new) { struct hlist_node *next = old->next; new->next = next; WRITE_ONCE(new->pprev, old->pprev); rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new); if (next) WRITE_ONCE(new->next->pprev, &new->next); WRITE_ONCE(old->pprev, LIST_POISON2); } /** * hlists_swap_heads_rcu - swap the lists the hlist heads point to * @left: The hlist head on the left * @right: The hlist head on the right * * The lists start out as [@left ][node1 ... ] and * [@right ][node2 ... ] * The lists end up as [@left ][node2 ... ] * [@right ][node1 ... ] */ static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right) { struct hlist_node *node1 = left->first; struct hlist_node *node2 = right->first; rcu_assign_pointer(left->first, node2); rcu_assign_pointer(right->first, node1); WRITE_ONCE(node2->pprev, &left->first); WRITE_ONCE(node1->pprev, &right->first); } /* * return the first or the next element in an RCU protected hlist */ #define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first))) #define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next))) #define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev))) /** * hlist_add_head_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist, * while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_add_head_rcu(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; n->next = first; WRITE_ONCE(n->pprev, &h->first); rcu_assign_pointer(hlist_first_rcu(h), n); if (first) WRITE_ONCE(first->pprev, &n->next); } /** * hlist_add_tail_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist, * while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. 
Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_add_tail_rcu(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *i, *last = NULL; /* Note: write side code, so rcu accessors are not needed. */ for (i = h->first; i; i = i->next) last = i; if (last) { n->next = last->next; WRITE_ONCE(n->pprev, &last->next); rcu_assign_pointer(hlist_next_rcu(last), n); } else { hlist_add_head_rcu(n, h); } } /** * hlist_add_before_rcu * @n: the new element to add to the hash list. * @next: the existing element to add the new element before. * * Description: * Adds the specified element to the specified hlist * before the specified node while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. */ static inline void hlist_add_before_rcu(struct hlist_node *n, struct hlist_node *next) { WRITE_ONCE(n->pprev, next->pprev); n->next = next; rcu_assign_pointer(hlist_pprev_rcu(n), n); WRITE_ONCE(next->pprev, &n->next); } /** * hlist_add_behind_rcu * @n: the new element to add to the hash list. * @prev: the existing element to add the new element after. * * Description: * Adds the specified element to the specified hlist * after the specified node while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. */ static inline void hlist_add_behind_rcu(struct hlist_node *n, struct hlist_node *prev) { n->next = prev->next; WRITE_ONCE(n->pprev, &prev->next); rcu_assign_pointer(hlist_next_rcu(prev), n); if (n->next) WRITE_ONCE(n->next->pprev, &n->next); } #define __hlist_for_each_rcu(pos, head) \ for (pos = rcu_dereference(hlist_first_rcu(head)); \ pos; \ pos = rcu_dereference(hlist_next_rcu(pos))) /** * hlist_for_each_entry_rcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ #define hlist_for_each_entry_rcu(pos, head, member, cond...) \ for (__list_check_rcu(dummy, ## cond, 0), \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_srcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. * @cond: lockdep expression for the lock required to traverse the list. 
* * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by srcu_read_lock(). * The lockdep expression srcu_read_lock_held() can be passed as the * cond argument from read side. */ #define hlist_for_each_entry_srcu(pos, head, member, cond) \ for (__list_check_srcu(cond), \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing) * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). * * This is the same as hlist_for_each_entry_rcu() except that it does * not do any RCU debugging or tracing. */ #define hlist_for_each_entry_rcu_notrace(pos, head, member) \ for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ #define hlist_for_each_entry_rcu_bh(pos, head, member) \ for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_continue_rcu(pos, member) \ for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ &(pos)->member)), typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_continue_rcu_bh(pos, member) \ for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \ &(pos)->member)), typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \ &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_from_rcu(pos, member) \ for (; pos; \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ &(pos)->member)), typeof(*(pos)), member)) #endif /* __KERNEL__ */ #endif |
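The list_*_rcu primitives above are normally paired with rcu_read_lock() on the read side and an ordinary lock that serializes updaters only. What follows is a minimal sketch of that pattern, not taken from the header; struct foo, foo_list, foo_lock and the helper names are hypothetical.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int key;
	int data;
	struct list_head node;
	struct rcu_head rcu;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);	/* serializes updaters only */

/* Reader: no lock, just rcu_read_lock() around the traversal. */
static int foo_lookup(int key)
{
	struct foo *f;
	int data = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(f, &foo_list, node) {
		if (f->key == key) {
			data = f->data;
			break;
		}
	}
	rcu_read_unlock();
	return data;
}

/* Updater: publish a new entry; readers may see it as soon as it is added. */
static int foo_add(int key, int data)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	f->key = key;
	f->data = data;

	spin_lock(&foo_lock);
	list_add_rcu(&f->node, &foo_list);
	spin_unlock(&foo_lock);
	return 0;
}

/* Updater: unlink now, free only after pre-existing readers have finished. */
static void foo_del(int key)
{
	struct foo *f;

	spin_lock(&foo_lock);
	list_for_each_entry(f, &foo_list, node) {
		if (f->key == key) {
			list_del_rcu(&f->node);
			spin_unlock(&foo_lock);
			kfree_rcu(f, rcu);
			return;
		}
	}
	spin_unlock(&foo_lock);
}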
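The hlist variants above follow the same discipline and are most often used for RCU-protected hash buckets. A sketch under the same assumptions; the bar table and its helpers are hypothetical names, not part of the header.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define BAR_HASH_SIZE	16

struct bar {
	unsigned long key;
	struct hlist_node hnode;
	struct rcu_head rcu;
};

static struct hlist_head bar_hash[BAR_HASH_SIZE];	/* zero-initialized = empty */
static DEFINE_SPINLOCK(bar_lock);			/* serializes updaters */

static struct hlist_head *bar_bucket(unsigned long key)
{
	return &bar_hash[key % BAR_HASH_SIZE];
}

/* Lockless reader. */
static bool bar_present(unsigned long key)
{
	struct bar *b;
	bool found = false;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, bar_bucket(key), hnode) {
		if (b->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

static int bar_insert(unsigned long key)
{
	struct bar *b = kzalloc(sizeof(*b), GFP_KERNEL);

	if (!b)
		return -ENOMEM;
	b->key = key;

	spin_lock(&bar_lock);
	hlist_add_head_rcu(&b->hnode, bar_bucket(key));
	spin_unlock(&bar_lock);
	return 0;
}

static void bar_remove(unsigned long key)
{
	struct bar *b;

	spin_lock(&bar_lock);
	hlist_for_each_entry(b, bar_bucket(key), hnode) {
		if (b->key == key) {
			hlist_del_rcu(&b->hnode);
			spin_unlock(&bar_lock);
			kfree_rcu(b, rcu);
			return;
		}
	}
	spin_unlock(&bar_lock);
}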
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_WAIT_H #define _LINUX_WAIT_H /* * Linux wait queue related types and methods */ #include <linux/list.h> #include <linux/stddef.h> #include <linux/spinlock.h> #include <asm/current.h> typedef struct wait_queue_entry wait_queue_entry_t; typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key); int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key); /* wait_queue_entry::flags */ #define WQ_FLAG_EXCLUSIVE 0x01 #define WQ_FLAG_WOKEN 0x02 #define WQ_FLAG_CUSTOM 0x04 #define WQ_FLAG_DONE 0x08 #define WQ_FLAG_PRIORITY 0x10 /* * A single wait-queue entry structure: */ struct wait_queue_entry { unsigned int flags; void *private; wait_queue_func_t func; struct list_head entry; }; struct wait_queue_head { spinlock_t lock; struct list_head head; }; typedef struct wait_queue_head wait_queue_head_t; struct task_struct; /* * Macros for declaration and initialisation of the datatypes */ #define __WAITQUEUE_INITIALIZER(name, tsk) { \ .private = tsk, \ .func = default_wake_function, \ .entry = { NULL, NULL } } #define DECLARE_WAITQUEUE(name, tsk) \ struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk) #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .head = LIST_HEAD_INIT(name.head) } #define DECLARE_WAIT_QUEUE_HEAD(name) \ struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name) extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *); #define init_waitqueue_head(wq_head) \ do { \ static struct lock_class_key __key; \ \ __init_waitqueue_head((wq_head), #wq_head, &__key); \ } while (0) #ifdef CONFIG_LOCKDEP # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ ({ init_waitqueue_head(&name); name; }) # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \ struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) #else # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name) #endif static inline void
init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p) { wq_entry->flags = 0; wq_entry->private = p; wq_entry->func = default_wake_function; } static inline void init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func) { wq_entry->flags = 0; wq_entry->private = NULL; wq_entry->func = func; } /** * waitqueue_active -- locklessly test for waiters on the queue * @wq_head: the waitqueue to test for waiters * * returns true if the wait list is not empty * * NOTE: this function is lockless and requires care, incorrect usage _will_ * lead to sporadic and non-obvious failure. * * Use either while holding wait_queue_head::lock or when used for wakeups * with an extra smp_mb() like:: * * CPU0 - waker CPU1 - waiter * * for (;;) { * @cond = true; prepare_to_wait(&wq_head, &wait, state); * smp_mb(); // smp_mb() from set_current_state() * if (waitqueue_active(wq_head)) if (@cond) * wake_up(wq_head); break; * schedule(); * } * finish_wait(&wq_head, &wait); * * Because without the explicit smp_mb() it's possible for the * waitqueue_active() load to get hoisted over the @cond store such that we'll * observe an empty wait list while the waiter might not observe @cond. * * Also note that this 'optimization' trades a spin_lock() for an smp_mb(), * which (when the lock is uncontended) are of roughly equal cost. */ static inline int waitqueue_active(struct wait_queue_head *wq_head) { return !list_empty(&wq_head->head); } /** * wq_has_single_sleeper - check if there is only one sleeper * @wq_head: wait queue head * * Returns true of wq_head has only one sleeper on the list. * * Please refer to the comment for waitqueue_active. */ static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head) { return list_is_singular(&wq_head->head); } /** * wq_has_sleeper - check if there are any waiting processes * @wq_head: wait queue head * * Returns true if wq_head has waiting processes * * Please refer to the comment for waitqueue_active. */ static inline bool wq_has_sleeper(struct wait_queue_head *wq_head) { /* * We need to be sure we are in sync with the * add_wait_queue modifications to the wait queue. * * This memory barrier should be paired with one on the * waiting side. 
*/ smp_mb(); return waitqueue_active(wq_head); } extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { struct list_head *head = &wq_head->head; struct wait_queue_entry *wq; list_for_each_entry(wq, &wq_head->head, entry) { if (!(wq->flags & WQ_FLAG_PRIORITY)) break; head = &wq->entry; } list_add(&wq_entry->entry, head); } /* * Used for wake-one threads: */ static inline void __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { wq_entry->flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue(wq_head, wq_entry); } static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { list_add_tail(&wq_entry->entry, &wq_head->head); } static inline void __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { wq_entry->flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_entry_tail(wq_head, wq_entry); } static inline void __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { list_del(&wq_entry->entry); } int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr); void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode); void __wake_up_pollfree(struct wait_queue_head *wq_head); #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL) #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL) #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1) #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0) #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL) #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE) /* * Wakeup macros to be used to report events to the targets. 
*/ #define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m)) #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m)) #define wake_up_poll(x, m) \ __wake_up(x, TASK_NORMAL, 1, poll_to_key(m)) #define wake_up_poll_on_current_cpu(x, m) \ __wake_up_on_current_cpu(x, TASK_NORMAL, poll_to_key(m)) #define wake_up_locked_poll(x, m) \ __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m)) #define wake_up_interruptible_poll(x, m) \ __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m)) #define wake_up_interruptible_sync_poll(x, m) \ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m)) #define wake_up_interruptible_sync_poll_locked(x, m) \ __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m)) /** * wake_up_pollfree - signal that a polled waitqueue is going away * @wq_head: the wait queue head * * In the very rare cases where a ->poll() implementation uses a waitqueue whose * lifetime is tied to a task rather than to the 'struct file' being polled, * this function must be called before the waitqueue is freed so that * non-blocking polls (e.g. epoll) are notified that the queue is going away. * * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU. */ static inline void wake_up_pollfree(struct wait_queue_head *wq_head) { /* * For performance reasons, we don't always take the queue lock here. * Therefore, we might race with someone removing the last entry from * the queue, and proceed while they still hold the queue lock. * However, rcu_read_lock() is required to be held in such cases, so we * can safely proceed with an RCU-delayed free. */ if (waitqueue_active(wq_head)) __wake_up_pollfree(wq_head); } #define ___wait_cond_timeout(condition) \ ({ \ bool __cond = (condition); \ if (__cond && !__ret) \ __ret = 1; \ __cond || !__ret; \ }) #define ___wait_is_interruptible(state) \ (!__builtin_constant_p(state) || \ (state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags); /* * The below macro ___wait_event() has an explicit shadow of the __ret * variable when used from the wait_event_*() macros. * * This is so that both can use the ___wait_cond_timeout() construct * to wrap the condition. * * The type inconsistency of the wait_event_*() __ret variable is also * on purpose; we use long where we can return timeout values and int * otherwise. */ #define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \ ({ \ __label__ __out; \ struct wait_queue_entry __wq_entry; \ long __ret = ret; /* explicit shadow */ \ \ init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \ for (;;) { \ long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\ \ if (condition) \ break; \ \ if (___wait_is_interruptible(state) && __int) { \ __ret = __int; \ goto __out; \ } \ \ cmd; \ } \ finish_wait(&wq_head, &__wq_entry); \ __out: __ret; \ }) #define __wait_event(wq_head, condition) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ schedule()) /** * wait_event - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. 
*/ #define wait_event(wq_head, condition) \ do { \ might_sleep(); \ if (condition) \ break; \ __wait_event(wq_head, condition); \ } while (0) #define __io_wait_event(wq_head, condition) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ io_schedule()) /* * io_wait_event() -- like wait_event() but with io_schedule() */ #define io_wait_event(wq_head, condition) \ do { \ might_sleep(); \ if (condition) \ break; \ __io_wait_event(wq_head, condition); \ } while (0) #define __wait_event_freezable(wq_head, condition) \ ___wait_event(wq_head, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), \ 0, 0, schedule()) /** * wait_event_freezable - sleep (or freeze) until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute * to system load) until the @condition evaluates to true. The * @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. */ #define wait_event_freezable(wq_head, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_freezable(wq_head, condition); \ __ret; \ }) #define __wait_event_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_UNINTERRUPTIBLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_timeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ #define wait_event_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_timeout(wq_head, condition, timeout); \ __ret; \ }) #define __wait_event_freezable_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 0, timeout, \ __ret = schedule_timeout(__ret)) /* * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid * increasing load and is freezable. 
*/ #define wait_event_freezable_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \ __ret; \ }) #define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \ cmd1; schedule(); cmd2) /* * Just like wait_event_cmd(), except it sets exclusive flag */ #define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \ do { \ if (condition) \ break; \ __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \ } while (0) #define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ cmd1; schedule(); cmd2) /** * wait_event_cmd - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @cmd1: the command will be executed before sleep * @cmd2: the command will be executed after sleep * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. */ #define wait_event_cmd(wq_head, condition, cmd1, cmd2) \ do { \ if (condition) \ break; \ __wait_event_cmd(wq_head, condition, cmd1, cmd2); \ } while (0) #define __wait_event_interruptible(wq_head, condition) \ ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ schedule()) /** * wait_event_interruptible - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible(wq_head, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_interruptible(wq_head, condition); \ __ret; \ }) #define __wait_event_interruptible_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_INTERRUPTIBLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was * interrupted by a signal. 
*/ #define wait_event_interruptible_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_interruptible_timeout(wq_head, \ condition, timeout); \ __ret; \ }) #define __wait_event_hrtimeout(wq_head, condition, timeout, state) \ ({ \ int __ret = 0; \ struct hrtimer_sleeper __t; \ \ hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ HRTIMER_MODE_REL); \ if ((timeout) != KTIME_MAX) { \ hrtimer_set_expires_range_ns(&__t.timer, timeout, \ current->timer_slack_ns); \ hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \ } \ \ __ret = ___wait_event(wq_head, condition, state, 0, 0, \ if (!__t.task) { \ __ret = -ETIME; \ break; \ } \ schedule()); \ \ hrtimer_cancel(&__t.timer); \ destroy_hrtimer_on_stack(&__t.timer); \ __ret; \ }) /** * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, as a ktime_t * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function returns 0 if @condition became true, or -ETIME if the timeout * elapsed. */ #define wait_event_hrtimeout(wq_head, condition, timeout) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_hrtimeout(wq_head, condition, timeout, \ TASK_UNINTERRUPTIBLE); \ __ret; \ }) /** * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, as a ktime_t * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function returns 0 if @condition became true, -ERESTARTSYS if it was * interrupted by a signal, or -ETIME if the timeout elapsed. 
*/ #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ ({ \ long __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_hrtimeout(wq, condition, timeout, \ TASK_INTERRUPTIBLE); \ __ret; \ }) #define __wait_event_interruptible_exclusive(wq, condition) \ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ schedule()) #define wait_event_interruptible_exclusive(wq, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_interruptible_exclusive(wq, condition); \ __ret; \ }) #define __wait_event_killable_exclusive(wq, condition) \ ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \ schedule()) #define wait_event_killable_exclusive(wq, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_killable_exclusive(wq, condition); \ __ret; \ }) #define __wait_event_freezable_exclusive(wq, condition) \ ___wait_event(wq, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 1, 0,\ schedule()) #define wait_event_freezable_exclusive(wq, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_freezable_exclusive(wq, condition); \ __ret; \ }) /** * wait_event_idle - wait for a condition without contributing to system load * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * */ #define wait_event_idle(wq_head, condition) \ do { \ might_sleep(); \ if (!(condition)) \ ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \ } while (0) /** * wait_event_idle_exclusive - wait for a condition with contributing to system load * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. * The @condition is checked each time the waitqueue @wq_head is woken up. * * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag * set thus if other processes wait on the same list, when this * process is woken further processes are not considered. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * */ #define wait_event_idle_exclusive(wq_head, condition) \ do { \ might_sleep(); \ if (!(condition)) \ ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \ } while (0) #define __wait_event_idle_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_IDLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. 
* * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ #define wait_event_idle_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_idle_timeout(wq_head, condition, timeout); \ __ret; \ }) #define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_IDLE, 1, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag * set thus if other processes wait on the same list, when this * process is woken further processes are not considered. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\ __ret; \ }) extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *); extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *); #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \ ({ \ int __ret; \ DEFINE_WAIT(__wait); \ if (exclusive) \ __wait.flags |= WQ_FLAG_EXCLUSIVE; \ do { \ __ret = fn(&(wq), &__wait); \ if (__ret) \ break; \ } while (!(condition)); \ __remove_wait_queue(&(wq), &__wait); \ __set_current_state(TASK_RUNNING); \ __ret; \ }) /** * wait_event_interruptible_locked - sleep until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock()/spin_unlock() * functions which must match the way they are locked/unlocked outside * of this macro. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_locked(wq, condition) \ ((condition) \ ? 
0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr)) /** * wait_event_interruptible_locked_irq - sleep until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() * functions which must match the way they are locked/unlocked outside * of this macro. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_locked_irq(wq, condition) \ ((condition) \ ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq)) /** * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock()/spin_unlock() * functions which must match the way they are locked/unlocked outside * of this macro. * * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag * set thus when other process waits process on the list if this * process is awaken further processes are not considered. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_exclusive_locked(wq, condition) \ ((condition) \ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr)) /** * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() * functions which must match the way they are locked/unlocked outside * of this macro. * * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag * set thus when other process waits process on the list if this * process is awaken further processes are not considered. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. 
* * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ ((condition) \ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq)) #define __wait_event_killable(wq, condition) \ ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule()) /** * wait_event_killable - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_KILLABLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_killable(wq_head, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_killable(wq_head, condition); \ __ret; \ }) #define __wait_event_state(wq, condition, state) \ ___wait_event(wq, condition, state, 0, 0, schedule()) /** * wait_event_state - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @state: state to sleep in * * The process is put to sleep (@state) until the @condition evaluates to true * or a signal is received (when allowed by @state). The @condition is checked * each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a signal * (when allowed by @state) and 0 if @condition evaluated to true. */ #define wait_event_state(wq_head, condition, state) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_state(wq_head, condition, state); \ __ret; \ }) #define __wait_event_killable_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_KILLABLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_KILLABLE) until the * @condition evaluates to true or a kill signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was * interrupted by a kill signal. * * Only kill signals interrupt this process. 
*/ #define wait_event_killable_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_killable_timeout(wq_head, \ condition, timeout); \ __ret; \ }) #define __wait_event_lock_irq(wq_head, condition, lock, cmd) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ spin_unlock_irq(&lock); \ cmd; \ schedule(); \ spin_lock_irq(&lock)) /** * wait_event_lock_irq_cmd - sleep until a condition gets true. The * condition is checked under the lock. This * is expected to be called with the lock * taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before cmd * and schedule() and reacquired afterwards. * @cmd: a command which is invoked outside the critical section before * sleep * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before invoking the cmd and going to sleep and is reacquired * afterwards. */ #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \ do { \ if (condition) \ break; \ __wait_event_lock_irq(wq_head, condition, lock, cmd); \ } while (0) /** * wait_event_lock_irq - sleep until a condition gets true. The * condition is checked under the lock. This * is expected to be called with the lock * taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before schedule() * and reacquired afterwards. * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before going to sleep and is reacquired afterwards. */ #define wait_event_lock_irq(wq_head, condition, lock) \ do { \ if (condition) \ break; \ __wait_event_lock_irq(wq_head, condition, lock, ); \ } while (0) #define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \ ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ spin_unlock_irq(&lock); \ cmd; \ schedule(); \ spin_lock_irq(&lock)) /** * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true. * The condition is checked under the lock. This is expected to * be called with the lock taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before cmd and * schedule() and reacquired afterwards. * @cmd: a command which is invoked outside the critical section before * sleep * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. The @condition is * checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before invoking the cmd and going to sleep and is reacquired * afterwards. 
* * The macro will return -ERESTARTSYS if it was interrupted by a signal * and 0 if @condition evaluated to true. */ #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \ ({ \ int __ret = 0; \ if (!(condition)) \ __ret = __wait_event_interruptible_lock_irq(wq_head, \ condition, lock, cmd); \ __ret; \ }) /** * wait_event_interruptible_lock_irq - sleep until a condition gets true. * The condition is checked under the lock. This is expected * to be called with the lock taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before schedule() * and reacquired afterwards. * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or signal is received. The @condition is * checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before going to sleep and is reacquired afterwards. * * The macro will return -ERESTARTSYS if it was interrupted by a signal * and 0 if @condition evaluated to true. */ #define wait_event_interruptible_lock_irq(wq_head, condition, lock) \ ({ \ int __ret = 0; \ if (!(condition)) \ __ret = __wait_event_interruptible_lock_irq(wq_head, \ condition, lock,); \ __ret; \ }) #define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ state, 0, timeout, \ spin_unlock_irq(&lock); \ __ret = schedule_timeout(__ret); \ spin_lock_irq(&lock)); /** * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets * true or a timeout elapses. The condition is checked under * the lock. This is expected to be called with the lock taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before schedule() * and reacquired afterwards. * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or signal is received. The @condition is * checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before going to sleep and is reacquired afterwards. * * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it * was interrupted by a signal, and the remaining jiffies otherwise * if the condition evaluated to true before the timeout elapsed. 
*/ #define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \ timeout) \ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_lock_irq_timeout( \ wq_head, condition, lock, timeout, \ TASK_INTERRUPTIBLE); \ __ret; \ }) #define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_lock_irq_timeout( \ wq_head, condition, lock, timeout, \ TASK_UNINTERRUPTIBLE); \ __ret; \ }) /* * Waitqueues which are removed from the waitqueue_head at wakeup time */ void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout); int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); #define DEFINE_WAIT_FUNC(name, function) \ struct wait_queue_entry name = { \ .private = current, \ .func = function, \ .entry = LIST_HEAD_INIT((name).entry), \ } #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) #define init_wait(wait) \ do { \ (wait)->private = current; \ (wait)->func = autoremove_wake_function; \ INIT_LIST_HEAD(&(wait)->entry); \ (wait)->flags = 0; \ } while (0) typedef int (*task_call_f)(struct task_struct *p, void *arg); extern int task_call_func(struct task_struct *p, task_call_f func, void *arg); #endif /* _LINUX_WAIT_H */ |
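The wait_event()/wake_up() pair documented above is the basic producer/consumer shape: the waiter sleeps until the condition becomes true, and the waker must update the condition before waking. A minimal sketch; data_wq and data_ready are hypothetical names, not from the header.

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(data_wq);
static bool data_ready;

/* Consumer: sleeps in TASK_UNINTERRUPTIBLE until data_ready is set. */
static void consumer(void)
{
	wait_event(data_wq, data_ready);
	/* ... consume the data here ... */
}

/* Producer: update the condition first, then wake the waiters. */
static void producer(void)
{
	data_ready = true;	/* the condition wait_event() re-tests */
	wake_up(&data_wq);	/* must come after the condition changes */
}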
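wait_event_interruptible_timeout() folds three outcomes into one long return value, as described in its kernel-doc above. A sketch of handling all three; reply_wq, reply_arrived and the 500 ms timeout are hypothetical.

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(reply_wq);
static bool reply_arrived;

static int wait_for_reply(void)
{
	long ret;

	ret = wait_event_interruptible_timeout(reply_wq, reply_arrived,
					       msecs_to_jiffies(500));
	if (ret == 0)
		return -ETIMEDOUT;	/* condition still false after the timeout */
	if (ret < 0)
		return ret;		/* -ERESTARTSYS: interrupted by a signal */
	return 0;			/* condition true; ret was the jiffies left (>= 1) */
}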
// SPDX-License-Identifier: GPL-2.0+
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <linux/firmware.h>

#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"

#define SD_INIT1_FIRMWARE	"ene-ub6250/sd_init1.bin"
#define SD_INIT2_FIRMWARE	"ene-ub6250/sd_init2.bin"
#define SD_RW_FIRMWARE		"ene-ub6250/sd_rdwr.bin"
#define MS_INIT_FIRMWARE	"ene-ub6250/ms_init.bin"
#define MSP_RW_FIRMWARE		"ene-ub6250/msp_rdwr.bin"
#define MS_RW_FIRMWARE		"ene-ub6250/ms_rdwr.bin"

#define DRV_NAME "ums_eneub6250"

MODULE_DESCRIPTION("Driver for ENE UB6250 reader");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
MODULE_FIRMWARE(SD_INIT1_FIRMWARE);
MODULE_FIRMWARE(SD_INIT2_FIRMWARE);
MODULE_FIRMWARE(SD_RW_FIRMWARE);
MODULE_FIRMWARE(MS_INIT_FIRMWARE);
MODULE_FIRMWARE(MSP_RW_FIRMWARE);
MODULE_FIRMWARE(MS_RW_FIRMWARE);

/*
 * The table of devices
 */
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
		    vendorName, productName, useProtocol, useTransport, \
		    initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
	.driver_info = (flags)}

static const struct usb_device_id ene_ub6250_usb_ids[] = {
#	include "unusual_ene_ub6250.h"
	{ }		/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ene_ub6250_usb_ids);

#undef UNUSUAL_DEV

/*
 * The flags table
 */
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
		    vendor_name, product_name, use_protocol, use_transport, \
		    init_function, Flags) \
{ \
	.vendorName = vendor_name, \
	.productName = product_name, \
	.useProtocol = use_protocol, \
	.useTransport = use_transport, \
	.initFunction = init_function, \
}

static const struct us_unusual_dev ene_ub6250_unusual_dev_list[] = {
#	include "unusual_ene_ub6250.h"
	{ }		/* Terminating entry */
};

#undef UNUSUAL_DEV

/* ENE bin code len */
#define ENE_BIN_CODE_LEN	0x800

/* EnE HW Register */
#define REG_CARD_STATUS		0xFF83
#define REG_HW_TRAP1		0xFF89

/* SRB Status */
#define SS_SUCCESS		0x000000	/* No Sense */
#define SS_NOT_READY		0x023A00	/* Medium not present */
#define SS_MEDIUM_ERR		0x031100	/* Unrecovered read error */
#define SS_HW_ERR		0x040800	/* Communication failure */
#define SS_ILLEGAL_REQUEST	0x052000	/* Invalid command */
#define SS_UNIT_ATTENTION	0x062900	/* Reset occurred */

/* ENE Load FW Pattern */
#define SD_INIT1_PATTERN	1
#define SD_INIT2_PATTERN	2
#define SD_RW_PATTERN		3
#define MS_INIT_PATTERN		4
#define MSP_RW_PATTERN		5
#define MS_RW_PATTERN		6
#define SM_INIT_PATTERN		7
#define SM_RW_PATTERN		8

#define FDIR_WRITE		0
#define FDIR_READ		1

/* For MS Card */

/* Status Register 1 */
#define MS_REG_ST1_MB		0x80	/* media busy */
#define MS_REG_ST1_FB1		0x40	/* flush busy 1 */
#define MS_REG_ST1_DTER		0x20	/* error on data(corrected) */
#define MS_REG_ST1_UCDT		0x10	/* unable to correct data */
#define MS_REG_ST1_EXER		0x08	/* error on extra(corrected) */
#define MS_REG_ST1_UCEX		0x04	/* unable to correct extra */
#define MS_REG_ST1_FGER		0x02	/* error on overwrite
flag(corrected) */ #define MS_REG_ST1_UCFG 0x01 /* unable to correct overwrite flag */ #define MS_REG_ST1_DEFAULT (MS_REG_ST1_MB | MS_REG_ST1_FB1 | MS_REG_ST1_DTER | MS_REG_ST1_UCDT | MS_REG_ST1_EXER | MS_REG_ST1_UCEX | MS_REG_ST1_FGER | MS_REG_ST1_UCFG) /* Overwrite Area */ #define MS_REG_OVR_BKST 0x80 /* block status */ #define MS_REG_OVR_BKST_OK MS_REG_OVR_BKST /* OK */ #define MS_REG_OVR_BKST_NG 0x00 /* NG */ #define MS_REG_OVR_PGST0 0x40 /* page status */ #define MS_REG_OVR_PGST1 0x20 #define MS_REG_OVR_PGST_MASK (MS_REG_OVR_PGST0 | MS_REG_OVR_PGST1) #define MS_REG_OVR_PGST_OK (MS_REG_OVR_PGST0 | MS_REG_OVR_PGST1) /* OK */ #define MS_REG_OVR_PGST_NG MS_REG_OVR_PGST1 /* NG */ #define MS_REG_OVR_PGST_DATA_ERROR 0x00 /* data error */ #define MS_REG_OVR_UDST 0x10 /* update status */ #define MS_REG_OVR_UDST_UPDATING 0x00 /* updating */ #define MS_REG_OVR_UDST_NO_UPDATE MS_REG_OVR_UDST #define MS_REG_OVR_RESERVED 0x08 #define MS_REG_OVR_DEFAULT (MS_REG_OVR_BKST_OK | MS_REG_OVR_PGST_OK | MS_REG_OVR_UDST_NO_UPDATE | MS_REG_OVR_RESERVED) /* Management Flag */ #define MS_REG_MNG_SCMS0 0x20 /* serial copy management system */ #define MS_REG_MNG_SCMS1 0x10 #define MS_REG_MNG_SCMS_MASK (MS_REG_MNG_SCMS0 | MS_REG_MNG_SCMS1) #define MS_REG_MNG_SCMS_COPY_OK (MS_REG_MNG_SCMS0 | MS_REG_MNG_SCMS1) #define MS_REG_MNG_SCMS_ONE_COPY MS_REG_MNG_SCMS1 #define MS_REG_MNG_SCMS_NO_COPY 0x00 #define MS_REG_MNG_ATFLG 0x08 /* address transfer table flag */ #define MS_REG_MNG_ATFLG_OTHER MS_REG_MNG_ATFLG /* other */ #define MS_REG_MNG_ATFLG_ATTBL 0x00 /* address transfer table */ #define MS_REG_MNG_SYSFLG 0x04 /* system flag */ #define MS_REG_MNG_SYSFLG_USER MS_REG_MNG_SYSFLG /* user block */ #define MS_REG_MNG_SYSFLG_BOOT 0x00 /* system block */ #define MS_REG_MNG_RESERVED 0xc3 #define MS_REG_MNG_DEFAULT (MS_REG_MNG_SCMS_COPY_OK | MS_REG_MNG_ATFLG_OTHER | MS_REG_MNG_SYSFLG_USER | MS_REG_MNG_RESERVED) #define MS_MAX_PAGES_PER_BLOCK 32 #define MS_MAX_INITIAL_ERROR_BLOCKS 10 #define MS_LIB_BITS_PER_BYTE 8 #define MS_SYSINF_FORMAT_FAT 1 #define MS_SYSINF_USAGE_GENERAL 0 #define MS_SYSINF_MSCLASS_TYPE_1 1 #define MS_SYSINF_PAGE_SIZE MS_BYTES_PER_PAGE /* fixed */ #define MS_SYSINF_CARDTYPE_RDONLY 1 #define MS_SYSINF_CARDTYPE_RDWR 2 #define MS_SYSINF_CARDTYPE_HYBRID 3 #define MS_SYSINF_SECURITY 0x01 #define MS_SYSINF_SECURITY_NO_SUPPORT MS_SYSINF_SECURITY #define MS_SYSINF_SECURITY_SUPPORT 0 #define MS_SYSINF_RESERVED1 1 #define MS_SYSINF_RESERVED2 1 #define MS_SYSENT_TYPE_INVALID_BLOCK 0x01 #define MS_SYSENT_TYPE_CIS_IDI 0x0a /* CIS/IDI */ #define SIZE_OF_KIRO 1024 #define BYTE_MASK 0xff /* ms error code */ #define MS_STATUS_WRITE_PROTECT 0x0106 #define MS_STATUS_SUCCESS 0x0000 #define MS_ERROR_FLASH_READ 0x8003 #define MS_ERROR_FLASH_ERASE 0x8005 #define MS_LB_ERROR 0xfff0 #define MS_LB_BOOT_BLOCK 0xfff1 #define MS_LB_INITIAL_ERROR 0xfff2 #define MS_STATUS_SUCCESS_WITH_ECC 0xfff3 #define MS_LB_ACQUIRED_ERROR 0xfff4 #define MS_LB_NOT_USED_ERASED 0xfff5 #define MS_NOCARD_ERROR 0xfff8 #define MS_NO_MEMORY_ERROR 0xfff9 #define MS_STATUS_INT_ERROR 0xfffa #define MS_STATUS_ERROR 0xfffe #define MS_LB_NOT_USED 0xffff #define MS_REG_MNG_SYSFLG 0x04 /* system flag */ #define MS_REG_MNG_SYSFLG_USER MS_REG_MNG_SYSFLG /* user block */ #define MS_BOOT_BLOCK_ID 0x0001 #define MS_BOOT_BLOCK_FORMAT_VERSION 0x0100 #define MS_BOOT_BLOCK_DATA_ENTRIES 2 #define MS_NUMBER_OF_SYSTEM_ENTRY 4 #define MS_NUMBER_OF_BOOT_BLOCK 2 #define MS_BYTES_PER_PAGE 512 #define MS_LOGICAL_BLOCKS_PER_SEGMENT 496 #define MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT 494 
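/*
 * Segment geometry, as used by ms_lib_phy_to_log_range() below: each
 * 512-physical-block segment backs 496 logical blocks, except the first
 * segment, which also holds the two boot blocks and therefore maps only
 * 494 logical blocks.
 */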
#define MS_PHYSICAL_BLOCKS_PER_SEGMENT 0x200 /* 512 */ #define MS_PHYSICAL_BLOCKS_PER_SEGMENT_MASK 0x1ff /* overwrite area */ #define MS_REG_OVR_BKST 0x80 /* block status */ #define MS_REG_OVR_BKST_OK MS_REG_OVR_BKST /* OK */ #define MS_REG_OVR_BKST_NG 0x00 /* NG */ /* Status Register 1 */ #define MS_REG_ST1_DTER 0x20 /* error on data(corrected) */ #define MS_REG_ST1_EXER 0x08 /* error on extra(corrected) */ #define MS_REG_ST1_FGER 0x02 /* error on overwrite flag(corrected) */ /* MemoryStick Register */ /* Status Register 0 */ #define MS_REG_ST0_WP 0x01 /* write protected */ #define MS_REG_ST0_WP_ON MS_REG_ST0_WP #define MS_LIB_CTRL_RDONLY 0 #define MS_LIB_CTRL_WRPROTECT 1 /*dphy->log table */ #define ms_libconv_to_logical(pdx, PhyBlock) (((PhyBlock) >= (pdx)->MS_Lib.NumberOfPhyBlock) ? MS_STATUS_ERROR : (pdx)->MS_Lib.Phy2LogMap[PhyBlock]) #define ms_libconv_to_physical(pdx, LogBlock) (((LogBlock) >= (pdx)->MS_Lib.NumberOfLogBlock) ? MS_STATUS_ERROR : (pdx)->MS_Lib.Log2PhyMap[LogBlock]) #define ms_lib_ctrl_set(pdx, Flag) ((pdx)->MS_Lib.flags |= (1 << (Flag))) #define ms_lib_ctrl_reset(pdx, Flag) ((pdx)->MS_Lib.flags &= ~(1 << (Flag))) #define ms_lib_ctrl_check(pdx, Flag) ((pdx)->MS_Lib.flags & (1 << (Flag))) #define ms_lib_iswritable(pdx) ((ms_lib_ctrl_check((pdx), MS_LIB_CTRL_RDONLY) == 0) && (ms_lib_ctrl_check(pdx, MS_LIB_CTRL_WRPROTECT) == 0)) #define ms_lib_clear_pagemap(pdx) memset((pdx)->MS_Lib.pagemap, 0, sizeof((pdx)->MS_Lib.pagemap)) #define memstick_logaddr(logadr1, logadr0) ((((u16)(logadr1)) << 8) | (logadr0)) /* SD_STATUS bits */ #define SD_Insert BIT(0) #define SD_Ready BIT(1) #define SD_MediaChange BIT(2) #define SD_IsMMC BIT(3) #define SD_HiCapacity BIT(4) #define SD_HiSpeed BIT(5) #define SD_WtP BIT(6) /* Bit 7 reserved */ /* MS_STATUS bits */ #define MS_Insert BIT(0) #define MS_Ready BIT(1) #define MS_MediaChange BIT(2) #define MS_IsMSPro BIT(3) #define MS_IsMSPHG BIT(4) /* Bit 5 reserved */ #define MS_WtP BIT(6) /* Bit 7 reserved */ /* SM_STATUS bits */ #define SM_Insert BIT(0) #define SM_Ready BIT(1) #define SM_MediaChange BIT(2) /* Bits 3-5 reserved */ #define SM_WtP BIT(6) #define SM_IsMS BIT(7) struct ms_bootblock_cis { u8 bCistplDEVICE[6]; /* 0 */ u8 bCistplDEVICE0C[6]; /* 6 */ u8 bCistplJEDECC[4]; /* 12 */ u8 bCistplMANFID[6]; /* 16 */ u8 bCistplVER1[32]; /* 22 */ u8 bCistplFUNCID[4]; /* 54 */ u8 bCistplFUNCE0[4]; /* 58 */ u8 bCistplFUNCE1[5]; /* 62 */ u8 bCistplCONF[7]; /* 67 */ u8 bCistplCFTBLENT0[10];/* 74 */ u8 bCistplCFTBLENT1[8]; /* 84 */ u8 bCistplCFTBLENT2[12];/* 92 */ u8 bCistplCFTBLENT3[8]; /* 104 */ u8 bCistplCFTBLENT4[17];/* 112 */ u8 bCistplCFTBLENT5[8]; /* 129 */ u8 bCistplCFTBLENT6[17];/* 137 */ u8 bCistplCFTBLENT7[8]; /* 154 */ u8 bCistplNOLINK[3]; /* 162 */ } ; struct ms_bootblock_idi { #define MS_IDI_GENERAL_CONF 0x848A u16 wIDIgeneralConfiguration; /* 0 */ u16 wIDInumberOfCylinder; /* 1 */ u16 wIDIreserved0; /* 2 */ u16 wIDInumberOfHead; /* 3 */ u16 wIDIbytesPerTrack; /* 4 */ u16 wIDIbytesPerSector; /* 5 */ u16 wIDIsectorsPerTrack; /* 6 */ u16 wIDItotalSectors[2]; /* 7-8 high,low */ u16 wIDIreserved1[11]; /* 9-19 */ u16 wIDIbufferType; /* 20 */ u16 wIDIbufferSize; /* 21 */ u16 wIDIlongCmdECC; /* 22 */ u16 wIDIfirmVersion[4]; /* 23-26 */ u16 wIDImodelName[20]; /* 27-46 */ u16 wIDIreserved2; /* 47 */ u16 wIDIlongWordSupported; /* 48 */ u16 wIDIdmaSupported; /* 49 */ u16 wIDIreserved3; /* 50 */ u16 wIDIpioTiming; /* 51 */ u16 wIDIdmaTiming; /* 52 */ u16 wIDItransferParameter; /* 53 */ u16 wIDIformattedCylinder; /* 54 */ u16 wIDIformattedHead; /* 55 
*/ u16 wIDIformattedSectorsPerTrack;/* 56 */ u16 wIDIformattedTotalSectors[2];/* 57-58 */ u16 wIDImultiSector; /* 59 */ u16 wIDIlbaSectors[2]; /* 60-61 */ u16 wIDIsingleWordDMA; /* 62 */ u16 wIDImultiWordDMA; /* 63 */ u16 wIDIreserved4[192]; /* 64-255 */ }; struct ms_bootblock_sysent_rec { u32 dwStart; u32 dwSize; u8 bType; u8 bReserved[3]; }; struct ms_bootblock_sysent { struct ms_bootblock_sysent_rec entry[MS_NUMBER_OF_SYSTEM_ENTRY]; }; struct ms_bootblock_sysinf { u8 bMsClass; /* must be 1 */ u8 bCardType; /* see below */ u16 wBlockSize; /* n KB */ u16 wBlockNumber; /* number of physical block */ u16 wTotalBlockNumber; /* number of logical block */ u16 wPageSize; /* must be 0x200 */ u8 bExtraSize; /* 0x10 */ u8 bSecuritySupport; u8 bAssemblyDate[8]; u8 bFactoryArea[4]; u8 bAssemblyMakerCode; u8 bAssemblyMachineCode[3]; u16 wMemoryMakerCode; u16 wMemoryDeviceCode; u16 wMemorySize; u8 bReserved1; u8 bReserved2; u8 bVCC; u8 bVPP; u16 wControllerChipNumber; u16 wControllerFunction; /* New MS */ u8 bReserved3[9]; /* New MS */ u8 bParallelSupport; /* New MS */ u16 wFormatValue; /* New MS */ u8 bFormatType; u8 bUsage; u8 bDeviceType; u8 bReserved4[22]; u8 bFUValue3; u8 bFUValue4; u8 bReserved5[15]; }; struct ms_bootblock_header { u16 wBlockID; u16 wFormatVersion; u8 bReserved1[184]; u8 bNumberOfDataEntry; u8 bReserved2[179]; }; struct ms_bootblock_page0 { struct ms_bootblock_header header; struct ms_bootblock_sysent sysent; struct ms_bootblock_sysinf sysinf; }; struct ms_bootblock_cis_idi { union { struct ms_bootblock_cis cis; u8 dmy[256]; } cis; union { struct ms_bootblock_idi idi; u8 dmy[256]; } idi; }; /* ENE MS Lib struct */ struct ms_lib_type_extdat { u8 reserved; u8 intr; u8 status0; u8 status1; u8 ovrflg; u8 mngflg; u16 logadr; }; struct ms_lib_ctrl { u32 flags; u32 BytesPerSector; u32 NumberOfCylinder; u32 SectorsPerCylinder; u16 cardType; /* R/W, RO, Hybrid */ u16 blockSize; u16 PagesPerBlock; u16 NumberOfPhyBlock; u16 NumberOfLogBlock; u16 NumberOfSegment; u16 *Phy2LogMap; /* phy2log table */ u16 *Log2PhyMap; /* log2phy table */ u16 wrtblk; unsigned char *pagemap[(MS_MAX_PAGES_PER_BLOCK + (MS_LIB_BITS_PER_BYTE-1)) / MS_LIB_BITS_PER_BYTE]; unsigned char *blkpag; struct ms_lib_type_extdat *blkext; unsigned char copybuf[512]; }; /* SD Block Length */ /* 2^9 = 512 Bytes, The HW maximum read/write data length */ #define SD_BLOCK_LEN 9 struct ene_ub6250_info { /* I/O bounce buffer */ u8 *bbuf; /* for 6250 code */ u8 SD_Status; u8 MS_Status; u8 SM_Status; /* ----- SD Control Data ---------------- */ /*SD_REGISTER SD_Regs; */ u16 SD_Block_Mult; u8 SD_READ_BL_LEN; u16 SD_C_SIZE; u8 SD_C_SIZE_MULT; /* SD/MMC New spec. 
*/ u8 SD_SPEC_VER; u8 SD_CSD_VER; u8 SD20_HIGH_CAPACITY; u32 HC_C_SIZE; u8 MMC_SPEC_VER; u8 MMC_BusWidth; u8 MMC_HIGH_CAPACITY; /*----- MS Control Data ---------------- */ bool MS_SWWP; u32 MSP_TotalBlock; struct ms_lib_ctrl MS_Lib; bool MS_IsRWPage; u16 MS_Model; /*----- SM Control Data ---------------- */ u8 SM_DeviceID; u8 SM_CardID; unsigned char *testbuf; u8 BIN_FLAG; u32 bl_num; int SrbStatus; /*------Power Managerment ---------------*/ bool Power_IsResum; }; static int ene_sd_init(struct us_data *us); static int ene_ms_init(struct us_data *us); static int ene_load_bincode(struct us_data *us, unsigned char flag); static void ene_ub6250_info_destructor(void *extra) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) extra; if (!extra) return; kfree(info->bbuf); } static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf; int result; unsigned int residue; unsigned int cswlen = 0, partial = 0; unsigned int transfer_length = bcb->DataTransferLength; /* usb_stor_dbg(us, "transport --- ene_send_scsi_cmd\n"); */ /* send cmd to out endpoint */ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, NULL); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "send cmd to out endpoint fail ---\n"); return USB_STOR_TRANSPORT_ERROR; } if (buf) { unsigned int pipe = fDir; if (fDir == FDIR_READ) pipe = us->recv_bulk_pipe; else pipe = us->send_bulk_pipe; /* Bulk */ if (use_sg) { result = usb_stor_bulk_srb(us, pipe, us->srb); } else { result = usb_stor_bulk_transfer_sg(us, pipe, buf, transfer_length, 0, &partial); } if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "data transfer fail ---\n"); return USB_STOR_TRANSPORT_ERROR; } } /* Get CSW for device status */ result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen); if (result == USB_STOR_XFER_SHORT && cswlen == 0) { usb_stor_dbg(us, "Received 0-length CSW; retrying...\n"); result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen); } if (result == USB_STOR_XFER_STALLED) { /* get the status again */ usb_stor_dbg(us, "Attempting to get CSW (2nd try)...\n"); result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, NULL); } if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* check bulk status */ residue = le32_to_cpu(bcs->Residue); /* * try to compute the actual residue, based on how much data * was really transferred and what the device tells us */ if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) { residue = min(residue, transfer_length); if (us->srb != NULL) scsi_set_resid(us->srb, max(scsi_get_resid(us->srb), residue)); } if (bcs->Status != US_BULK_STAT_OK) return USB_STOR_TRANSPORT_ERROR; return USB_STOR_TRANSPORT_GOOD; } static int do_scsi_request_sense(struct us_data *us, struct scsi_cmnd *srb) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; unsigned char buf[18]; memset(buf, 0, 18); buf[0] = 0x70; /* Current error */ buf[2] = info->SrbStatus >> 16; /* Sense key */ buf[7] = 10; /* Additional length */ buf[12] = info->SrbStatus >> 8; /* ASC */ buf[13] = info->SrbStatus; /* ASCQ */ usb_stor_set_xfer_buf(buf, sizeof(buf), srb); return USB_STOR_TRANSPORT_GOOD; } static int do_scsi_inquiry(struct us_data *us, struct scsi_cmnd *srb) { unsigned char data_ptr[36] = { 0x00, 0x00, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x55, 
0x53, 0x42, 0x32, 0x2E, 0x30, 0x20, 0x20, 0x43, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x31, 0x30, 0x30 }; usb_stor_set_xfer_buf(data_ptr, 36, srb); return USB_STOR_TRANSPORT_GOOD; } static int sd_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; if ((info->SD_Status & SD_Insert) && (info->SD_Status & SD_Ready)) return USB_STOR_TRANSPORT_GOOD; else { ene_sd_init(us); return USB_STOR_TRANSPORT_GOOD; } return USB_STOR_TRANSPORT_GOOD; } static int sd_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; unsigned char mediaNoWP[12] = { 0x0b, 0x00, 0x00, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 }; unsigned char mediaWP[12] = { 0x0b, 0x00, 0x80, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 }; if (info->SD_Status & SD_WtP) usb_stor_set_xfer_buf(mediaWP, 12, srb); else usb_stor_set_xfer_buf(mediaNoWP, 12, srb); return USB_STOR_TRANSPORT_GOOD; } static int sd_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb) { u32 bl_num; u32 bl_len; unsigned int offset = 0; unsigned char buf[8]; struct scatterlist *sg = NULL; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; usb_stor_dbg(us, "sd_scsi_read_capacity\n"); if (info->SD_Status & SD_HiCapacity) { bl_len = 0x200; if (info->SD_Status & SD_IsMMC) bl_num = info->HC_C_SIZE-1; else bl_num = (info->HC_C_SIZE + 1) * 1024 - 1; } else { bl_len = 1 << (info->SD_READ_BL_LEN); bl_num = info->SD_Block_Mult * (info->SD_C_SIZE + 1) * (1 << (info->SD_C_SIZE_MULT + 2)) - 1; } info->bl_num = bl_num; usb_stor_dbg(us, "bl_len = %x\n", bl_len); usb_stor_dbg(us, "bl_num = %x\n", bl_num); /*srb->request_bufflen = 8; */ buf[0] = (bl_num >> 24) & 0xff; buf[1] = (bl_num >> 16) & 0xff; buf[2] = (bl_num >> 8) & 0xff; buf[3] = (bl_num >> 0) & 0xff; buf[4] = (bl_len >> 24) & 0xff; buf[5] = (bl_len >> 16) & 0xff; buf[6] = (bl_len >> 8) & 0xff; buf[7] = (bl_len >> 0) & 0xff; usb_stor_access_xfer_buf(buf, 8, srb, &sg, &offset, TO_XFER_BUF); return USB_STOR_TRANSPORT_GOOD; } static int sd_scsi_read(struct us_data *us, struct scsi_cmnd *srb) { int result; unsigned char *cdb = srb->cmnd; struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; u32 bn = ((cdb[2] << 24) & 0xff000000) | ((cdb[3] << 16) & 0x00ff0000) | ((cdb[4] << 8) & 0x0000ff00) | ((cdb[5] << 0) & 0x000000ff); u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff); u32 bnByte = bn * 0x200; u32 blenByte = blen * 0x200; if (bn > info->bl_num) return USB_STOR_TRANSPORT_ERROR; result = ene_load_bincode(us, SD_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Load SD RW pattern Fail !!\n"); return USB_STOR_TRANSPORT_ERROR; } if (info->SD_Status & SD_HiCapacity) bnByte = bn; /* set up the command wrapper */ memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = blenByte; bcb->Flags = US_BULK_FLAG_IN; bcb->CDB[0] = 0xF1; bcb->CDB[5] = (unsigned char)(bnByte); bcb->CDB[4] = (unsigned char)(bnByte>>8); bcb->CDB[3] = (unsigned char)(bnByte>>16); bcb->CDB[2] = (unsigned char)(bnByte>>24); result = ene_send_scsi_cmd(us, FDIR_READ, scsi_sglist(srb), 1); return result; } static int sd_scsi_write(struct us_data *us, struct scsi_cmnd *srb) { int result; unsigned char *cdb = srb->cmnd; struct bulk_cb_wrap *bcb = 
(struct bulk_cb_wrap *) us->iobuf; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; u32 bn = ((cdb[2] << 24) & 0xff000000) | ((cdb[3] << 16) & 0x00ff0000) | ((cdb[4] << 8) & 0x0000ff00) | ((cdb[5] << 0) & 0x000000ff); u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff); u32 bnByte = bn * 0x200; u32 blenByte = blen * 0x200; if (bn > info->bl_num) return USB_STOR_TRANSPORT_ERROR; result = ene_load_bincode(us, SD_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Load SD RW pattern Fail !!\n"); return USB_STOR_TRANSPORT_ERROR; } if (info->SD_Status & SD_HiCapacity) bnByte = bn; /* set up the command wrapper */ memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = blenByte; bcb->Flags = US_BULK_FLAG_OUT; bcb->CDB[0] = 0xF0; bcb->CDB[5] = (unsigned char)(bnByte); bcb->CDB[4] = (unsigned char)(bnByte>>8); bcb->CDB[3] = (unsigned char)(bnByte>>16); bcb->CDB[2] = (unsigned char)(bnByte>>24); result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1); return result; } /* * ENE MS Card */ static int ms_lib_set_logicalpair(struct us_data *us, u16 logblk, u16 phyblk) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; if ((logblk >= info->MS_Lib.NumberOfLogBlock) || (phyblk >= info->MS_Lib.NumberOfPhyBlock)) return (u32)-1; info->MS_Lib.Phy2LogMap[phyblk] = logblk; info->MS_Lib.Log2PhyMap[logblk] = phyblk; return 0; } static int ms_lib_set_logicalblockmark(struct us_data *us, u16 phyblk, u16 mark) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; if (phyblk >= info->MS_Lib.NumberOfPhyBlock) return (u32)-1; info->MS_Lib.Phy2LogMap[phyblk] = mark; return 0; } static int ms_lib_set_initialerrorblock(struct us_data *us, u16 phyblk) { return ms_lib_set_logicalblockmark(us, phyblk, MS_LB_INITIAL_ERROR); } static int ms_lib_set_bootblockmark(struct us_data *us, u16 phyblk) { return ms_lib_set_logicalblockmark(us, phyblk, MS_LB_BOOT_BLOCK); } static int ms_lib_free_logicalmap(struct us_data *us) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; kfree(info->MS_Lib.Phy2LogMap); info->MS_Lib.Phy2LogMap = NULL; kfree(info->MS_Lib.Log2PhyMap); info->MS_Lib.Log2PhyMap = NULL; return 0; } static int ms_lib_alloc_logicalmap(struct us_data *us) { u32 i; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; info->MS_Lib.Phy2LogMap = kmalloc_array(info->MS_Lib.NumberOfPhyBlock, sizeof(u16), GFP_KERNEL); info->MS_Lib.Log2PhyMap = kmalloc_array(info->MS_Lib.NumberOfLogBlock, sizeof(u16), GFP_KERNEL); if ((info->MS_Lib.Phy2LogMap == NULL) || (info->MS_Lib.Log2PhyMap == NULL)) { ms_lib_free_logicalmap(us); return (u32)-1; } for (i = 0; i < info->MS_Lib.NumberOfPhyBlock; i++) info->MS_Lib.Phy2LogMap[i] = MS_LB_NOT_USED; for (i = 0; i < info->MS_Lib.NumberOfLogBlock; i++) info->MS_Lib.Log2PhyMap[i] = MS_LB_NOT_USED; return 0; } static void ms_lib_clear_writebuf(struct us_data *us) { int i; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; info->MS_Lib.wrtblk = (u16)-1; ms_lib_clear_pagemap(info); if (info->MS_Lib.blkpag) memset(info->MS_Lib.blkpag, 0xff, info->MS_Lib.PagesPerBlock * info->MS_Lib.BytesPerSector); if (info->MS_Lib.blkext) { for (i = 0; i < info->MS_Lib.PagesPerBlock; i++) { info->MS_Lib.blkext[i].status1 = MS_REG_ST1_DEFAULT; info->MS_Lib.blkext[i].ovrflg = MS_REG_OVR_DEFAULT; info->MS_Lib.blkext[i].mngflg = MS_REG_MNG_DEFAULT; info->MS_Lib.blkext[i].logadr = MS_LB_NOT_USED; } } } static int 
ms_count_freeblock(struct us_data *us, u16 PhyBlock) { u32 Ende, Count; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; Ende = PhyBlock + MS_PHYSICAL_BLOCKS_PER_SEGMENT; for (Count = 0; PhyBlock < Ende; PhyBlock++) { switch (info->MS_Lib.Phy2LogMap[PhyBlock]) { case MS_LB_NOT_USED: case MS_LB_NOT_USED_ERASED: Count++; break; default: break; } } return Count; } static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr, u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; u8 *bbuf = info->bbuf; int result; u32 bn = PhyBlockAddr * 0x20 + PageNum; result = ene_load_bincode(us, MS_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* Read Page Data */ memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200; bcb->Flags = US_BULK_FLAG_IN; bcb->CDB[0] = 0xF1; bcb->CDB[1] = 0x02; /* in init.c ENE_MSInit() is 0x01 */ bcb->CDB[5] = (unsigned char)(bn); bcb->CDB[4] = (unsigned char)(bn>>8); bcb->CDB[3] = (unsigned char)(bn>>16); bcb->CDB[2] = (unsigned char)(bn>>24); result = ene_send_scsi_cmd(us, FDIR_READ, PageBuf, 0); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* Read Extra Data */ memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x4; bcb->Flags = US_BULK_FLAG_IN; bcb->CDB[0] = 0xF1; bcb->CDB[1] = 0x03; bcb->CDB[5] = (unsigned char)(PageNum); bcb->CDB[4] = (unsigned char)(PhyBlockAddr); bcb->CDB[3] = (unsigned char)(PhyBlockAddr>>8); bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16); bcb->CDB[6] = 0x01; result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; ExtraDat->reserved = 0; ExtraDat->intr = 0x80; /* Not yet,fireware support */ ExtraDat->status0 = 0x10; /* Not yet,fireware support */ ExtraDat->status1 = 0x00; /* Not yet,fireware support */ ExtraDat->ovrflg = bbuf[0]; ExtraDat->mngflg = bbuf[1]; ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]); return USB_STOR_TRANSPORT_GOOD; } static int ms_lib_process_bootblock(struct us_data *us, u16 PhyBlock, u8 *PageData) { struct ms_bootblock_sysent *SysEntry; struct ms_bootblock_sysinf *SysInfo; u32 i, result; u8 PageNumber; u8 *PageBuffer; struct ms_lib_type_extdat ExtraData; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; PageBuffer = kzalloc(MS_BYTES_PER_PAGE * 2, GFP_KERNEL); if (PageBuffer == NULL) return (u32)-1; result = (u32)-1; SysInfo = &(((struct ms_bootblock_page0 *)PageData)->sysinf); if ((SysInfo->bMsClass != MS_SYSINF_MSCLASS_TYPE_1) || (be16_to_cpu(SysInfo->wPageSize) != MS_SYSINF_PAGE_SIZE) || ((SysInfo->bSecuritySupport & MS_SYSINF_SECURITY) == MS_SYSINF_SECURITY_SUPPORT) || (SysInfo->bReserved1 != MS_SYSINF_RESERVED1) || (SysInfo->bReserved2 != MS_SYSINF_RESERVED2) || (SysInfo->bFormatType != MS_SYSINF_FORMAT_FAT) || (SysInfo->bUsage != MS_SYSINF_USAGE_GENERAL)) goto exit; /* */ switch (info->MS_Lib.cardType = SysInfo->bCardType) { case MS_SYSINF_CARDTYPE_RDONLY: ms_lib_ctrl_set(info, MS_LIB_CTRL_RDONLY); break; case MS_SYSINF_CARDTYPE_RDWR: ms_lib_ctrl_reset(info, MS_LIB_CTRL_RDONLY); break; case MS_SYSINF_CARDTYPE_HYBRID: default: goto exit; } info->MS_Lib.blockSize = be16_to_cpu(SysInfo->wBlockSize); info->MS_Lib.NumberOfPhyBlock = be16_to_cpu(SysInfo->wBlockNumber); 
info->MS_Lib.NumberOfLogBlock = be16_to_cpu(SysInfo->wTotalBlockNumber)-2; info->MS_Lib.PagesPerBlock = info->MS_Lib.blockSize * SIZE_OF_KIRO / MS_BYTES_PER_PAGE; info->MS_Lib.NumberOfSegment = info->MS_Lib.NumberOfPhyBlock / MS_PHYSICAL_BLOCKS_PER_SEGMENT; info->MS_Model = be16_to_cpu(SysInfo->wMemorySize); /*Allocate to all number of logicalblock and physicalblock */ if (ms_lib_alloc_logicalmap(us)) goto exit; /* Mark the book block */ ms_lib_set_bootblockmark(us, PhyBlock); SysEntry = &(((struct ms_bootblock_page0 *)PageData)->sysent); for (i = 0; i < MS_NUMBER_OF_SYSTEM_ENTRY; i++) { u32 EntryOffset, EntrySize; EntryOffset = be32_to_cpu(SysEntry->entry[i].dwStart); if (EntryOffset == 0xffffff) continue; EntrySize = be32_to_cpu(SysEntry->entry[i].dwSize); if (EntrySize == 0) continue; if (EntryOffset + MS_BYTES_PER_PAGE + EntrySize > info->MS_Lib.blockSize * (u32)SIZE_OF_KIRO) continue; if (i == 0) { u8 PrevPageNumber = 0; u16 phyblk; if (SysEntry->entry[i].bType != MS_SYSENT_TYPE_INVALID_BLOCK) goto exit; while (EntrySize > 0) { PageNumber = (u8)(EntryOffset / MS_BYTES_PER_PAGE + 1); if (PageNumber != PrevPageNumber) { switch (ms_read_readpage(us, PhyBlock, PageNumber, (u32 *)PageBuffer, &ExtraData)) { case MS_STATUS_SUCCESS: break; case MS_STATUS_WRITE_PROTECT: case MS_ERROR_FLASH_READ: case MS_STATUS_ERROR: default: goto exit; } PrevPageNumber = PageNumber; } phyblk = be16_to_cpu(*(u16 *)(PageBuffer + (EntryOffset % MS_BYTES_PER_PAGE))); if (phyblk < 0x0fff) ms_lib_set_initialerrorblock(us, phyblk); EntryOffset += 2; EntrySize -= 2; } } else if (i == 1) { /* CIS/IDI */ struct ms_bootblock_idi *idi; if (SysEntry->entry[i].bType != MS_SYSENT_TYPE_CIS_IDI) goto exit; switch (ms_read_readpage(us, PhyBlock, (u8)(EntryOffset / MS_BYTES_PER_PAGE + 1), (u32 *)PageBuffer, &ExtraData)) { case MS_STATUS_SUCCESS: break; case MS_STATUS_WRITE_PROTECT: case MS_ERROR_FLASH_READ: case MS_STATUS_ERROR: default: goto exit; } idi = &((struct ms_bootblock_cis_idi *)(PageBuffer + (EntryOffset % MS_BYTES_PER_PAGE)))->idi.idi; if (le16_to_cpu(idi->wIDIgeneralConfiguration) != MS_IDI_GENERAL_CONF) goto exit; info->MS_Lib.BytesPerSector = le16_to_cpu(idi->wIDIbytesPerSector); if (info->MS_Lib.BytesPerSector != MS_BYTES_PER_PAGE) goto exit; } } /* End for .. */ result = 0; exit: if (result) ms_lib_free_logicalmap(us); kfree(PageBuffer); result = 0; return result; } static void ms_lib_free_writebuf(struct us_data *us) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; info->MS_Lib.wrtblk = (u16)-1; /* set to -1 */ /* memset((fdoExt)->MS_Lib.pagemap, 0, sizeof((fdoExt)->MS_Lib.pagemap)) */ ms_lib_clear_pagemap(info); /* (pdx)->MS_Lib.pagemap memset 0 in ms.h */ if (info->MS_Lib.blkpag) { kfree(info->MS_Lib.blkpag); /* Arnold test ... */ info->MS_Lib.blkpag = NULL; } if (info->MS_Lib.blkext) { kfree(info->MS_Lib.blkext); /* Arnold test ... 
*/ info->MS_Lib.blkext = NULL; } } static void ms_lib_free_allocatedarea(struct us_data *us) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; ms_lib_free_writebuf(us); /* Free MS_Lib.pagemap */ ms_lib_free_logicalmap(us); /* kfree MS_Lib.Phy2LogMap and MS_Lib.Log2PhyMap */ /* set struct us point flag to 0 */ info->MS_Lib.flags = 0; info->MS_Lib.BytesPerSector = 0; info->MS_Lib.SectorsPerCylinder = 0; info->MS_Lib.cardType = 0; info->MS_Lib.blockSize = 0; info->MS_Lib.PagesPerBlock = 0; info->MS_Lib.NumberOfPhyBlock = 0; info->MS_Lib.NumberOfLogBlock = 0; } static int ms_lib_alloc_writebuf(struct us_data *us) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; info->MS_Lib.wrtblk = (u16)-1; info->MS_Lib.blkpag = kmalloc_array(info->MS_Lib.PagesPerBlock, info->MS_Lib.BytesPerSector, GFP_KERNEL); info->MS_Lib.blkext = kmalloc_array(info->MS_Lib.PagesPerBlock, sizeof(struct ms_lib_type_extdat), GFP_KERNEL); if ((info->MS_Lib.blkpag == NULL) || (info->MS_Lib.blkext == NULL)) { ms_lib_free_writebuf(us); return (u32)-1; } ms_lib_clear_writebuf(us); return 0; } static int ms_lib_force_setlogical_pair(struct us_data *us, u16 logblk, u16 phyblk) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; if (logblk == MS_LB_NOT_USED) return 0; if ((logblk >= info->MS_Lib.NumberOfLogBlock) || (phyblk >= info->MS_Lib.NumberOfPhyBlock)) return (u32)-1; info->MS_Lib.Phy2LogMap[phyblk] = logblk; info->MS_Lib.Log2PhyMap[logblk] = phyblk; return 0; } static int ms_read_copyblock(struct us_data *us, u16 oldphy, u16 newphy, u16 PhyBlockAddr, u8 PageNum, unsigned char *buf, u16 len) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; int result; result = ene_load_bincode(us, MS_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200*len; bcb->Flags = US_BULK_FLAG_OUT; bcb->CDB[0] = 0xF0; bcb->CDB[1] = 0x08; bcb->CDB[4] = (unsigned char)(oldphy); bcb->CDB[3] = (unsigned char)(oldphy>>8); bcb->CDB[2] = 0; /* (BYTE)(oldphy>>16) */ bcb->CDB[7] = (unsigned char)(newphy); bcb->CDB[6] = (unsigned char)(newphy>>8); bcb->CDB[5] = 0; /* (BYTE)(newphy>>16) */ bcb->CDB[9] = (unsigned char)(PhyBlockAddr); bcb->CDB[8] = (unsigned char)(PhyBlockAddr>>8); bcb->CDB[10] = PageNum; result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; return USB_STOR_TRANSPORT_GOOD; } static int ms_read_eraseblock(struct us_data *us, u32 PhyBlockAddr) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; int result; u32 bn = PhyBlockAddr; result = ene_load_bincode(us, MS_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200; bcb->Flags = US_BULK_FLAG_IN; bcb->CDB[0] = 0xF2; bcb->CDB[1] = 0x06; bcb->CDB[4] = (unsigned char)(bn); bcb->CDB[3] = (unsigned char)(bn>>8); bcb->CDB[2] = (unsigned char)(bn>>16); result = ene_send_scsi_cmd(us, FDIR_READ, NULL, 0); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; return USB_STOR_TRANSPORT_GOOD; } static int ms_lib_check_disableblock(struct us_data *us, u16 PhyBlock) { unsigned char *PageBuf = NULL; u16 result = MS_STATUS_SUCCESS; u16 blk, index = 0; struct ms_lib_type_extdat extdat; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; PageBuf 
= kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL); if (PageBuf == NULL) { result = MS_NO_MEMORY_ERROR; goto exit; } ms_read_readpage(us, PhyBlock, 1, (u32 *)PageBuf, &extdat); do { blk = be16_to_cpu(PageBuf[index]); if (blk == MS_LB_NOT_USED) break; if (blk == info->MS_Lib.Log2PhyMap[0]) { result = MS_ERROR_FLASH_READ; break; } index++; } while (1); exit: kfree(PageBuf); return result; } static int ms_lib_setacquired_errorblock(struct us_data *us, u16 phyblk) { u16 log; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; if (phyblk >= info->MS_Lib.NumberOfPhyBlock) return (u32)-1; log = info->MS_Lib.Phy2LogMap[phyblk]; if (log < info->MS_Lib.NumberOfLogBlock) info->MS_Lib.Log2PhyMap[log] = MS_LB_NOT_USED; if (info->MS_Lib.Phy2LogMap[phyblk] != MS_LB_INITIAL_ERROR) info->MS_Lib.Phy2LogMap[phyblk] = MS_LB_ACQUIRED_ERROR; return 0; } static int ms_lib_overwrite_extra(struct us_data *us, u32 PhyBlockAddr, u8 PageNum, u8 OverwriteFlag) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; int result; result = ene_load_bincode(us, MS_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x4; bcb->Flags = US_BULK_FLAG_IN; bcb->CDB[0] = 0xF2; bcb->CDB[1] = 0x05; bcb->CDB[5] = (unsigned char)(PageNum); bcb->CDB[4] = (unsigned char)(PhyBlockAddr); bcb->CDB[3] = (unsigned char)(PhyBlockAddr>>8); bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16); bcb->CDB[6] = OverwriteFlag; bcb->CDB[7] = 0xFF; bcb->CDB[8] = 0xFF; bcb->CDB[9] = 0xFF; result = ene_send_scsi_cmd(us, FDIR_READ, NULL, 0); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; return USB_STOR_TRANSPORT_GOOD; } static int ms_lib_error_phyblock(struct us_data *us, u16 phyblk) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; if (phyblk >= info->MS_Lib.NumberOfPhyBlock) return MS_STATUS_ERROR; ms_lib_setacquired_errorblock(us, phyblk); if (ms_lib_iswritable(info)) return ms_lib_overwrite_extra(us, phyblk, 0, (u8)(~MS_REG_OVR_BKST & BYTE_MASK)); return MS_STATUS_SUCCESS; } static int ms_lib_erase_phyblock(struct us_data *us, u16 phyblk) { u16 log; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; if (phyblk >= info->MS_Lib.NumberOfPhyBlock) return MS_STATUS_ERROR; log = info->MS_Lib.Phy2LogMap[phyblk]; if (log < info->MS_Lib.NumberOfLogBlock) info->MS_Lib.Log2PhyMap[log] = MS_LB_NOT_USED; info->MS_Lib.Phy2LogMap[phyblk] = MS_LB_NOT_USED; if (ms_lib_iswritable(info)) { switch (ms_read_eraseblock(us, phyblk)) { case MS_STATUS_SUCCESS: info->MS_Lib.Phy2LogMap[phyblk] = MS_LB_NOT_USED_ERASED; return MS_STATUS_SUCCESS; case MS_ERROR_FLASH_ERASE: case MS_STATUS_INT_ERROR: ms_lib_error_phyblock(us, phyblk); return MS_ERROR_FLASH_ERASE; case MS_STATUS_ERROR: default: ms_lib_ctrl_set(info, MS_LIB_CTRL_RDONLY); /* MS_LibCtrlSet will used by ENE_MSInit ,need check, and why us to info*/ ms_lib_setacquired_errorblock(us, phyblk); return MS_STATUS_ERROR; } } ms_lib_setacquired_errorblock(us, phyblk); return MS_STATUS_SUCCESS; } static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock, u8 PageNum, struct ms_lib_type_extdat *ExtraDat) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; u8 *bbuf = info->bbuf; int result; memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x4; bcb->Flags = 
US_BULK_FLAG_IN; bcb->CDB[0] = 0xF1; bcb->CDB[1] = 0x03; bcb->CDB[5] = (unsigned char)(PageNum); bcb->CDB[4] = (unsigned char)(PhyBlock); bcb->CDB[3] = (unsigned char)(PhyBlock>>8); bcb->CDB[2] = (unsigned char)(PhyBlock>>16); bcb->CDB[6] = 0x01; result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; ExtraDat->reserved = 0; ExtraDat->intr = 0x80; /* Not yet, waiting for fireware support */ ExtraDat->status0 = 0x10; /* Not yet, waiting for fireware support */ ExtraDat->status1 = 0x00; /* Not yet, waiting for fireware support */ ExtraDat->ovrflg = bbuf[0]; ExtraDat->mngflg = bbuf[1]; ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]); return USB_STOR_TRANSPORT_GOOD; } static int ms_libsearch_block_from_physical(struct us_data *us, u16 phyblk) { u16 blk; struct ms_lib_type_extdat extdat; /* need check */ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; if (phyblk >= info->MS_Lib.NumberOfPhyBlock) return MS_LB_ERROR; for (blk = phyblk + 1; blk != phyblk; blk++) { if ((blk & MS_PHYSICAL_BLOCKS_PER_SEGMENT_MASK) == 0) blk -= MS_PHYSICAL_BLOCKS_PER_SEGMENT; if (info->MS_Lib.Phy2LogMap[blk] == MS_LB_NOT_USED_ERASED) { return blk; } else if (info->MS_Lib.Phy2LogMap[blk] == MS_LB_NOT_USED) { switch (ms_lib_read_extra(us, blk, 0, &extdat)) { case MS_STATUS_SUCCESS: case MS_STATUS_SUCCESS_WITH_ECC: break; case MS_NOCARD_ERROR: return MS_NOCARD_ERROR; case MS_STATUS_INT_ERROR: return MS_LB_ERROR; case MS_ERROR_FLASH_READ: default: ms_lib_setacquired_errorblock(us, blk); continue; } /* End switch */ if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) { ms_lib_setacquired_errorblock(us, blk); continue; } switch (ms_lib_erase_phyblock(us, blk)) { case MS_STATUS_SUCCESS: return blk; case MS_STATUS_ERROR: return MS_LB_ERROR; case MS_ERROR_FLASH_ERASE: default: ms_lib_error_phyblock(us, blk); break; } } } /* End for */ return MS_LB_ERROR; } static int ms_libsearch_block_from_logical(struct us_data *us, u16 logblk) { u16 phyblk; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; phyblk = ms_libconv_to_physical(info, logblk); if (phyblk >= MS_LB_ERROR) { if (logblk >= info->MS_Lib.NumberOfLogBlock) return MS_LB_ERROR; phyblk = (logblk + MS_NUMBER_OF_BOOT_BLOCK) / MS_LOGICAL_BLOCKS_PER_SEGMENT; phyblk *= MS_PHYSICAL_BLOCKS_PER_SEGMENT; phyblk += MS_PHYSICAL_BLOCKS_PER_SEGMENT - 1; } return ms_libsearch_block_from_physical(us, phyblk); } static int ms_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb) { struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); /* pr_info("MS_SCSI_Test_Unit_Ready\n"); */ if ((info->MS_Status & MS_Insert) && (info->MS_Status & MS_Ready)) { return USB_STOR_TRANSPORT_GOOD; } else { ene_ms_init(us); return USB_STOR_TRANSPORT_GOOD; } return USB_STOR_TRANSPORT_GOOD; } static int ms_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; unsigned char mediaNoWP[12] = { 0x0b, 0x00, 0x00, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 }; unsigned char mediaWP[12] = { 0x0b, 0x00, 0x80, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 }; if (info->MS_Status & MS_WtP) usb_stor_set_xfer_buf(mediaWP, 12, srb); else usb_stor_set_xfer_buf(mediaNoWP, 12, srb); return USB_STOR_TRANSPORT_GOOD; } static int ms_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb) { u32 bl_num; u32 bl_len; unsigned int offset = 0; unsigned char buf[8]; struct scatterlist *sg = NULL; struct 
ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; usb_stor_dbg(us, "ms_scsi_read_capacity\n"); bl_len = 0x200; if (info->MS_Status & MS_IsMSPro) bl_num = info->MSP_TotalBlock - 1; else bl_num = info->MS_Lib.NumberOfLogBlock * info->MS_Lib.blockSize * 2 - 1; info->bl_num = bl_num; usb_stor_dbg(us, "bl_len = %x\n", bl_len); usb_stor_dbg(us, "bl_num = %x\n", bl_num); /*srb->request_bufflen = 8; */ buf[0] = (bl_num >> 24) & 0xff; buf[1] = (bl_num >> 16) & 0xff; buf[2] = (bl_num >> 8) & 0xff; buf[3] = (bl_num >> 0) & 0xff; buf[4] = (bl_len >> 24) & 0xff; buf[5] = (bl_len >> 16) & 0xff; buf[6] = (bl_len >> 8) & 0xff; buf[7] = (bl_len >> 0) & 0xff; usb_stor_access_xfer_buf(buf, 8, srb, &sg, &offset, TO_XFER_BUF); return USB_STOR_TRANSPORT_GOOD; } static void ms_lib_phy_to_log_range(u16 PhyBlock, u16 *LogStart, u16 *LogEnde) { PhyBlock /= MS_PHYSICAL_BLOCKS_PER_SEGMENT; if (PhyBlock) { *LogStart = MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT + (PhyBlock - 1) * MS_LOGICAL_BLOCKS_PER_SEGMENT;/*496*/ *LogEnde = *LogStart + MS_LOGICAL_BLOCKS_PER_SEGMENT;/*496*/ } else { *LogStart = 0; *LogEnde = MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT;/*494*/ } } static int ms_lib_read_extrablock(struct us_data *us, u32 PhyBlock, u8 PageNum, u8 blen, void *buf) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; int result; /* Read Extra Data */ memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x4 * blen; bcb->Flags = US_BULK_FLAG_IN; bcb->CDB[0] = 0xF1; bcb->CDB[1] = 0x03; bcb->CDB[5] = (unsigned char)(PageNum); bcb->CDB[4] = (unsigned char)(PhyBlock); bcb->CDB[3] = (unsigned char)(PhyBlock>>8); bcb->CDB[2] = (unsigned char)(PhyBlock>>16); bcb->CDB[6] = blen; result = ene_send_scsi_cmd(us, FDIR_READ, buf, 0); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; return USB_STOR_TRANSPORT_GOOD; } static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st) { u16 PhyBlock, newblk, i; u16 LogStart, LogEnde; struct ms_lib_type_extdat extdat; u32 count = 0, index = 0; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; u8 *bbuf = info->bbuf; for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) { ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde); for (i = 0; i < MS_PHYSICAL_BLOCKS_PER_SEGMENT; i++, PhyBlock++) { switch (ms_libconv_to_logical(info, PhyBlock)) { case MS_STATUS_ERROR: continue; default: break; } if (count == PhyBlock) { ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, bbuf); count += 0x80; } index = (PhyBlock % 0x80) * 4; extdat.ovrflg = bbuf[index]; extdat.mngflg = bbuf[index+1]; extdat.logadr = memstick_logaddr(bbuf[index+2], bbuf[index+3]); if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) { ms_lib_setacquired_errorblock(us, PhyBlock); continue; } if ((extdat.mngflg & MS_REG_MNG_ATFLG) == MS_REG_MNG_ATFLG_ATTBL) { ms_lib_erase_phyblock(us, PhyBlock); continue; } if (extdat.logadr != MS_LB_NOT_USED) { if ((extdat.logadr < LogStart) || (LogEnde <= extdat.logadr)) { ms_lib_erase_phyblock(us, PhyBlock); continue; } newblk = ms_libconv_to_physical(info, extdat.logadr); if (newblk != MS_LB_NOT_USED) { if (extdat.logadr == 0) { ms_lib_set_logicalpair(us, extdat.logadr, PhyBlock); if (ms_lib_check_disableblock(us, btBlk1st)) { ms_lib_set_logicalpair(us, extdat.logadr, newblk); continue; } } ms_lib_read_extra(us, newblk, 0, &extdat); if ((extdat.ovrflg & MS_REG_OVR_UDST) == MS_REG_OVR_UDST_UPDATING) { ms_lib_erase_phyblock(us, PhyBlock); continue; } else { 
ms_lib_erase_phyblock(us, newblk); } } ms_lib_set_logicalpair(us, extdat.logadr, PhyBlock); } } } /* End for ... */ return MS_STATUS_SUCCESS; } static int ms_scsi_read(struct us_data *us, struct scsi_cmnd *srb) { int result; unsigned char *cdb = srb->cmnd; struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; u32 bn = ((cdb[2] << 24) & 0xff000000) | ((cdb[3] << 16) & 0x00ff0000) | ((cdb[4] << 8) & 0x0000ff00) | ((cdb[5] << 0) & 0x000000ff); u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff); u32 blenByte = blen * 0x200; if (bn > info->bl_num) return USB_STOR_TRANSPORT_ERROR; if (info->MS_Status & MS_IsMSPro) { result = ene_load_bincode(us, MSP_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Load MPS RW pattern Fail !!\n"); return USB_STOR_TRANSPORT_ERROR; } /* set up the command wrapper */ memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = blenByte; bcb->Flags = US_BULK_FLAG_IN; bcb->CDB[0] = 0xF1; bcb->CDB[1] = 0x02; bcb->CDB[5] = (unsigned char)(bn); bcb->CDB[4] = (unsigned char)(bn>>8); bcb->CDB[3] = (unsigned char)(bn>>16); bcb->CDB[2] = (unsigned char)(bn>>24); result = ene_send_scsi_cmd(us, FDIR_READ, scsi_sglist(srb), 1); } else { void *buf; int offset = 0; u16 phyblk, logblk; u8 PageNum; u16 len; u32 blkno; buf = kmalloc(blenByte, GFP_KERNEL); if (buf == NULL) return USB_STOR_TRANSPORT_ERROR; result = ene_load_bincode(us, MS_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) { pr_info("Load MS RW pattern Fail !!\n"); result = USB_STOR_TRANSPORT_ERROR; goto exit; } logblk = (u16)(bn / info->MS_Lib.PagesPerBlock); PageNum = (u8)(bn % info->MS_Lib.PagesPerBlock); while (1) { if (blen > (info->MS_Lib.PagesPerBlock-PageNum)) len = info->MS_Lib.PagesPerBlock-PageNum; else len = blen; phyblk = ms_libconv_to_physical(info, logblk); blkno = phyblk * 0x20 + PageNum; /* set up the command wrapper */ memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200 * len; bcb->Flags = US_BULK_FLAG_IN; bcb->CDB[0] = 0xF1; bcb->CDB[1] = 0x02; bcb->CDB[5] = (unsigned char)(blkno); bcb->CDB[4] = (unsigned char)(blkno>>8); bcb->CDB[3] = (unsigned char)(blkno>>16); bcb->CDB[2] = (unsigned char)(blkno>>24); result = ene_send_scsi_cmd(us, FDIR_READ, buf+offset, 0); if (result != USB_STOR_XFER_GOOD) { pr_info("MS_SCSI_Read --- result = %x\n", result); result = USB_STOR_TRANSPORT_ERROR; goto exit; } blen -= len; if (blen <= 0) break; logblk++; PageNum = 0; offset += MS_BYTES_PER_PAGE*len; } usb_stor_set_xfer_buf(buf, blenByte, srb); exit: kfree(buf); } return result; } static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb) { int result; struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; unsigned char *cdb = srb->cmnd; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; u32 bn = ((cdb[2] << 24) & 0xff000000) | ((cdb[3] << 16) & 0x00ff0000) | ((cdb[4] << 8) & 0x0000ff00) | ((cdb[5] << 0) & 0x000000ff); u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff); u32 blenByte = blen * 0x200; if (bn > info->bl_num) return USB_STOR_TRANSPORT_ERROR; if (info->MS_Status & MS_IsMSPro) { result = ene_load_bincode(us, MSP_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) { pr_info("Load MSP RW pattern Fail !!\n"); return USB_STOR_TRANSPORT_ERROR; } /* set up the command wrapper */ memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = 
cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = blenByte; bcb->Flags = US_BULK_FLAG_OUT; bcb->CDB[0] = 0xF0; bcb->CDB[1] = 0x04; bcb->CDB[5] = (unsigned char)(bn); bcb->CDB[4] = (unsigned char)(bn>>8); bcb->CDB[3] = (unsigned char)(bn>>16); bcb->CDB[2] = (unsigned char)(bn>>24); result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1); } else { void *buf; int offset = 0; u16 PhyBlockAddr; u8 PageNum; u16 len, oldphy, newphy; buf = kmalloc(blenByte, GFP_KERNEL); if (buf == NULL) return USB_STOR_TRANSPORT_ERROR; usb_stor_set_xfer_buf(buf, blenByte, srb); result = ene_load_bincode(us, MS_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) { pr_info("Load MS RW pattern Fail !!\n"); result = USB_STOR_TRANSPORT_ERROR; goto exit; } PhyBlockAddr = (u16)(bn / info->MS_Lib.PagesPerBlock); PageNum = (u8)(bn % info->MS_Lib.PagesPerBlock); while (1) { if (blen > (info->MS_Lib.PagesPerBlock-PageNum)) len = info->MS_Lib.PagesPerBlock-PageNum; else len = blen; oldphy = ms_libconv_to_physical(info, PhyBlockAddr); /* need check us <-> info */ newphy = ms_libsearch_block_from_logical(us, PhyBlockAddr); result = ms_read_copyblock(us, oldphy, newphy, PhyBlockAddr, PageNum, buf+offset, len); if (result != USB_STOR_XFER_GOOD) { pr_info("MS_SCSI_Write --- result = %x\n", result); result = USB_STOR_TRANSPORT_ERROR; goto exit; } info->MS_Lib.Phy2LogMap[oldphy] = MS_LB_NOT_USED_ERASED; ms_lib_force_setlogical_pair(us, PhyBlockAddr, newphy); blen -= len; if (blen <= 0) break; PhyBlockAddr++; PageNum = 0; offset += MS_BYTES_PER_PAGE*len; } exit: kfree(buf); } return result; } /* * ENE MS Card */ static int ene_get_card_type(struct us_data *us, u16 index, void *buf) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; int result; memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x01; bcb->Flags = US_BULK_FLAG_IN; bcb->CDB[0] = 0xED; bcb->CDB[2] = (unsigned char)(index>>8); bcb->CDB[3] = (unsigned char)index; result = ene_send_scsi_cmd(us, FDIR_READ, buf, 0); return result; } static int ene_get_card_status(struct us_data *us, u8 *buf) { u16 tmpreg; u32 reg4b; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; /*usb_stor_dbg(us, "transport --- ENE_ReadSDReg\n");*/ reg4b = *(u32 *)&buf[0x18]; info->SD_READ_BL_LEN = (u8)((reg4b >> 8) & 0x0f); tmpreg = (u16) reg4b; reg4b = *(u32 *)(&buf[0x14]); if ((info->SD_Status & SD_HiCapacity) && !(info->SD_Status & SD_IsMMC)) info->HC_C_SIZE = (reg4b >> 8) & 0x3fffff; info->SD_C_SIZE = ((tmpreg & 0x03) << 10) | (u16)(reg4b >> 22); info->SD_C_SIZE_MULT = (u8)(reg4b >> 7) & 0x07; if ((info->SD_Status & SD_HiCapacity) && (info->SD_Status & SD_IsMMC)) info->HC_C_SIZE = *(u32 *)(&buf[0x100]); if (info->SD_READ_BL_LEN > SD_BLOCK_LEN) { info->SD_Block_Mult = 1 << (info->SD_READ_BL_LEN-SD_BLOCK_LEN); info->SD_READ_BL_LEN = SD_BLOCK_LEN; } else { info->SD_Block_Mult = 1; } return USB_STOR_TRANSPORT_GOOD; } static int ene_load_bincode(struct us_data *us, unsigned char flag) { int err; char *fw_name = NULL; unsigned char *buf = NULL; const struct firmware *sd_fw = NULL; int result = USB_STOR_TRANSPORT_ERROR; struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; if (info->BIN_FLAG == flag) return USB_STOR_TRANSPORT_GOOD; switch (flag) { /* For SD */ case SD_INIT1_PATTERN: usb_stor_dbg(us, "SD_INIT1_PATTERN\n"); fw_name = SD_INIT1_FIRMWARE; break; case SD_INIT2_PATTERN: usb_stor_dbg(us, "SD_INIT2_PATTERN\n"); 
fw_name = SD_INIT2_FIRMWARE; break; case SD_RW_PATTERN: usb_stor_dbg(us, "SD_RW_PATTERN\n"); fw_name = SD_RW_FIRMWARE; break; /* For MS */ case MS_INIT_PATTERN: usb_stor_dbg(us, "MS_INIT_PATTERN\n"); fw_name = MS_INIT_FIRMWARE; break; case MSP_RW_PATTERN: usb_stor_dbg(us, "MSP_RW_PATTERN\n"); fw_name = MSP_RW_FIRMWARE; break; case MS_RW_PATTERN: usb_stor_dbg(us, "MS_RW_PATTERN\n"); fw_name = MS_RW_FIRMWARE; break; default: usb_stor_dbg(us, "----------- Unknown PATTERN ----------\n"); goto nofw; } err = request_firmware(&sd_fw, fw_name, &us->pusb_dev->dev); if (err) { usb_stor_dbg(us, "load firmware %s failed\n", fw_name); goto nofw; } buf = kmemdup(sd_fw->data, sd_fw->size, GFP_KERNEL); if (buf == NULL) goto nofw; memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = sd_fw->size; bcb->Flags = US_BULK_FLAG_OUT; bcb->CDB[0] = 0xEF; result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0); if (us->srb != NULL) scsi_set_resid(us->srb, 0); info->BIN_FLAG = flag; kfree(buf); nofw: release_firmware(sd_fw); return result; } static int ms_card_init(struct us_data *us) { u32 result; u16 TmpBlock; unsigned char *PageBuffer0 = NULL, *PageBuffer1 = NULL; struct ms_lib_type_extdat extdat; u16 btBlk1st, btBlk2nd; u32 btBlk1stErred; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; printk(KERN_INFO "MS_CardInit start\n"); ms_lib_free_allocatedarea(us); /* Clean buffer and set struct us_data flag to 0 */ /* get two PageBuffer */ PageBuffer0 = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL); PageBuffer1 = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL); if ((PageBuffer0 == NULL) || (PageBuffer1 == NULL)) { result = MS_NO_MEMORY_ERROR; goto exit; } btBlk1st = btBlk2nd = MS_LB_NOT_USED; btBlk1stErred = 0; for (TmpBlock = 0; TmpBlock < MS_MAX_INITIAL_ERROR_BLOCKS+2; TmpBlock++) { switch (ms_read_readpage(us, TmpBlock, 0, (u32 *)PageBuffer0, &extdat)) { case MS_STATUS_SUCCESS: break; case MS_STATUS_INT_ERROR: break; case MS_STATUS_ERROR: default: continue; } if ((extdat.ovrflg & MS_REG_OVR_BKST) == MS_REG_OVR_BKST_NG) continue; if (((extdat.mngflg & MS_REG_MNG_SYSFLG) == MS_REG_MNG_SYSFLG_USER) || (be16_to_cpu(((struct ms_bootblock_page0 *)PageBuffer0)->header.wBlockID) != MS_BOOT_BLOCK_ID) || (be16_to_cpu(((struct ms_bootblock_page0 *)PageBuffer0)->header.wFormatVersion) != MS_BOOT_BLOCK_FORMAT_VERSION) || (((struct ms_bootblock_page0 *)PageBuffer0)->header.bNumberOfDataEntry != MS_BOOT_BLOCK_DATA_ENTRIES)) continue; if (btBlk1st != MS_LB_NOT_USED) { btBlk2nd = TmpBlock; break; } btBlk1st = TmpBlock; memcpy(PageBuffer1, PageBuffer0, MS_BYTES_PER_PAGE); if (extdat.status1 & (MS_REG_ST1_DTER | MS_REG_ST1_EXER | MS_REG_ST1_FGER)) btBlk1stErred = 1; } if (btBlk1st == MS_LB_NOT_USED) { result = MS_STATUS_ERROR; goto exit; } /* write protect */ if ((extdat.status0 & MS_REG_ST0_WP) == MS_REG_ST0_WP_ON) ms_lib_ctrl_set(info, MS_LIB_CTRL_WRPROTECT); result = MS_STATUS_ERROR; /* 1st Boot Block */ if (btBlk1stErred == 0) result = ms_lib_process_bootblock(us, btBlk1st, PageBuffer1); /* 1st */ /* 2nd Boot Block */ if (result && (btBlk2nd != MS_LB_NOT_USED)) result = ms_lib_process_bootblock(us, btBlk2nd, PageBuffer0); if (result) { result = MS_STATUS_ERROR; goto exit; } for (TmpBlock = 0; TmpBlock < btBlk1st; TmpBlock++) info->MS_Lib.Phy2LogMap[TmpBlock] = MS_LB_INITIAL_ERROR; info->MS_Lib.Phy2LogMap[btBlk1st] = MS_LB_BOOT_BLOCK; if (btBlk2nd != MS_LB_NOT_USED) { for (TmpBlock = btBlk1st + 1; TmpBlock < btBlk2nd; TmpBlock++) info->MS_Lib.Phy2LogMap[TmpBlock] = 
MS_LB_INITIAL_ERROR; info->MS_Lib.Phy2LogMap[btBlk2nd] = MS_LB_BOOT_BLOCK; } result = ms_lib_scan_logicalblocknumber(us, btBlk1st); if (result) goto exit; for (TmpBlock = MS_PHYSICAL_BLOCKS_PER_SEGMENT; TmpBlock < info->MS_Lib.NumberOfPhyBlock; TmpBlock += MS_PHYSICAL_BLOCKS_PER_SEGMENT) { if (ms_count_freeblock(us, TmpBlock) == 0) { ms_lib_ctrl_set(info, MS_LIB_CTRL_WRPROTECT); break; } } /* write */ if (ms_lib_alloc_writebuf(us)) { result = MS_NO_MEMORY_ERROR; goto exit; } result = MS_STATUS_SUCCESS; exit: kfree(PageBuffer1); kfree(PageBuffer0); printk(KERN_INFO "MS_CardInit end\n"); return result; } static int ene_ms_init(struct us_data *us) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; int result; u16 MSP_BlockSize, MSP_UserAreaBlocks; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; u8 *bbuf = info->bbuf; unsigned int s; printk(KERN_INFO "transport --- ENE_MSInit\n"); /* the same part to test ENE */ result = ene_load_bincode(us, MS_INIT_PATTERN); if (result != USB_STOR_XFER_GOOD) { printk(KERN_ERR "Load MS Init Code Fail !!\n"); return USB_STOR_TRANSPORT_ERROR; } memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200; bcb->Flags = US_BULK_FLAG_IN; bcb->CDB[0] = 0xF1; bcb->CDB[1] = 0x01; result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); if (result != USB_STOR_XFER_GOOD) { printk(KERN_ERR "Execution MS Init Code Fail !!\n"); return USB_STOR_TRANSPORT_ERROR; } /* the same part to test ENE */ info->MS_Status = bbuf[0]; s = info->MS_Status; if ((s & MS_Insert) && (s & MS_Ready)) { printk(KERN_INFO "Insert = %x\n", !!(s & MS_Insert)); printk(KERN_INFO "Ready = %x\n", !!(s & MS_Ready)); printk(KERN_INFO "IsMSPro = %x\n", !!(s & MS_IsMSPro)); printk(KERN_INFO "IsMSPHG = %x\n", !!(s & MS_IsMSPHG)); printk(KERN_INFO "WtP= %x\n", !!(s & MS_WtP)); if (s & MS_IsMSPro) { MSP_BlockSize = (bbuf[6] << 8) | bbuf[7]; MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11]; info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks; } else { ms_card_init(us); /* Card is MS (to ms.c)*/ } usb_stor_dbg(us, "MS Init Code OK !!\n"); } else { usb_stor_dbg(us, "MS Card Not Ready --- %x\n", bbuf[0]); return USB_STOR_TRANSPORT_ERROR; } return USB_STOR_TRANSPORT_GOOD; } static int ene_sd_init(struct us_data *us) { int result; struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; u8 *bbuf = info->bbuf; usb_stor_dbg(us, "transport --- ENE_SDInit\n"); /* SD Init Part-1 */ result = ene_load_bincode(us, SD_INIT1_PATTERN); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Load SD Init Code Part-1 Fail !!\n"); return USB_STOR_TRANSPORT_ERROR; } memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->Flags = US_BULK_FLAG_IN; bcb->CDB[0] = 0xF2; result = ene_send_scsi_cmd(us, FDIR_READ, NULL, 0); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Execution SD Init Code Fail !!\n"); return USB_STOR_TRANSPORT_ERROR; } /* SD Init Part-2 */ result = ene_load_bincode(us, SD_INIT2_PATTERN); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Load SD Init Code Part-2 Fail !!\n"); return USB_STOR_TRANSPORT_ERROR; } memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200; bcb->Flags = US_BULK_FLAG_IN; bcb->CDB[0] = 0xF1; result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Execution SD Init 
Code Fail !!\n"); return USB_STOR_TRANSPORT_ERROR; } info->SD_Status = bbuf[0]; if ((info->SD_Status & SD_Insert) && (info->SD_Status & SD_Ready)) { unsigned int s = info->SD_Status; ene_get_card_status(us, bbuf); usb_stor_dbg(us, "Insert = %x\n", !!(s & SD_Insert)); usb_stor_dbg(us, "Ready = %x\n", !!(s & SD_Ready)); usb_stor_dbg(us, "IsMMC = %x\n", !!(s & SD_IsMMC)); usb_stor_dbg(us, "HiCapacity = %x\n", !!(s & SD_HiCapacity)); usb_stor_dbg(us, "HiSpeed = %x\n", !!(s & SD_HiSpeed)); usb_stor_dbg(us, "WtP = %x\n", !!(s & SD_WtP)); } else { usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]); return USB_STOR_TRANSPORT_ERROR; } return USB_STOR_TRANSPORT_GOOD; } static int ene_init(struct us_data *us) { int result; u8 misc_reg03; struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); u8 *bbuf = info->bbuf; result = ene_get_card_type(us, REG_CARD_STATUS, bbuf); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; misc_reg03 = bbuf[0]; if (misc_reg03 & 0x01) { if (!(info->SD_Status & SD_Ready)) { result = ene_sd_init(us); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; } } if (misc_reg03 & 0x02) { if (!(info->MS_Status & MS_Ready)) { result = ene_ms_init(us); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; } } return result; } /*----- sd_scsi_irp() ---------*/ static int sd_scsi_irp(struct us_data *us, struct scsi_cmnd *srb) { int result; struct ene_ub6250_info *info = (struct ene_ub6250_info *)us->extra; switch (srb->cmnd[0]) { case TEST_UNIT_READY: result = sd_scsi_test_unit_ready(us, srb); break; /* 0x00 */ case REQUEST_SENSE: result = do_scsi_request_sense(us, srb); break; /* 0x03 */ case INQUIRY: result = do_scsi_inquiry(us, srb); break; /* 0x12 */ case MODE_SENSE: result = sd_scsi_mode_sense(us, srb); break; /* 0x1A */ /* case START_STOP: result = SD_SCSI_Start_Stop(us, srb); break; //0x1B */ case READ_CAPACITY: result = sd_scsi_read_capacity(us, srb); break; /* 0x25 */ case READ_10: result = sd_scsi_read(us, srb); break; /* 0x28 */ case WRITE_10: result = sd_scsi_write(us, srb); break; /* 0x2A */ default: info->SrbStatus = SS_ILLEGAL_REQUEST; result = USB_STOR_TRANSPORT_FAILED; break; } if (result == USB_STOR_TRANSPORT_GOOD) info->SrbStatus = SS_SUCCESS; return result; } /* * ms_scsi_irp() */ static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb) { int result; struct ene_ub6250_info *info = (struct ene_ub6250_info *)us->extra; switch (srb->cmnd[0]) { case TEST_UNIT_READY: result = ms_scsi_test_unit_ready(us, srb); break; /* 0x00 */ case REQUEST_SENSE: result = do_scsi_request_sense(us, srb); break; /* 0x03 */ case INQUIRY: result = do_scsi_inquiry(us, srb); break; /* 0x12 */ case MODE_SENSE: result = ms_scsi_mode_sense(us, srb); break; /* 0x1A */ case READ_CAPACITY: result = ms_scsi_read_capacity(us, srb); break; /* 0x25 */ case READ_10: result = ms_scsi_read(us, srb); break; /* 0x28 */ case WRITE_10: result = ms_scsi_write(us, srb); break; /* 0x2A */ default: info->SrbStatus = SS_ILLEGAL_REQUEST; result = USB_STOR_TRANSPORT_FAILED; break; } if (result == USB_STOR_TRANSPORT_GOOD) info->SrbStatus = SS_SUCCESS; return result; } static int ene_transport(struct scsi_cmnd *srb, struct us_data *us) { int result = USB_STOR_XFER_GOOD; struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); /*US_DEBUG(usb_stor_show_command(us, srb)); */ scsi_set_resid(srb, 0); if (unlikely(!(info->SD_Status & SD_Ready) || (info->MS_Status & MS_Ready))) result = ene_init(us); if (result == USB_STOR_XFER_GOOD) { result 
= USB_STOR_TRANSPORT_ERROR; if (info->SD_Status & SD_Ready) result = sd_scsi_irp(us, srb); if (info->MS_Status & MS_Ready) result = ms_scsi_irp(us, srb); } return result; } static struct scsi_host_template ene_ub6250_host_template; static int ene_ub6250_probe(struct usb_interface *intf, const struct usb_device_id *id) { int result; u8 misc_reg03; struct us_data *us; struct ene_ub6250_info *info; result = usb_stor_probe1(&us, intf, id, (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list, &ene_ub6250_host_template); if (result) return result; /* FIXME: where should the code alloc extra buf ? */ us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL); if (!us->extra) return -ENOMEM; us->extra_destructor = ene_ub6250_info_destructor; info = (struct ene_ub6250_info *)(us->extra); info->bbuf = kmalloc(512, GFP_KERNEL); if (!info->bbuf) { kfree(us->extra); return -ENOMEM; } us->transport_name = "ene_ub6250"; us->transport = ene_transport; us->max_lun = 0; result = usb_stor_probe2(us); if (result) return result; /* probe card type */ result = ene_get_card_type(us, REG_CARD_STATUS, info->bbuf); if (result != USB_STOR_XFER_GOOD) { usb_stor_disconnect(intf); return USB_STOR_TRANSPORT_ERROR; } misc_reg03 = info->bbuf[0]; if (!(misc_reg03 & 0x01)) { pr_info("ums_eneub6250: This driver only supports SD/MS cards. " "It does not support SM cards.\n"); } return result; } #ifdef CONFIG_PM static int ene_ub6250_resume(struct usb_interface *iface) { struct us_data *us = usb_get_intfdata(iface); struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); mutex_lock(&us->dev_mutex); if (us->suspend_resume_hook) (us->suspend_resume_hook)(us, US_RESUME); mutex_unlock(&us->dev_mutex); info->Power_IsResum = true; /* info->SD_Status &= ~SD_Ready; */ info->SD_Status = 0; info->MS_Status = 0; info->SM_Status = 0; return 0; } static int ene_ub6250_reset_resume(struct usb_interface *iface) { struct us_data *us = usb_get_intfdata(iface); struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); /* Report the reset to the SCSI core */ usb_stor_reset_resume(iface); /* * FIXME: Notify the subdrivers that they need to reinitialize * the device */ info->Power_IsResum = true; /* info->SD_Status &= ~SD_Ready; */ info->SD_Status = 0; info->MS_Status = 0; info->SM_Status = 0; return 0; } #else #define ene_ub6250_resume NULL #define ene_ub6250_reset_resume NULL #endif static struct usb_driver ene_ub6250_driver = { .name = DRV_NAME, .probe = ene_ub6250_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = ene_ub6250_resume, .reset_resume = ene_ub6250_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = ene_ub6250_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_stor_driver(ene_ub6250_driver, ene_ub6250_host_template, DRV_NAME); |
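/*
 * Illustrative sketch, not part of the driver above: ms_scsi_read() and
 * ms_scsi_write() (and their SD counterparts) decode the READ(10)/WRITE(10)
 * CDB by hand -- bytes 2..5 carry the big-endian logical block address and
 * bytes 7..8 the transfer length in 512-byte sectors, which is then scaled
 * to a byte count. A minimal, self-contained userspace demonstration of the
 * same decoding; the CDB bytes below are made-up example values.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical READ(10) CDB: LBA 0x00012345, transfer length 8 sectors */
	uint8_t cdb[10] = { 0x28, 0x00, 0x00, 0x01, 0x23, 0x45, 0x00, 0x00, 0x08, 0x00 };
	uint32_t bn   = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
			((uint32_t)cdb[4] << 8)  |  (uint32_t)cdb[5];
	uint16_t blen = (uint16_t)((cdb[7] << 8) | cdb[8]);
	uint32_t blen_bytes = (uint32_t)blen * 0x200;	/* 512 bytes per sector */

	printf("block=%u sectors=%u bytes=%u\n", bn, blen, blen_bytes);
	return 0;
}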
9 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 | /* SPDX-License-Identifier: GPL-2.0 */ /* File: linux/posix_acl.h (C) 2002 Andreas Gruenbacher, <a.gruenbacher@computer.org> */ #ifndef __LINUX_POSIX_ACL_H #define __LINUX_POSIX_ACL_H #include <linux/bug.h> #include <linux/slab.h> #include <linux/rcupdate.h> #include <linux/refcount.h> #include <uapi/linux/posix_acl.h> struct user_namespace; struct posix_acl_entry { short e_tag; unsigned short e_perm; union { kuid_t e_uid; kgid_t e_gid; }; }; struct posix_acl { refcount_t a_refcount; struct rcu_head a_rcu; unsigned int a_count; struct posix_acl_entry a_entries[]; }; #define FOREACH_ACL_ENTRY(pa, acl, pe) \ for(pa=(acl)->a_entries, pe=pa+(acl)->a_count; pa<pe; pa++) /* * Duplicate an ACL handle. */ static inline struct posix_acl * posix_acl_dup(struct posix_acl *acl) { if (acl) refcount_inc(&acl->a_refcount); return acl; } /* * Free an ACL handle. */ static inline void posix_acl_release(struct posix_acl *acl) { if (acl && refcount_dec_and_test(&acl->a_refcount)) kfree_rcu(acl, a_rcu); } /* posix_acl.c */ extern void posix_acl_init(struct posix_acl *, int); extern struct posix_acl *posix_acl_alloc(int, gfp_t); extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t); extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *); extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *); extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t); extern struct posix_acl *get_posix_acl(struct inode *, int); int set_posix_acl(struct mnt_idmap *, struct dentry *, int, struct posix_acl *); struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); struct posix_acl *posix_acl_clone(const struct posix_acl *acl, gfp_t flags); #ifdef CONFIG_FS_POSIX_ACL int posix_acl_chmod(struct mnt_idmap *, struct dentry *, umode_t); extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **, struct posix_acl **); int posix_acl_update_mode(struct mnt_idmap *, struct inode *, umode_t *, struct posix_acl **); int simple_set_acl(struct mnt_idmap *, struct dentry *, struct posix_acl *, int); extern int simple_acl_create(struct inode *, struct inode *); struct posix_acl *get_cached_acl(struct inode *inode, int type); void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl); void forget_cached_acl(struct inode *inode, int type); void forget_all_cached_acls(struct inode *inode); int posix_acl_valid(struct user_namespace *, const struct posix_acl *); int posix_acl_permission(struct mnt_idmap *, struct inode *, const struct posix_acl *, int); static inline void cache_no_acl(struct inode *inode) { inode->i_acl = NULL; inode->i_default_acl = NULL; } int vfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name, struct posix_acl *kacl); struct posix_acl *vfs_get_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name); int vfs_remove_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name); int posix_acl_listxattr(struct inode *inode, char 
**buffer, ssize_t *remaining_size); #else static inline int posix_acl_chmod(struct mnt_idmap *idmap, struct dentry *dentry, umode_t mode) { return 0; } #define simple_set_acl NULL static inline int simple_acl_create(struct inode *dir, struct inode *inode) { return 0; } static inline void cache_no_acl(struct inode *inode) { } static inline int posix_acl_create(struct inode *inode, umode_t *mode, struct posix_acl **default_acl, struct posix_acl **acl) { *default_acl = *acl = NULL; return 0; } static inline void forget_all_cached_acls(struct inode *inode) { } static inline int vfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *name, struct posix_acl *acl) { return -EOPNOTSUPP; } static inline struct posix_acl *vfs_get_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) { return ERR_PTR(-EOPNOTSUPP); } static inline int vfs_remove_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) { return -EOPNOTSUPP; } static inline int posix_acl_listxattr(struct inode *inode, char **buffer, ssize_t *remaining_size) { return 0; } #endif /* CONFIG_FS_POSIX_ACL */ struct posix_acl *get_inode_acl(struct inode *inode, int type); #endif /* __LINUX_POSIX_ACL_H */ |
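/*
 * Illustrative sketch, not part of this header: a typical consumer takes its
 * own reference with posix_acl_dup(), walks the entries with
 * FOREACH_ACL_ENTRY(), and drops the reference with posix_acl_release().
 * count_group_entries() is a hypothetical helper shown only to demonstrate
 * the calling pattern; ACL_GROUP comes from <uapi/linux/posix_acl.h>.
 */
static int count_group_entries(struct posix_acl *acl)
{
	const struct posix_acl_entry *pa, *pe;
	int n = 0;

	if (!acl)
		return 0;

	acl = posix_acl_dup(acl);	/* pin the ACL while we walk it */
	FOREACH_ACL_ENTRY(pa, acl, pe) {
		if (pa->e_tag == ACL_GROUP)
			n++;
	}
	posix_acl_release(acl);		/* drop our reference again */

	return n;
}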
1282 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 | /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/mount.h> #include <linux/seq_file.h> #include <linux/poll.h> #include <linux/ns_common.h> #include <linux/fs_pin.h> struct mnt_namespace { struct ns_common ns; struct mount * root; struct rb_root mounts; /* Protected by namespace_sem */ struct user_namespace *user_ns; struct ucounts *ucounts; u64 seq; /* Sequence number to prevent loops */ wait_queue_head_t poll; u64 event; unsigned int nr_mounts; /* # of mounts in the namespace */ unsigned int pending_mounts; struct rb_node mnt_ns_tree_node; /* node in the mnt_ns_tree */ refcount_t passive; /* number references not pinning @mounts */ } __randomize_layout; struct mnt_pcp { int mnt_count; int mnt_writers; }; struct mountpoint { struct hlist_node m_hash; struct dentry *m_dentry; struct hlist_head m_list; int m_count; }; struct mount { struct hlist_node mnt_hash; struct mount *mnt_parent; struct dentry *mnt_mountpoint; struct vfsmount mnt; union { struct rcu_head mnt_rcu; struct llist_node mnt_llist; }; #ifdef CONFIG_SMP struct mnt_pcp __percpu *mnt_pcp; #else int mnt_count; int mnt_writers; #endif struct list_head mnt_mounts; /* list of children, anchored here */ struct list_head mnt_child; /* and going through their mnt_child */ struct list_head mnt_instance; /* mount instance on sb->s_mounts */ const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */ union { struct rb_node mnt_node; /* Under ns->mounts */ struct list_head mnt_list; }; struct list_head mnt_expire; /* link in fs-specific expiry list */ struct list_head mnt_share; /* circular list of shared mounts */ struct list_head mnt_slave_list;/* list of slave mounts */ struct list_head mnt_slave; /* slave list entry */ struct mount *mnt_master; /* slave is on master->mnt_slave_list */ struct mnt_namespace *mnt_ns; /* containing namespace */ struct mountpoint *mnt_mp; /* where is it mounted */ union { struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */ struct hlist_node mnt_umount; }; struct list_head mnt_umounting; /* list entry for umount propagation */ #ifdef CONFIG_FSNOTIFY struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks; __u32 mnt_fsnotify_mask; #endif int mnt_id; /* mount identifier, reused */ u64 mnt_id_unique; /* mount ID unique until reboot */ int mnt_group_id; /* peer group identifier */ int mnt_expiry_mark; /* true if marked for expiry */ struct hlist_head mnt_pins; struct hlist_head mnt_stuck_children; } __randomize_layout; #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */ static inline struct mount *real_mount(struct vfsmount *mnt) { return container_of(mnt, struct mount, mnt); } static inline int mnt_has_parent(struct mount *mnt) { return mnt != mnt->mnt_parent; } static inline int is_mounted(struct vfsmount *mnt) { /* neither detached nor internal? 
*/ return !IS_ERR_OR_NULL(real_mount(mnt)->mnt_ns); } extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *); extern int __legitimize_mnt(struct vfsmount *, unsigned); static inline bool __path_is_mountpoint(const struct path *path) { struct mount *m = __lookup_mnt(path->mnt, path->dentry); return m && likely(!(m->mnt.mnt_flags & MNT_SYNC_UMOUNT)); } extern void __detach_mounts(struct dentry *dentry); static inline void detach_mounts(struct dentry *dentry) { if (!d_mountpoint(dentry)) return; __detach_mounts(dentry); } static inline void get_mnt_ns(struct mnt_namespace *ns) { refcount_inc(&ns->ns.count); } extern seqlock_t mount_lock; struct proc_mounts { struct mnt_namespace *ns; struct path root; int (*show)(struct seq_file *, struct vfsmount *); }; extern const struct seq_operations mounts_op; extern bool __is_local_mountpoint(struct dentry *dentry); static inline bool is_local_mountpoint(struct dentry *dentry) { if (!d_mountpoint(dentry)) return false; return __is_local_mountpoint(dentry); } static inline bool is_anon_ns(struct mnt_namespace *ns) { return ns->seq == 0; } static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list) { WARN_ON(!(mnt->mnt.mnt_flags & MNT_ONRB)); mnt->mnt.mnt_flags &= ~MNT_ONRB; rb_erase(&mnt->mnt_node, &mnt->mnt_ns->mounts); list_add_tail(&mnt->mnt_list, dt_list); } bool has_locked_children(struct mount *mnt, struct dentry *dentry); struct mnt_namespace *__lookup_next_mnt_ns(struct mnt_namespace *mnt_ns, bool previous); static inline struct mnt_namespace *lookup_next_mnt_ns(struct mnt_namespace *mntns) { return __lookup_next_mnt_ns(mntns, false); } static inline struct mnt_namespace *lookup_prev_mnt_ns(struct mnt_namespace *mntns) { return __lookup_next_mnt_ns(mntns, true); } static inline struct mnt_namespace *to_mnt_ns(struct ns_common *ns) { return container_of(ns, struct mnt_namespace, ns); } |
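/*
 * Illustrative sketch, not part of this header: real_mount() converts the
 * public struct vfsmount embedded in struct mount back into the containing
 * struct mount, and mnt_has_parent() terminates an upward walk at the root
 * of a mount tree. find_topmost_mount() is a hypothetical helper for
 * illustration only; a real caller would hold the appropriate locks
 * (mount_lock / namespace_sem) while traversing mnt_parent.
 */
static inline struct mount *find_topmost_mount(struct vfsmount *vfsmnt)
{
	struct mount *mnt = real_mount(vfsmnt);

	while (mnt_has_parent(mnt))
		mnt = mnt->mnt_parent;

	return mnt;
}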
46 46 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 | /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM cgroup #if !defined(_TRACE_CGROUP_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_CGROUP_H #include <linux/cgroup.h> #include <linux/tracepoint.h> DECLARE_EVENT_CLASS(cgroup_root, TP_PROTO(struct cgroup_root *root), TP_ARGS(root), TP_STRUCT__entry( __field( int, root ) __field( u16, ss_mask ) __string( name, root->name ) ), TP_fast_assign( __entry->root = root->hierarchy_id; __entry->ss_mask = root->subsys_mask; __assign_str(name); ), TP_printk("root=%d ss_mask=%#x name=%s", __entry->root, __entry->ss_mask, __get_str(name)) ); DEFINE_EVENT(cgroup_root, cgroup_setup_root, TP_PROTO(struct cgroup_root *root), TP_ARGS(root) ); DEFINE_EVENT(cgroup_root, cgroup_destroy_root, TP_PROTO(struct cgroup_root *root), TP_ARGS(root) ); DEFINE_EVENT(cgroup_root, cgroup_remount, TP_PROTO(struct cgroup_root *root), TP_ARGS(root) ); DECLARE_EVENT_CLASS(cgroup, TP_PROTO(struct cgroup *cgrp, const char *path), TP_ARGS(cgrp, path), TP_STRUCT__entry( __field( int, root ) __field( int, level ) __field( u64, id ) __string( path, path ) ), TP_fast_assign( __entry->root = cgrp->root->hierarchy_id; __entry->id = cgroup_id(cgrp); __entry->level = cgrp->level; __assign_str(path); ), TP_printk("root=%d id=%llu level=%d path=%s", __entry->root, __entry->id, __entry->level, __get_str(path)) ); DEFINE_EVENT(cgroup, cgroup_mkdir, TP_PROTO(struct cgroup *cgrp, const char *path), TP_ARGS(cgrp, path) ); DEFINE_EVENT(cgroup, cgroup_rmdir, TP_PROTO(struct cgroup *cgrp, const char *path), TP_ARGS(cgrp, path) ); DEFINE_EVENT(cgroup, cgroup_release, TP_PROTO(struct cgroup *cgrp, const char *path), TP_ARGS(cgrp, path) ); DEFINE_EVENT(cgroup, cgroup_rename, TP_PROTO(struct cgroup *cgrp, const char *path), TP_ARGS(cgrp, path) ); DEFINE_EVENT(cgroup, cgroup_freeze, TP_PROTO(struct cgroup *cgrp, const char *path), TP_ARGS(cgrp, path) ); DEFINE_EVENT(cgroup, cgroup_unfreeze, TP_PROTO(struct cgroup *cgrp, const char *path), TP_ARGS(cgrp, path) ); DECLARE_EVENT_CLASS(cgroup_migrate, TP_PROTO(struct cgroup *dst_cgrp, const char *path, struct task_struct *task, bool threadgroup), TP_ARGS(dst_cgrp, path, task, threadgroup), TP_STRUCT__entry( __field( int, dst_root ) __field( int, dst_level ) __field( u64, dst_id ) __field( int, pid ) __string( dst_path, path ) __string( comm, task->comm ) ), TP_fast_assign( __entry->dst_root = dst_cgrp->root->hierarchy_id; 
__entry->dst_id = cgroup_id(dst_cgrp); __entry->dst_level = dst_cgrp->level; __assign_str(dst_path); __entry->pid = task->pid; __assign_str(comm); ), TP_printk("dst_root=%d dst_id=%llu dst_level=%d dst_path=%s pid=%d comm=%s", __entry->dst_root, __entry->dst_id, __entry->dst_level, __get_str(dst_path), __entry->pid, __get_str(comm)) ); DEFINE_EVENT(cgroup_migrate, cgroup_attach_task, TP_PROTO(struct cgroup *dst_cgrp, const char *path, struct task_struct *task, bool threadgroup), TP_ARGS(dst_cgrp, path, task, threadgroup) ); DEFINE_EVENT(cgroup_migrate, cgroup_transfer_tasks, TP_PROTO(struct cgroup *dst_cgrp, const char *path, struct task_struct *task, bool threadgroup), TP_ARGS(dst_cgrp, path, task, threadgroup) ); DECLARE_EVENT_CLASS(cgroup_event, TP_PROTO(struct cgroup *cgrp, const char *path, int val), TP_ARGS(cgrp, path, val), TP_STRUCT__entry( __field( int, root ) __field( int, level ) __field( u64, id ) __string( path, path ) __field( int, val ) ), TP_fast_assign( __entry->root = cgrp->root->hierarchy_id; __entry->id = cgroup_id(cgrp); __entry->level = cgrp->level; __assign_str(path); __entry->val = val; ), TP_printk("root=%d id=%llu level=%d path=%s val=%d", __entry->root, __entry->id, __entry->level, __get_str(path), __entry->val) ); DEFINE_EVENT(cgroup_event, cgroup_notify_populated, TP_PROTO(struct cgroup *cgrp, const char *path, int val), TP_ARGS(cgrp, path, val) ); DEFINE_EVENT(cgroup_event, cgroup_notify_frozen, TP_PROTO(struct cgroup *cgrp, const char *path, int val), TP_ARGS(cgrp, path, val) ); DECLARE_EVENT_CLASS(cgroup_rstat, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended), TP_STRUCT__entry( __field( int, root ) __field( int, level ) __field( u64, id ) __field( int, cpu ) __field( bool, contended ) ), TP_fast_assign( __entry->root = cgrp->root->hierarchy_id; __entry->id = cgroup_id(cgrp); __entry->level = cgrp->level; __entry->cpu = cpu; __entry->contended = contended; ), TP_printk("root=%d id=%llu level=%d cpu=%d lock contended:%d", __entry->root, __entry->id, __entry->level, __entry->cpu, __entry->contended) ); /* Related to global: cgroup_rstat_lock */ DEFINE_EVENT(cgroup_rstat, cgroup_rstat_lock_contended, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended) ); DEFINE_EVENT(cgroup_rstat, cgroup_rstat_locked, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended) ); DEFINE_EVENT(cgroup_rstat, cgroup_rstat_unlock, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended) ); /* Related to per CPU: cgroup_rstat_cpu_lock */ DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended) ); DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended_fastpath, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended) ); DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_locked, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended) ); DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_locked_fastpath, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended) ); DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_unlock, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended) ); DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_unlock_fastpath, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended) ); #endif /* _TRACE_CGROUP_H */ /* This part must be 
outside protection */ #include <trace/define_trace.h>
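/*
 * Illustrative sketch, not part of this header: every DEFINE_EVENT() above
 * expands into a trace_<event>() call (plus a trace_<event>_enabled()
 * static-key helper) that kernel code fires at the corresponding point; the
 * tracepoints are instantiated in the one .c file that defines
 * CREATE_TRACE_POINTS before including this header. The guarded example below
 * only shows the call shape -- the real cgroup code goes through its
 * TRACE_CGROUP_PATH() wrapper, which builds the path string under a lock.
 */
#ifdef CGROUP_TRACE_EXAMPLE_ONLY	/* never defined; illustration only */
static void example_trace_mkdir(struct cgroup *cgrp, const char *path)
{
	if (trace_cgroup_mkdir_enabled())
		trace_cgroup_mkdir(cgrp, path);
}
#endif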
// SPDX-License-Identifier: GPL-2.0-or-later /* * Initialization routines * Copyright (c) by Jaroslav Kysela <perex@perex.cz> */ #include <linux/init.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/device.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/ctype.h> #include <linux/pm.h> #include <linux/debugfs.h> #include <linux/completion.h> #include <linux/interrupt.h> #include <sound/core.h> #include <sound/control.h> #include <sound/info.h> /* monitor files for graceful shutdown (hotplug) */ struct snd_monitor_file { struct file *file; const struct file_operations *disconnected_f_op; struct list_head shutdown_list; /* still need to shutdown */ struct list_head list; /* link of monitor files */ }; static DEFINE_SPINLOCK(shutdown_lock); static LIST_HEAD(shutdown_files); static const struct file_operations snd_shutdown_f_ops; /* locked for registering/using */ static DECLARE_BITMAP(snd_cards_lock, SNDRV_CARDS); static struct snd_card *snd_cards[SNDRV_CARDS]; static DEFINE_MUTEX(snd_card_mutex); static char *slots[SNDRV_CARDS]; module_param_array(slots, charp, NULL, 0444); MODULE_PARM_DESC(slots, "Module names assigned to the slots."); /* return non-zero if the given index is reserved for the given * module via slots option */ static int module_slot_match(struct module *module, int idx) { int match = 1; #ifdef CONFIG_MODULES const char *s1, *s2; if (!module || !*module->name || !slots[idx]) return 0; s1 = module->name; s2 = slots[idx]; if (*s2 == '!') { match = 0; /* negative match */ s2++; } /* compare module name strings * hyphens are handled as equivalent with underscore */ for (;;) { char c1 = *s1++; char c2 = *s2++; if (c1 == '-') c1 = '_'; if (c2 == '-') c2 = '_'; if (c1 != c2) return !match; if (!c1) break; } #endif /* CONFIG_MODULES */ return match; } #if IS_ENABLED(CONFIG_SND_MIXER_OSS) int (*snd_mixer_oss_notify_callback)(struct snd_card *card,
int free_flag); EXPORT_SYMBOL(snd_mixer_oss_notify_callback); #endif static int check_empty_slot(struct module *module, int slot) { return !slots[slot] || !*slots[slot]; } /* return an empty slot number (>= 0) found in the given bitmask @mask. * @mask == -1 == 0xffffffff means: take any free slot up to 32 * when no slot is available, return the original @mask as is. */ static int get_slot_from_bitmask(int mask, int (*check)(struct module *, int), struct module *module) { int slot; for (slot = 0; slot < SNDRV_CARDS; slot++) { if (slot < 32 && !(mask & (1U << slot))) continue; if (!test_bit(slot, snd_cards_lock)) { if (check(module, slot)) return slot; /* found */ } } return mask; /* unchanged */ } /* the default release callback set in snd_device_alloc() */ static void default_release_alloc(struct device *dev) { kfree(dev); } /** * snd_device_alloc - Allocate and initialize struct device for sound devices * @dev_p: pointer to store the allocated device * @card: card to assign, optional * * For releasing the allocated device, call put_device(). */ int snd_device_alloc(struct device **dev_p, struct snd_card *card) { struct device *dev; *dev_p = NULL; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; device_initialize(dev); if (card) dev->parent = &card->card_dev; dev->class = &sound_class; dev->release = default_release_alloc; *dev_p = dev; return 0; } EXPORT_SYMBOL_GPL(snd_device_alloc); static int snd_card_init(struct snd_card *card, struct device *parent, int idx, const char *xid, struct module *module, size_t extra_size); static int snd_card_do_free(struct snd_card *card); static const struct attribute_group card_dev_attr_group; static void release_card_device(struct device *dev) { snd_card_do_free(dev_to_snd_card(dev)); } /** * snd_card_new - create and initialize a soundcard structure * @parent: the parent device object * @idx: card index (address) [0 ... (SNDRV_CARDS-1)] * @xid: card identification (ASCII string) * @module: top level module for locking * @extra_size: allocate this extra size after the main soundcard structure * @card_ret: the pointer to store the created card instance * * The function allocates snd_card instance via kzalloc with the given * space for the driver to use freely. The allocated struct is stored * in the given card_ret pointer. * * Return: Zero if successful or a negative error code. */ int snd_card_new(struct device *parent, int idx, const char *xid, struct module *module, int extra_size, struct snd_card **card_ret) { struct snd_card *card; int err; if (snd_BUG_ON(!card_ret)) return -EINVAL; *card_ret = NULL; if (extra_size < 0) extra_size = 0; card = kzalloc(sizeof(*card) + extra_size, GFP_KERNEL); if (!card) return -ENOMEM; err = snd_card_init(card, parent, idx, xid, module, extra_size); if (err < 0) return err; /* card is freed by error handler */ *card_ret = card; return 0; } EXPORT_SYMBOL(snd_card_new); static void __snd_card_release(struct device *dev, void *data) { snd_card_free(data); } /** * snd_devm_card_new - managed snd_card object creation * @parent: the parent device object * @idx: card index (address) [0 ... (SNDRV_CARDS-1)] * @xid: card identification (ASCII string) * @module: top level module for locking * @extra_size: allocate this extra size after the main soundcard structure * @card_ret: the pointer to store the created card instance * * This function works like snd_card_new() but manages the allocated resource * via devres, i.e. you don't need to free explicitly. 
* * When a snd_card object is created with this function and registered via * snd_card_register(), the very first devres action to call snd_card_free() * is added automatically. In that way, the resource disconnection is assured * at first, then released in the expected order. * * If an error happens at the probe before snd_card_register() is called and * there have been other devres resources, you'd need to free the card manually * via snd_card_free() call in the error; otherwise it may lead to UAF due to * devres call orders. You can use snd_card_free_on_error() helper for * handling it more easily. * * Return: zero if successful, or a negative error code */ int snd_devm_card_new(struct device *parent, int idx, const char *xid, struct module *module, size_t extra_size, struct snd_card **card_ret) { struct snd_card *card; int err; *card_ret = NULL; card = devres_alloc(__snd_card_release, sizeof(*card) + extra_size, GFP_KERNEL); if (!card) return -ENOMEM; card->managed = true; err = snd_card_init(card, parent, idx, xid, module, extra_size); if (err < 0) { devres_free(card); /* in managed mode, we need to free manually */ return err; } devres_add(parent, card); *card_ret = card; return 0; } EXPORT_SYMBOL_GPL(snd_devm_card_new); /** * snd_card_free_on_error - a small helper for handling devm probe errors * @dev: the managed device object * @ret: the return code from the probe callback * * This function handles the explicit snd_card_free() call at the error from * the probe callback. It's just a small helper for simplifying the error * handling with the managed devices. * * Return: zero if successful, or a negative error code */ int snd_card_free_on_error(struct device *dev, int ret) { struct snd_card *card; if (!ret) return 0; card = devres_find(dev, __snd_card_release, NULL, NULL); if (card) snd_card_free(card); return ret; } EXPORT_SYMBOL_GPL(snd_card_free_on_error); static int snd_card_init(struct snd_card *card, struct device *parent, int idx, const char *xid, struct module *module, size_t extra_size) { int err; if (extra_size > 0) card->private_data = (char *)card + sizeof(struct snd_card); if (xid) strscpy(card->id, xid, sizeof(card->id)); err = 0; scoped_guard(mutex, &snd_card_mutex) { if (idx < 0) /* first check the matching module-name slot */ idx = get_slot_from_bitmask(idx, module_slot_match, module); if (idx < 0) /* if not matched, assign an empty slot */ idx = get_slot_from_bitmask(idx, check_empty_slot, module); if (idx < 0) err = -ENODEV; else if (idx < snd_ecards_limit) { if (test_bit(idx, snd_cards_lock)) err = -EBUSY; /* invalid */ } else if (idx >= SNDRV_CARDS) err = -ENODEV; if (!err) { set_bit(idx, snd_cards_lock); /* lock it */ if (idx >= snd_ecards_limit) snd_ecards_limit = idx + 1; /* increase the limit */ } } if (err < 0) { dev_err(parent, "cannot find the slot for index %d (range 0-%i), error: %d\n", idx, snd_ecards_limit - 1, err); if (!card->managed) kfree(card); /* manually free here, as no destructor called */ return err; } card->dev = parent; card->number = idx; WARN_ON(IS_MODULE(CONFIG_SND) && !module); card->module = module; INIT_LIST_HEAD(&card->devices); init_rwsem(&card->controls_rwsem); rwlock_init(&card->controls_rwlock); INIT_LIST_HEAD(&card->controls); INIT_LIST_HEAD(&card->ctl_files); #ifdef CONFIG_SND_CTL_FAST_LOOKUP xa_init(&card->ctl_numids); xa_init(&card->ctl_hash); #endif spin_lock_init(&card->files_lock); INIT_LIST_HEAD(&card->files_list); mutex_init(&card->memory_mutex); #ifdef CONFIG_PM init_waitqueue_head(&card->power_sleep); 
init_waitqueue_head(&card->power_ref_sleep); atomic_set(&card->power_ref, 0); #endif init_waitqueue_head(&card->remove_sleep); card->sync_irq = -1; device_initialize(&card->card_dev); card->card_dev.parent = parent; card->card_dev.class = &sound_class; card->card_dev.release = release_card_device; card->card_dev.groups = card->dev_groups; card->dev_groups[0] = &card_dev_attr_group; err = kobject_set_name(&card->card_dev.kobj, "card%d", idx); if (err < 0) goto __error; snprintf(card->irq_descr, sizeof(card->irq_descr), "%s:%s", dev_driver_string(card->dev), dev_name(&card->card_dev)); /* the control interface cannot be accessed from the user space until */ /* snd_cards_bitmask and snd_cards are set with snd_card_register */ err = snd_ctl_create(card); if (err < 0) { dev_err(parent, "unable to register control minors\n"); goto __error; } err = snd_info_card_create(card); if (err < 0) { dev_err(parent, "unable to create card info\n"); goto __error_ctl; } #ifdef CONFIG_SND_DEBUG card->debugfs_root = debugfs_create_dir(dev_name(&card->card_dev), sound_debugfs_root); #endif return 0; __error_ctl: snd_device_free_all(card); __error: put_device(&card->card_dev); return err; } /** * snd_card_ref - Get the card object from the index * @idx: the card index * * Returns a card object corresponding to the given index or NULL if not found. * Release the object via snd_card_unref(). * * Return: a card object or NULL */ struct snd_card *snd_card_ref(int idx) { struct snd_card *card; guard(mutex)(&snd_card_mutex); card = snd_cards[idx]; if (card) get_device(&card->card_dev); return card; } EXPORT_SYMBOL_GPL(snd_card_ref); /* return non-zero if a card is already locked */ int snd_card_locked(int card) { guard(mutex)(&snd_card_mutex); return test_bit(card, snd_cards_lock); } static loff_t snd_disconnect_llseek(struct file *file, loff_t offset, int orig) { return -ENODEV; } static ssize_t snd_disconnect_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { return -ENODEV; } static ssize_t snd_disconnect_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { return -ENODEV; } static int snd_disconnect_release(struct inode *inode, struct file *file) { struct snd_monitor_file *df = NULL, *_df; scoped_guard(spinlock, &shutdown_lock) { list_for_each_entry(_df, &shutdown_files, shutdown_list) { if (_df->file == file) { df = _df; list_del_init(&df->shutdown_list); break; } } } if (likely(df)) { if ((file->f_flags & FASYNC) && df->disconnected_f_op->fasync) df->disconnected_f_op->fasync(-1, file, 0); return df->disconnected_f_op->release(inode, file); } panic("%s(%p, %p) failed!", __func__, inode, file); } static __poll_t snd_disconnect_poll(struct file * file, poll_table * wait) { return EPOLLERR | EPOLLNVAL; } static long snd_disconnect_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return -ENODEV; } static int snd_disconnect_mmap(struct file *file, struct vm_area_struct *vma) { return -ENODEV; } static int snd_disconnect_fasync(int fd, struct file *file, int on) { return -ENODEV; } static const struct file_operations snd_shutdown_f_ops = { .owner = THIS_MODULE, .llseek = snd_disconnect_llseek, .read = snd_disconnect_read, .write = snd_disconnect_write, .release = snd_disconnect_release, .poll = snd_disconnect_poll, .unlocked_ioctl = snd_disconnect_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = snd_disconnect_ioctl, #endif .mmap = snd_disconnect_mmap, .fasync = snd_disconnect_fasync }; /** * snd_card_disconnect - disconnect all APIs from the file-operations 
(user space) * @card: soundcard structure * * Disconnects all APIs from the file-operations (user space). * * Return: Zero, otherwise a negative error code. * * Note: The current implementation replaces all active file->f_op with special * dummy file operations (they do nothing except release). */ void snd_card_disconnect(struct snd_card *card) { struct snd_monitor_file *mfile; if (!card) return; scoped_guard(spinlock, &card->files_lock) { if (card->shutdown) return; card->shutdown = 1; /* replace file->f_op with special dummy operations */ list_for_each_entry(mfile, &card->files_list, list) { /* it's critical part, use endless loop */ /* we have no room to fail */ mfile->disconnected_f_op = mfile->file->f_op; scoped_guard(spinlock, &shutdown_lock) list_add(&mfile->shutdown_list, &shutdown_files); mfile->file->f_op = &snd_shutdown_f_ops; fops_get(mfile->file->f_op); } } #ifdef CONFIG_PM /* wake up sleepers here before other callbacks for avoiding potential * deadlocks with other locks (e.g. in kctls); * then this notifies the shutdown and sleepers would abort immediately */ wake_up_all(&card->power_sleep); #endif /* notify all connected devices about disconnection */ /* at this point, they cannot respond to any calls except release() */ #if IS_ENABLED(CONFIG_SND_MIXER_OSS) if (snd_mixer_oss_notify_callback) snd_mixer_oss_notify_callback(card, SND_MIXER_OSS_NOTIFY_DISCONNECT); #endif /* notify all devices that we are disconnected */ snd_device_disconnect_all(card); if (card->sync_irq > 0) synchronize_irq(card->sync_irq); snd_info_card_disconnect(card); #ifdef CONFIG_SND_DEBUG debugfs_remove(card->debugfs_root); card->debugfs_root = NULL; #endif if (card->registered) { device_del(&card->card_dev); card->registered = false; } /* disable fops (user space) operations for ALSA API */ scoped_guard(mutex, &snd_card_mutex) { snd_cards[card->number] = NULL; clear_bit(card->number, snd_cards_lock); } snd_power_sync_ref(card); } EXPORT_SYMBOL(snd_card_disconnect); /** * snd_card_disconnect_sync - disconnect card and wait until files get closed * @card: card object to disconnect * * This calls snd_card_disconnect() for disconnecting all belonging components * and waits until all pending files get closed. * It assures that all accesses from user-space finished so that the driver * can release its resources gracefully. */ void snd_card_disconnect_sync(struct snd_card *card) { snd_card_disconnect(card); guard(spinlock_irq)(&card->files_lock); wait_event_lock_irq(card->remove_sleep, list_empty(&card->files_list), card->files_lock); } EXPORT_SYMBOL_GPL(snd_card_disconnect_sync); static int snd_card_do_free(struct snd_card *card) { card->releasing = true; #if IS_ENABLED(CONFIG_SND_MIXER_OSS) if (snd_mixer_oss_notify_callback) snd_mixer_oss_notify_callback(card, SND_MIXER_OSS_NOTIFY_FREE); #endif snd_device_free_all(card); if (card->private_free) card->private_free(card); if (snd_info_card_free(card) < 0) { dev_warn(card->dev, "unable to free card info\n"); /* Not fatal error */ } if (card->release_completion) complete(card->release_completion); if (!card->managed) kfree(card); return 0; } /** * snd_card_free_when_closed - Disconnect the card, free it later eventually * @card: soundcard structure * * Unlike snd_card_free(), this function doesn't try to release the card * resource immediately, but tries to disconnect at first. When the card * is still in use, the function returns before freeing the resources. * The card resources will be freed when the refcount gets to zero. 
* * Return: zero if successful, or a negative error code */ void snd_card_free_when_closed(struct snd_card *card) { if (!card) return; snd_card_disconnect(card); put_device(&card->card_dev); return; } EXPORT_SYMBOL(snd_card_free_when_closed); /** * snd_card_free - frees given soundcard structure * @card: soundcard structure * * This function releases the soundcard structure and the all assigned * devices automatically. That is, you don't have to release the devices * by yourself. * * This function waits until the all resources are properly released. * * Return: Zero. Frees all associated devices and frees the control * interface associated to given soundcard. */ void snd_card_free(struct snd_card *card) { DECLARE_COMPLETION_ONSTACK(released); /* The call of snd_card_free() is allowed from various code paths; * a manual call from the driver and the call via devres_free, and * we need to avoid double-free. Moreover, the release via devres * may call snd_card_free() twice due to its nature, we need to have * the check here at the beginning. */ if (card->releasing) return; card->release_completion = &released; snd_card_free_when_closed(card); /* wait, until all devices are ready for the free operation */ wait_for_completion(&released); } EXPORT_SYMBOL(snd_card_free); /* check, if the character is in the valid ASCII range */ static inline bool safe_ascii_char(char c) { return isascii(c) && isalnum(c); } /* retrieve the last word of shortname or longname */ static const char *retrieve_id_from_card_name(const char *name) { const char *spos = name; while (*name) { if (isspace(*name) && safe_ascii_char(name[1])) spos = name + 1; name++; } return spos; } /* return true if the given id string doesn't conflict any other card ids */ static bool card_id_ok(struct snd_card *card, const char *id) { int i; if (!snd_info_check_reserved_words(id)) return false; for (i = 0; i < snd_ecards_limit; i++) { if (snd_cards[i] && snd_cards[i] != card && !strcmp(snd_cards[i]->id, id)) return false; } return true; } /* copy to card->id only with valid letters from nid */ static void copy_valid_id_string(struct snd_card *card, const char *src, const char *nid) { char *id = card->id; while (*nid && !safe_ascii_char(*nid)) nid++; if (isdigit(*nid)) *id++ = isalpha(*src) ? *src : 'D'; while (*nid && (size_t)(id - card->id) < sizeof(card->id) - 1) { if (safe_ascii_char(*nid)) *id++ = *nid; nid++; } *id = 0; } /* Set card->id from the given string * If the string conflicts with other ids, add a suffix to make it unique. */ static void snd_card_set_id_no_lock(struct snd_card *card, const char *src, const char *nid) { int len, loops; bool is_default = false; char *id; copy_valid_id_string(card, src, nid); id = card->id; again: /* use "Default" for obviously invalid strings * ("card" conflicts with proc directories) */ if (!*id || !strncmp(id, "card", 4)) { strcpy(id, "Default"); is_default = true; } len = strlen(id); for (loops = 0; loops < SNDRV_CARDS; loops++) { char *spos; char sfxstr[5]; /* "_012" */ int sfxlen; if (card_id_ok(card, id)) return; /* OK */ /* Add _XYZ suffix */ sprintf(sfxstr, "_%X", loops + 1); sfxlen = strlen(sfxstr); if (len + sfxlen >= sizeof(card->id)) spos = id + sizeof(card->id) - sfxlen - 1; else spos = id + len; strcpy(spos, sfxstr); } /* fallback to the default id */ if (!is_default) { *id = 0; goto again; } /* last resort... 
*/ dev_err(card->dev, "unable to set card id (%s)\n", id); if (card->proc_root->name) strscpy(card->id, card->proc_root->name, sizeof(card->id)); } /** * snd_card_set_id - set card identification name * @card: soundcard structure * @nid: new identification string * * This function sets the card identification and checks for name * collisions. */ void snd_card_set_id(struct snd_card *card, const char *nid) { /* check if user specified own card->id */ if (card->id[0] != '\0') return; guard(mutex)(&snd_card_mutex); snd_card_set_id_no_lock(card, nid, nid); } EXPORT_SYMBOL(snd_card_set_id); static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_card *card = container_of(dev, struct snd_card, card_dev); return sysfs_emit(buf, "%s\n", card->id); } static ssize_t id_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct snd_card *card = container_of(dev, struct snd_card, card_dev); char buf1[sizeof(card->id)]; size_t copy = count > sizeof(card->id) - 1 ? sizeof(card->id) - 1 : count; size_t idx; int c; for (idx = 0; idx < copy; idx++) { c = buf[idx]; if (!safe_ascii_char(c) && c != '_' && c != '-') return -EINVAL; } memcpy(buf1, buf, copy); buf1[copy] = '\0'; guard(mutex)(&snd_card_mutex); if (!card_id_ok(NULL, buf1)) return -EEXIST; strcpy(card->id, buf1); snd_info_card_id_change(card); return count; } static DEVICE_ATTR_RW(id); static ssize_t number_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_card *card = container_of(dev, struct snd_card, card_dev); return sysfs_emit(buf, "%i\n", card->number); } static DEVICE_ATTR_RO(number); static struct attribute *card_dev_attrs[] = { &dev_attr_id.attr, &dev_attr_number.attr, NULL }; static const struct attribute_group card_dev_attr_group = { .attrs = card_dev_attrs, }; /** * snd_card_add_dev_attr - Append a new sysfs attribute group to card * @card: card instance * @group: attribute group to append * * Return: zero if successful, or a negative error code */ int snd_card_add_dev_attr(struct snd_card *card, const struct attribute_group *group) { int i; /* loop for (arraysize-1) here to keep NULL at the last entry */ for (i = 0; i < ARRAY_SIZE(card->dev_groups) - 1; i++) { if (!card->dev_groups[i]) { card->dev_groups[i] = group; return 0; } } dev_err(card->dev, "Too many groups assigned\n"); return -ENOSPC; } EXPORT_SYMBOL_GPL(snd_card_add_dev_attr); static void trigger_card_free(void *data) { snd_card_free(data); } /** * snd_card_register - register the soundcard * @card: soundcard structure * * This function registers all the devices assigned to the soundcard. * Until calling this, the ALSA control interface is blocked from the * external accesses. Thus, you should call this function at the end * of the initialization of the card. * * Return: Zero otherwise a negative error code if the registration failed. 
*/ int snd_card_register(struct snd_card *card) { int err; if (snd_BUG_ON(!card)) return -EINVAL; if (!card->registered) { err = device_add(&card->card_dev); if (err < 0) return err; card->registered = true; } else { if (card->managed) devm_remove_action(card->dev, trigger_card_free, card); } if (card->managed) { err = devm_add_action(card->dev, trigger_card_free, card); if (err < 0) return err; } err = snd_device_register_all(card); if (err < 0) return err; scoped_guard(mutex, &snd_card_mutex) { if (snd_cards[card->number]) { /* already registered */ return snd_info_card_register(card); /* register pending info */ } if (*card->id) { /* make a unique id name from the given string */ char tmpid[sizeof(card->id)]; memcpy(tmpid, card->id, sizeof(card->id)); snd_card_set_id_no_lock(card, tmpid, tmpid); } else { /* create an id from either shortname or longname */ const char *src; src = *card->shortname ? card->shortname : card->longname; snd_card_set_id_no_lock(card, src, retrieve_id_from_card_name(src)); } snd_cards[card->number] = card; } err = snd_info_card_register(card); if (err < 0) return err; #if IS_ENABLED(CONFIG_SND_MIXER_OSS) if (snd_mixer_oss_notify_callback) snd_mixer_oss_notify_callback(card, SND_MIXER_OSS_NOTIFY_REGISTER); #endif return 0; } EXPORT_SYMBOL(snd_card_register); #ifdef CONFIG_SND_PROC_FS static void snd_card_info_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { int idx, count; struct snd_card *card; for (idx = count = 0; idx < SNDRV_CARDS; idx++) { guard(mutex)(&snd_card_mutex); card = snd_cards[idx]; if (card) { count++; snd_iprintf(buffer, "%2i [%-15s]: %s - %s\n", idx, card->id, card->driver, card->shortname); snd_iprintf(buffer, " %s\n", card->longname); } } if (!count) snd_iprintf(buffer, "--- no soundcards ---\n"); } #ifdef CONFIG_SND_OSSEMUL void snd_card_info_read_oss(struct snd_info_buffer *buffer) { int idx, count; struct snd_card *card; for (idx = count = 0; idx < SNDRV_CARDS; idx++) { guard(mutex)(&snd_card_mutex); card = snd_cards[idx]; if (card) { count++; snd_iprintf(buffer, "%s\n", card->longname); } } if (!count) { snd_iprintf(buffer, "--- no soundcards ---\n"); } } #endif #ifdef CONFIG_MODULES static void snd_card_module_info_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { int idx; struct snd_card *card; for (idx = 0; idx < SNDRV_CARDS; idx++) { guard(mutex)(&snd_card_mutex); card = snd_cards[idx]; if (card) snd_iprintf(buffer, "%2i %s\n", idx, card->module->name); } } #endif int __init snd_card_info_init(void) { struct snd_info_entry *entry; entry = snd_info_create_module_entry(THIS_MODULE, "cards", NULL); if (! entry) return -ENOMEM; entry->c.text.read = snd_card_info_read; if (snd_info_register(entry) < 0) return -ENOMEM; /* freed in error path */ #ifdef CONFIG_MODULES entry = snd_info_create_module_entry(THIS_MODULE, "modules", NULL); if (!entry) return -ENOMEM; entry->c.text.read = snd_card_module_info_read; if (snd_info_register(entry) < 0) return -ENOMEM; /* freed in error path */ #endif return 0; } #endif /* CONFIG_SND_PROC_FS */ /** * snd_component_add - add a component string * @card: soundcard structure * @component: the component id string * * This function adds the component id string to the supported list. * The component can be referred from the alsa-lib. * * Return: Zero otherwise a negative error code. 
*/ int snd_component_add(struct snd_card *card, const char *component) { char *ptr; int len = strlen(component); ptr = strstr(card->components, component); if (ptr != NULL) { if (ptr[len] == '\0' || ptr[len] == ' ') /* already there */ return 1; } if (strlen(card->components) + 1 + len + 1 > sizeof(card->components)) { snd_BUG(); return -ENOMEM; } if (card->components[0] != '\0') strcat(card->components, " "); strcat(card->components, component); return 0; } EXPORT_SYMBOL(snd_component_add); /** * snd_card_file_add - add the file to the file list of the card * @card: soundcard structure * @file: file pointer * * This function adds the file to the file linked-list of the card. * This linked-list is used to keep tracking the connection state, * and to avoid the release of busy resources by hotplug. * * Return: zero or a negative error code. */ int snd_card_file_add(struct snd_card *card, struct file *file) { struct snd_monitor_file *mfile; mfile = kmalloc(sizeof(*mfile), GFP_KERNEL); if (mfile == NULL) return -ENOMEM; mfile->file = file; mfile->disconnected_f_op = NULL; INIT_LIST_HEAD(&mfile->shutdown_list); guard(spinlock)(&card->files_lock); if (card->shutdown) { kfree(mfile); return -ENODEV; } list_add(&mfile->list, &card->files_list); get_device(&card->card_dev); return 0; } EXPORT_SYMBOL(snd_card_file_add); /** * snd_card_file_remove - remove the file from the file list * @card: soundcard structure * @file: file pointer * * This function removes the file formerly added to the card via * snd_card_file_add() function. * If all files are removed and snd_card_free_when_closed() was * called beforehand, it processes the pending release of * resources. * * Return: Zero or a negative error code. */ int snd_card_file_remove(struct snd_card *card, struct file *file) { struct snd_monitor_file *mfile, *found = NULL; scoped_guard(spinlock, &card->files_lock) { list_for_each_entry(mfile, &card->files_list, list) { if (mfile->file == file) { list_del(&mfile->list); scoped_guard(spinlock, &shutdown_lock) list_del(&mfile->shutdown_list); if (mfile->disconnected_f_op) fops_put(mfile->disconnected_f_op); found = mfile; break; } } if (list_empty(&card->files_list)) wake_up_all(&card->remove_sleep); } if (!found) { dev_err(card->dev, "card file remove problem (%p)\n", file); return -ENOENT; } kfree(found); put_device(&card->card_dev); return 0; } EXPORT_SYMBOL(snd_card_file_remove); #ifdef CONFIG_PM /** * snd_power_ref_and_wait - wait until the card gets powered up * @card: soundcard structure * * Take the power_ref reference count of the given card, and * wait until the card gets powered up to SNDRV_CTL_POWER_D0 state. * The refcount is down again while sleeping until power-up, hence this * function can be used for syncing the floating control ops accesses, * typically around calling control ops. * * The caller needs to pull down the refcount via snd_power_unref() later * no matter whether the error is returned from this function or not. * * Return: Zero if successful, or a negative error code. */ int snd_power_ref_and_wait(struct snd_card *card) { snd_power_ref(card); if (snd_power_get_state(card) == SNDRV_CTL_POWER_D0) return 0; wait_event_cmd(card->power_sleep, card->shutdown || snd_power_get_state(card) == SNDRV_CTL_POWER_D0, snd_power_unref(card), snd_power_ref(card)); return card->shutdown ? 
-ENODEV : 0; } EXPORT_SYMBOL_GPL(snd_power_ref_and_wait); /** * snd_power_wait - wait until the card gets powered up (old form) * @card: soundcard structure * * Wait until the card gets powered up to SNDRV_CTL_POWER_D0 state. * * Return: Zero if successful, or a negative error code. */ int snd_power_wait(struct snd_card *card) { int ret; ret = snd_power_ref_and_wait(card); snd_power_unref(card); return ret; } EXPORT_SYMBOL(snd_power_wait); #endif /* CONFIG_PM */
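/*
 * Editor's illustrative sketch (not part of sound/core/init.c above): a
 * minimal example of the snd_power_ref_and_wait()/snd_power_unref() pairing
 * that the kernel-doc above requires -- the reference is dropped on both the
 * success and the error path. example_read_register() and example_hw_read()
 * are hypothetical placeholders, not ALSA APIs.
 */
#include <sound/core.h>

static unsigned int example_hw_read(unsigned int reg)
{
	return reg;	/* stand-in for a real hardware access */
}

static int example_read_register(struct snd_card *card, unsigned int reg,
				 unsigned int *val)
{
	int err;

	/* Blocks until the card reaches SNDRV_CTL_POWER_D0 (or shutdown). */
	err = snd_power_ref_and_wait(card);
	if (!err)
		*val = example_hw_read(reg);
	snd_power_unref(card);	/* unconditionally drop the reference */
	return err;
}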
166 166 15 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 | // SPDX-License-Identifier: GPL-2.0-only /* * This file provides /sys/class/ieee80211/<wiphy name>/ * and some default attributes. * * Copyright 2005-2006 Jiri Benc <jbenc@suse.cz> * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> * Copyright (C) 2020-2021, 2023-2024 Intel Corporation */ #include <linux/device.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/nl80211.h> #include <linux/rtnetlink.h> #include <net/cfg80211.h> #include "sysfs.h" #include "core.h" #include "rdev-ops.h" static inline struct cfg80211_registered_device *dev_to_rdev( struct device *dev) { return container_of(dev, struct cfg80211_registered_device, wiphy.dev); } #define SHOW_FMT(name, fmt, member) \ static ssize_t name ## _show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ return sprintf(buf, fmt "\n", dev_to_rdev(dev)->member); \ } \ static DEVICE_ATTR_RO(name) SHOW_FMT(index, "%d", wiphy_idx); SHOW_FMT(macaddress, "%pM", wiphy.perm_addr); SHOW_FMT(address_mask, "%pM", wiphy.addr_mask); static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy; return sprintf(buf, "%s\n", wiphy_name(wiphy)); } static DEVICE_ATTR_RO(name); static ssize_t addresses_show(struct device *dev, struct device_attribute *attr, char *buf) { struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy; char *start = buf; int i; if (!wiphy->addresses) return sprintf(buf, "%pM\n", wiphy->perm_addr); for (i = 0; i < wiphy->n_addresses; i++) buf += sprintf(buf, "%pM\n", wiphy->addresses[i].addr); return buf - start; } static DEVICE_ATTR_RO(addresses); static struct attribute *ieee80211_attrs[] = { &dev_attr_index.attr, &dev_attr_macaddress.attr, &dev_attr_address_mask.attr, &dev_attr_addresses.attr, &dev_attr_name.attr, NULL, }; ATTRIBUTE_GROUPS(ieee80211); static void wiphy_dev_release(struct device *dev) { struct cfg80211_registered_device *rdev = dev_to_rdev(dev); cfg80211_dev_free(rdev); } #ifdef CONFIG_PM_SLEEP static void cfg80211_leave_all(struct cfg80211_registered_device *rdev) { struct wireless_dev *wdev; list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) cfg80211_leave(rdev, wdev); } static int wiphy_suspend(struct device *dev) { struct cfg80211_registered_device *rdev = dev_to_rdev(dev); int ret = 0; rdev->suspend_at = ktime_get_boottime_seconds(); rtnl_lock(); wiphy_lock(&rdev->wiphy); if (rdev->wiphy.registered) { if (!rdev->wiphy.wowlan_config) { cfg80211_leave_all(rdev); cfg80211_process_rdev_events(rdev); } cfg80211_process_wiphy_works(rdev, NULL); if (rdev->ops->suspend) ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config); if (ret == 1) { /* Driver refuse to configure wowlan */ cfg80211_leave_all(rdev); cfg80211_process_rdev_events(rdev); cfg80211_process_wiphy_works(rdev, NULL); ret = rdev_suspend(rdev, NULL); } if (ret == 0) rdev->suspended = true; } wiphy_unlock(&rdev->wiphy); 
rtnl_unlock(); return ret; } static int wiphy_resume(struct device *dev) { struct cfg80211_registered_device *rdev = dev_to_rdev(dev); int ret = 0; /* Age scan results with time spent in suspend */ cfg80211_bss_age(rdev, ktime_get_boottime_seconds() - rdev->suspend_at); rtnl_lock(); wiphy_lock(&rdev->wiphy); if (rdev->wiphy.registered && rdev->ops->resume) ret = rdev_resume(rdev); rdev->suspended = false; queue_work(system_unbound_wq, &rdev->wiphy_work); wiphy_unlock(&rdev->wiphy); if (ret) cfg80211_shutdown_all_interfaces(&rdev->wiphy); rtnl_unlock(); return ret; } static SIMPLE_DEV_PM_OPS(wiphy_pm_ops, wiphy_suspend, wiphy_resume); #define WIPHY_PM_OPS (&wiphy_pm_ops) #else #define WIPHY_PM_OPS NULL #endif static const void *wiphy_namespace(const struct device *d) { struct wiphy *wiphy = container_of(d, struct wiphy, dev); return wiphy_net(wiphy); } struct class ieee80211_class = { .name = "ieee80211", .dev_release = wiphy_dev_release, .dev_groups = ieee80211_groups, .pm = WIPHY_PM_OPS, .ns_type = &net_ns_type_operations, .namespace = wiphy_namespace, }; int wiphy_sysfs_init(void) { return class_register(&ieee80211_class); } void wiphy_sysfs_exit(void) { class_unregister(&ieee80211_class); } |
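/*
 * Editor's illustrative sketch (not part of net/wireless/sysfs.c above): the
 * same class + .dev_groups sysfs pattern in miniature. "example_class",
 * struct example_dev and the attribute are hypothetical; only the driver-core
 * helpers (DEVICE_ATTR_RO, ATTRIBUTE_GROUPS, class_register) are real APIs.
 */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sysfs.h>

struct example_dev {
	struct device dev;
	int index;
};

static ssize_t index_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct example_dev *edev = container_of(dev, struct example_dev, dev);

	return sysfs_emit(buf, "%d\n", edev->index);
}
static DEVICE_ATTR_RO(index);

static struct attribute *example_attrs[] = {
	&dev_attr_index.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);	/* generates example_groups */

static struct class example_class = {
	.name		= "example",
	.dev_groups	= example_groups,
};

/* Registered and torn down exactly like ieee80211_class above. */
static int __init example_sysfs_init(void)
{
	return class_register(&example_class);
}

static void example_sysfs_exit(void)
{
	class_unregister(&example_class);
}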
29 28 29 13 12 13 12 13 29 29 28 29 228 180 28 27 24 28 13 28 18 29 29 13 12 13 27 29 27 29 28 28 18 3 1 17 3 3 28 29 29 29 12 13 13 13 13 13 13 13 29 29 13 12 143 225 143 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_MM_INLINE_H #define LINUX_MM_INLINE_H #include <linux/atomic.h> #include <linux/huge_mm.h> #include <linux/mm_types.h> #include <linux/swap.h> #include <linux/string.h> #include <linux/userfaultfd_k.h> #include <linux/swapops.h> /** * folio_is_file_lru - Should the folio be on a file LRU or anon LRU? * @folio: The folio to test. * * We would like to get this info without a page flag, but the state * needs to survive until the folio is last deleted from the LRU, which * could be as far down as __page_cache_release. * * Return: An integer (not a boolean!) used to sort a folio onto the * right LRU list and to account folios correctly. * 1 if @folio is a regular filesystem backed page cache folio * or a lazily freed anonymous folio (e.g. via MADV_FREE). * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise * ram or swap backed folio. 
*/ static inline int folio_is_file_lru(struct folio *folio) { return !folio_test_swapbacked(folio); } static inline int page_is_file_lru(struct page *page) { return folio_is_file_lru(page_folio(page)); } static __always_inline void __update_lru_size(struct lruvec *lruvec, enum lru_list lru, enum zone_type zid, long nr_pages) { struct pglist_data *pgdat = lruvec_pgdat(lruvec); lockdep_assert_held(&lruvec->lru_lock); WARN_ON_ONCE(nr_pages != (int)nr_pages); __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); __mod_zone_page_state(&pgdat->node_zones[zid], NR_ZONE_LRU_BASE + lru, nr_pages); } static __always_inline void update_lru_size(struct lruvec *lruvec, enum lru_list lru, enum zone_type zid, long nr_pages) { __update_lru_size(lruvec, lru, zid, nr_pages); #ifdef CONFIG_MEMCG mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); #endif } /** * __folio_clear_lru_flags - Clear page lru flags before releasing a page. * @folio: The folio that was on lru and now has a zero reference. */ static __always_inline void __folio_clear_lru_flags(struct folio *folio) { VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio); __folio_clear_lru(folio); /* this shouldn't happen, so leave the flags to bad_page() */ if (folio_test_active(folio) && folio_test_unevictable(folio)) return; __folio_clear_active(folio); __folio_clear_unevictable(folio); } /** * folio_lru_list - Which LRU list should a folio be on? * @folio: The folio to test. * * Return: The LRU list a folio should be on, as an index * into the array of LRU lists. */ static __always_inline enum lru_list folio_lru_list(struct folio *folio) { enum lru_list lru; VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio); if (folio_test_unevictable(folio)) return LRU_UNEVICTABLE; lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON; if (folio_test_active(folio)) lru += LRU_ACTIVE; return lru; } #ifdef CONFIG_LRU_GEN #ifdef CONFIG_LRU_GEN_ENABLED static inline bool lru_gen_enabled(void) { DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]); return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]); } #else static inline bool lru_gen_enabled(void) { DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]); return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]); } #endif static inline bool lru_gen_in_fault(void) { return current->in_lru_fault; } static inline int lru_gen_from_seq(unsigned long seq) { return seq % MAX_NR_GENS; } static inline int lru_hist_from_seq(unsigned long seq) { return seq % NR_HIST_GENS; } static inline int lru_tier_from_refs(int refs) { VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH)); /* see the comment in folio_lru_refs() */ return order_base_2(refs + 1); } static inline int folio_lru_refs(struct folio *folio) { unsigned long flags = READ_ONCE(folio->flags); bool workingset = flags & BIT(PG_workingset); /* * Return the number of accesses beyond PG_referenced, i.e., N-1 if the * total number of accesses is N>1, since N=0,1 both map to the first * tier. lru_tier_from_refs() will account for this off-by-one. Also see * the comment on MAX_NR_TIERS. 
*/ return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset; } static inline int folio_lru_gen(struct folio *folio) { unsigned long flags = READ_ONCE(folio->flags); return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; } static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen) { unsigned long max_seq = lruvec->lrugen.max_seq; VM_WARN_ON_ONCE(gen >= MAX_NR_GENS); /* see the comment on MIN_NR_GENS */ return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1); } static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio, int old_gen, int new_gen) { int type = folio_is_file_lru(folio); int zone = folio_zonenum(folio); int delta = folio_nr_pages(folio); enum lru_list lru = type * LRU_INACTIVE_FILE; struct lru_gen_folio *lrugen = &lruvec->lrugen; VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS); VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS); VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1); if (old_gen >= 0) WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone], lrugen->nr_pages[old_gen][type][zone] - delta); if (new_gen >= 0) WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone], lrugen->nr_pages[new_gen][type][zone] + delta); /* addition */ if (old_gen < 0) { if (lru_gen_is_active(lruvec, new_gen)) lru += LRU_ACTIVE; __update_lru_size(lruvec, lru, zone, delta); return; } /* deletion */ if (new_gen < 0) { if (lru_gen_is_active(lruvec, old_gen)) lru += LRU_ACTIVE; __update_lru_size(lruvec, lru, zone, -delta); return; } /* promotion */ if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) { __update_lru_size(lruvec, lru, zone, -delta); __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta); } /* demotion requires isolation, e.g., lru_deactivate_fn() */ VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen)); } static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming) { unsigned long seq; unsigned long flags; int gen = folio_lru_gen(folio); int type = folio_is_file_lru(folio); int zone = folio_zonenum(folio); struct lru_gen_folio *lrugen = &lruvec->lrugen; VM_WARN_ON_ONCE_FOLIO(gen != -1, folio); if (folio_test_unevictable(folio) || !lrugen->enabled) return false; /* * There are four common cases for this page: * 1. If it's hot, i.e., freshly faulted in, add it to the youngest * generation, and it's protected over the rest below. * 2. If it can't be evicted immediately, i.e., a dirty page pending * writeback, add it to the second youngest generation. * 3. If it should be evicted first, e.g., cold and clean from * folio_rotate_reclaimable(), add it to the oldest generation. * 4. Everything else falls between 2 & 3 above and is added to the * second oldest generation if it's considered inactive, or the * oldest generation otherwise. See lru_gen_is_active(). 
*/ if (folio_test_active(folio)) seq = lrugen->max_seq; else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) || (folio_test_reclaim(folio) && (folio_test_dirty(folio) || folio_test_writeback(folio)))) seq = lrugen->max_seq - 1; else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq) seq = lrugen->min_seq[type]; else seq = lrugen->min_seq[type] + 1; gen = lru_gen_from_seq(seq); flags = (gen + 1UL) << LRU_GEN_PGOFF; /* see the comment on MIN_NR_GENS about PG_active */ set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags); lru_gen_update_size(lruvec, folio, -1, gen); /* for folio_rotate_reclaimable() */ if (reclaiming) list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]); else list_add(&folio->lru, &lrugen->folios[gen][type][zone]); return true; } static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming) { unsigned long flags; int gen = folio_lru_gen(folio); if (gen < 0) return false; VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); /* for folio_migrate_flags() */ flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0; flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags); gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; lru_gen_update_size(lruvec, folio, gen, -1); list_del(&folio->lru); return true; } #else /* !CONFIG_LRU_GEN */ static inline bool lru_gen_enabled(void) { return false; } static inline bool lru_gen_in_fault(void) { return false; } static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming) { return false; } static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming) { return false; } #endif /* CONFIG_LRU_GEN */ static __always_inline void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio) { enum lru_list lru = folio_lru_list(folio); if (lru_gen_add_folio(lruvec, folio, false)) return; update_lru_size(lruvec, lru, folio_zonenum(folio), folio_nr_pages(folio)); if (lru != LRU_UNEVICTABLE) list_add(&folio->lru, &lruvec->lists[lru]); } static __always_inline void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio) { enum lru_list lru = folio_lru_list(folio); if (lru_gen_add_folio(lruvec, folio, true)) return; update_lru_size(lruvec, lru, folio_zonenum(folio), folio_nr_pages(folio)); /* This is not expected to be used on LRU_UNEVICTABLE */ list_add_tail(&folio->lru, &lruvec->lists[lru]); } static __always_inline void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio) { enum lru_list lru = folio_lru_list(folio); if (lru_gen_del_folio(lruvec, folio, false)) return; if (lru != LRU_UNEVICTABLE) list_del(&folio->lru); update_lru_size(lruvec, lru, folio_zonenum(folio), -folio_nr_pages(folio)); } #ifdef CONFIG_ANON_VMA_NAME /* mmap_lock should be read-locked */ static inline void anon_vma_name_get(struct anon_vma_name *anon_name) { if (anon_name) kref_get(&anon_name->kref); } static inline void anon_vma_name_put(struct anon_vma_name *anon_name) { if (anon_name) kref_put(&anon_name->kref, anon_vma_name_free); } static inline struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name) { /* Prevent anon_name refcount saturation early on */ if (kref_read(&anon_name->kref) < REFCOUNT_MAX) { anon_vma_name_get(anon_name); return anon_name; } return anon_vma_name_alloc(anon_name->name); } static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma, struct vm_area_struct 
*new_vma) { struct anon_vma_name *anon_name = anon_vma_name(orig_vma); if (anon_name) new_vma->anon_name = anon_vma_name_reuse(anon_name); } static inline void free_anon_vma_name(struct vm_area_struct *vma) { /* * Not using anon_vma_name because it generates a warning if mmap_lock * is not held, which might be the case here. */ anon_vma_name_put(vma->anon_name); } static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1, struct anon_vma_name *anon_name2) { if (anon_name1 == anon_name2) return true; return anon_name1 && anon_name2 && !strcmp(anon_name1->name, anon_name2->name); } #else /* CONFIG_ANON_VMA_NAME */ static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {} static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {} static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma, struct vm_area_struct *new_vma) {} static inline void free_anon_vma_name(struct vm_area_struct *vma) {} static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1, struct anon_vma_name *anon_name2) { return true; } #endif /* CONFIG_ANON_VMA_NAME */ static inline void init_tlb_flush_pending(struct mm_struct *mm) { atomic_set(&mm->tlb_flush_pending, 0); } static inline void inc_tlb_flush_pending(struct mm_struct *mm) { atomic_inc(&mm->tlb_flush_pending); /* * The only time this value is relevant is when there are indeed pages * to flush. And we'll only flush pages after changing them, which * requires the PTL. * * So the ordering here is: * * atomic_inc(&mm->tlb_flush_pending); * spin_lock(&ptl); * ... * set_pte_at(); * spin_unlock(&ptl); * * spin_lock(&ptl) * mm_tlb_flush_pending(); * .... * spin_unlock(&ptl); * * flush_tlb_range(); * atomic_dec(&mm->tlb_flush_pending); * * Where the increment if constrained by the PTL unlock, it thus * ensures that the increment is visible if the PTE modification is * visible. After all, if there is no PTE modification, nobody cares * about TLB flushes either. * * This very much relies on users (mm_tlb_flush_pending() and * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc * locks (PPC) the unlock of one doesn't order against the lock of * another PTL. * * The decrement is ordered by the flush_tlb_range(), such that * mm_tlb_flush_pending() will not return false unless all flushes have * completed. */ } static inline void dec_tlb_flush_pending(struct mm_struct *mm) { /* * See inc_tlb_flush_pending(). * * This cannot be smp_mb__before_atomic() because smp_mb() simply does * not order against TLB invalidate completion, which is what we need. * * Therefore we must rely on tlb_flush_*() to guarantee order. */ atomic_dec(&mm->tlb_flush_pending); } static inline bool mm_tlb_flush_pending(struct mm_struct *mm) { /* * Must be called after having acquired the PTL; orders against that * PTLs release and therefore ensures that if we observe the modified * PTE we must also observe the increment from inc_tlb_flush_pending(). * * That is, it only guarantees to return true if there is a flush * pending for _this_ PTL. */ return atomic_read(&mm->tlb_flush_pending); } static inline bool mm_tlb_flush_nested(struct mm_struct *mm) { /* * Similar to mm_tlb_flush_pending(), we must have acquired the PTL * for which there is a TLB flush pending in order to guarantee * we've seen both that PTE modification and the increment. 
* * (no requirement on actually still holding the PTL, that is irrelevant) */ return atomic_read(&mm->tlb_flush_pending) > 1; } #ifdef CONFIG_MMU /* * Computes the pte marker to copy from the given source entry into dst_vma. * If no marker should be copied, returns 0. * The caller should insert a new pte created with make_pte_marker(). */ static inline pte_marker copy_pte_marker( swp_entry_t entry, struct vm_area_struct *dst_vma) { pte_marker srcm = pte_marker_get(entry); /* Always copy error entries. */ pte_marker dstm = srcm & PTE_MARKER_POISONED; /* Only copy PTE markers if UFFD register matches. */ if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma)) dstm |= PTE_MARKER_UFFD_WP; return dstm; } #endif /* * If this pte is wr-protected by uffd-wp in any form, arm the special pte to * replace a none pte. NOTE! This should only be called when *pte is already * cleared so we will never accidentally replace something valuable. Meanwhile * none pte also means we are not demoting the pte so tlb flushed is not needed. * E.g., when pte cleared the caller should have taken care of the tlb flush. * * Must be called with pgtable lock held so that no thread will see the none * pte, and if they see it, they'll fault and serialize at the pgtable lock. * * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled. */ static inline void pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, pte_t *pte, pte_t pteval) { #ifdef CONFIG_PTE_MARKER_UFFD_WP bool arm_uffd_pte = false; /* The current status of the pte should be "cleared" before calling */ WARN_ON_ONCE(!pte_none(ptep_get(pte))); /* * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole * thing, because when zapping either it means it's dropping the * page, or in TTU where the present pte will be quickly replaced * with a swap pte. There's no way of leaking the bit. */ if (vma_is_anonymous(vma) || !userfaultfd_wp(vma)) return; /* A uffd-wp wr-protected normal pte */ if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval))) arm_uffd_pte = true; /* * A uffd-wp wr-protected swap pte. Note: this should even cover an * existing pte marker with uffd-wp bit set. */ if (unlikely(pte_swp_uffd_wp_any(pteval))) arm_uffd_pte = true; if (unlikely(arm_uffd_pte)) set_pte_at(vma->vm_mm, addr, pte, make_pte_marker(PTE_MARKER_UFFD_WP)); #endif } static inline bool vma_has_recency(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ)) return false; if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE)) return false; return true; } #endif |
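/*
 * Editor's illustrative sketch (not part of the header above): the ordering
 * that the inc_tlb_flush_pending() comment describes, written out as a
 * simplified range write-protector. example_wrprotect_range() is hypothetical
 * and omits huge pages, error handling and the mmu_gather machinery that real
 * callers such as mprotect use.
 */
#include <linux/mm.h>
#include <asm/tlbflush.h>

static void example_wrprotect_range(struct vm_area_struct *vma, pmd_t *pmd,
				    unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	pte_t *start_pte, *pte;
	spinlock_t *ptl;

	/* 1. Advertise the pending flush before taking the PTL. */
	inc_tlb_flush_pending(mm);

	start_pte = pte = pte_offset_map_lock(mm, pmd, start, &ptl);
	if (!pte)
		goto out;
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		pte_t entry = ptep_get(pte);

		/* 2. Modify PTEs under the PTL. */
		if (pte_present(entry))
			set_pte_at(mm, addr, pte, pte_wrprotect(entry));
	}
	pte_unmap_unlock(start_pte, ptl);

	/*
	 * 3. Flush, then decrement, so that mm_tlb_flush_pending() keeps
	 *    returning true until the flush has completed.
	 */
	flush_tlb_range(vma, start, end);
out:
	dec_tlb_flush_pending(mm);
}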
12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 | // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs * * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes * 2000-2002 x86-64 support by Andi Kleen */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/kstrtox.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/personality.h> #include <linux/uaccess.h> #include <linux/user-return-notifier.h> #include <linux/uprobes.h> #include <linux/context_tracking.h> #include <linux/entry-common.h> #include <linux/syscalls.h> #include <linux/rseq.h> #include <asm/processor.h> #include <asm/ucontext.h> #include <asm/fpu/signal.h> #include <asm/fpu/xstate.h> #include <asm/vdso.h> #include <asm/mce.h> #include <asm/sighandling.h> #include <asm/vm86.h> #include <asm/syscall.h> #include <asm/sigframe.h> #include <asm/signal.h> #include <asm/shstk.h> static inline int is_ia32_compat_frame(struct ksignal *ksig) { return IS_ENABLED(CONFIG_IA32_EMULATION) && ksig->ka.sa.sa_flags & SA_IA32_ABI; } static inline int is_ia32_frame(struct ksignal *ksig) { return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig); } static inline int is_x32_frame(struct ksignal *ksig) { return IS_ENABLED(CONFIG_X86_X32_ABI) && ksig->ka.sa.sa_flags & SA_X32_ABI; } /* * Enable all pkeys temporarily, so as to ensure that both the current * execution stack as well as the alternate signal stack are writeable. 
* The application can use any of the available pkeys to protect the * alternate signal stack, and we don't know which one it is, so enable * all. The PKRU register will be reset to init_pkru later in the flow, * in fpu__clear_user_states(), and it is the application's responsibility * to enable the appropriate pkey as the first step in the signal handler * so that the handler does not segfault. */ static inline u32 sig_prepare_pkru(void) { u32 orig_pkru = read_pkru(); write_pkru(0); return orig_pkru; } /* * Set up a signal frame. */ /* x86 ABI requires 16-byte alignment */ #define FRAME_ALIGNMENT 16UL #define MAX_FRAME_PADDING (FRAME_ALIGNMENT - 1) /* * Determine which stack to use.. */ void __user * get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size, void __user **fpstate) { struct k_sigaction *ka = &ksig->ka; int ia32_frame = is_ia32_frame(ksig); /* Default to using normal stack */ bool nested_altstack = on_sig_stack(regs->sp); bool entering_altstack = false; unsigned long math_size = 0; unsigned long sp = regs->sp; unsigned long buf_fx = 0; u32 pkru; /* redzone */ if (!ia32_frame) sp -= 128; /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { /* * This checks nested_altstack via sas_ss_flags(). Sensible * programs use SS_AUTODISARM, which disables that check, and * programs that don't use SS_AUTODISARM get compatible. */ if (sas_ss_flags(sp) == 0) { sp = current->sas_ss_sp + current->sas_ss_size; entering_altstack = true; } } else if (ia32_frame && !nested_altstack && regs->ss != __USER_DS && !(ka->sa.sa_flags & SA_RESTORER) && ka->sa.sa_restorer) { /* This is the legacy signal stack switching. */ sp = (unsigned long) ka->sa.sa_restorer; entering_altstack = true; } sp = fpu__alloc_mathframe(sp, ia32_frame, &buf_fx, &math_size); *fpstate = (void __user *)sp; sp -= frame_size; if (ia32_frame) /* * Align the stack pointer according to the i386 ABI, * i.e. so that on function entry ((sp + 4) & 15) == 0. */ sp = ((sp + 4) & -FRAME_ALIGNMENT) - 4; else sp = round_down(sp, FRAME_ALIGNMENT) - 8; /* * If we are on the alternate signal stack and would overflow it, don't. * Return an always-bogus address instead so we will die with SIGSEGV. */ if (unlikely((nested_altstack || entering_altstack) && !__on_sig_stack(sp))) { if (show_unhandled_signals && printk_ratelimit()) pr_info("%s[%d] overflowed sigaltstack\n", current->comm, task_pid_nr(current)); return (void __user *)-1L; } /* Update PKRU to enable access to the alternate signal stack. */ pkru = sig_prepare_pkru(); /* save i387 and extended state */ if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size, pkru)) { /* * Restore PKRU to the original, user-defined value; disable * extra pkeys enabled for the alternate signal stack, if any. */ write_pkru(pkru); return (void __user *)-1L; } return (void __user *)sp; } /* * There are four different struct types for signal frame: sigframe_ia32, * rt_sigframe_ia32, rt_sigframe_x32, and rt_sigframe. Use the worst case * -- the largest size. It means the size for 64-bit apps is a bit more * than needed, but this keeps the code simple. */ #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) # define MAX_FRAME_SIGINFO_UCTXT_SIZE sizeof(struct sigframe_ia32) #else # define MAX_FRAME_SIGINFO_UCTXT_SIZE sizeof(struct rt_sigframe) #endif /* * The FP state frame contains an XSAVE buffer which must be 64-byte aligned. * If a signal frame starts at an unaligned address, extra space is required. 
* This is the max alignment padding, conservatively. */ #define MAX_XSAVE_PADDING 63UL /* * The frame data is composed of the following areas and laid out as: * * ------------------------- * | alignment padding | * ------------------------- * | (f)xsave frame | * ------------------------- * | fsave header | * ------------------------- * | alignment padding | * ------------------------- * | siginfo + ucontext | * ------------------------- */ /* max_frame_size tells userspace the worst case signal stack size. */ static unsigned long __ro_after_init max_frame_size; static unsigned int __ro_after_init fpu_default_state_size; static int __init init_sigframe_size(void) { fpu_default_state_size = fpu__get_fpstate_size(); max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING; max_frame_size += fpu_default_state_size + MAX_XSAVE_PADDING; /* Userspace expects an aligned size. */ max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT); pr_info("max sigframe size: %lu\n", max_frame_size); return 0; } early_initcall(init_sigframe_size); unsigned long get_sigframe_size(void) { return max_frame_size; } static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) { /* Perform fixup for the pre-signal frame. */ rseq_signal_deliver(ksig, regs); /* Set up the stack frame */ if (is_ia32_frame(ksig)) { if (ksig->ka.sa.sa_flags & SA_SIGINFO) return ia32_setup_rt_frame(ksig, regs); else return ia32_setup_frame(ksig, regs); } else if (is_x32_frame(ksig)) { return x32_setup_rt_frame(ksig, regs); } else { return x64_setup_rt_frame(ksig, regs); } } static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { bool stepping, failed; struct fpu *fpu = ¤t->thread.fpu; if (v8086_mode(regs)) save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL); /* Are we from a system call? */ if (syscall_get_nr(current, regs) != -1) { /* If so, check system call restarting.. */ switch (syscall_get_error(current, regs)) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->ax = -EINTR; break; case -ERESTARTSYS: if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { regs->ax = -EINTR; break; } fallthrough; case -ERESTARTNOINTR: regs->ax = regs->orig_ax; regs->ip -= 2; break; } } /* * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now * so that register information in the sigcontext is correct and * then notify the tracer before entering the signal handler. */ stepping = test_thread_flag(TIF_SINGLESTEP); if (stepping) user_disable_single_step(current); failed = (setup_rt_frame(ksig, regs) < 0); if (!failed) { /* * Clear the direction flag as per the ABI for function entry. * * Clear RF when entering the signal handler, because * it might disable possible debug exception from the * signal handler. * * Clear TF for the case when it wasn't set by debugger to * avoid the recursive send_sigtrap() in SIGTRAP handler. */ regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF); /* * Ensure the signal handler starts with the new fpu state. */ fpu__clear_user_states(fpu); } signal_setup_done(failed, ksig, stepping); } static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) { #ifdef CONFIG_IA32_EMULATION if (current->restart_block.arch_data & TS_COMPAT) return __NR_ia32_restart_syscall; #endif #ifdef CONFIG_X86_X32_ABI return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT); #else return __NR_restart_syscall; #endif } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. 
Thus you cannot kill init even with a SIGKILL even by * mistake. */ void arch_do_signal_or_restart(struct pt_regs *regs) { struct ksignal ksig; if (get_signal(&ksig)) { /* Whee! Actually deliver the signal. */ handle_signal(&ksig, regs); return; } /* Did we come from a system call? */ if (syscall_get_nr(current, regs) != -1) { /* Restart the system call - no handlers present */ switch (syscall_get_error(current, regs)) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: regs->ax = regs->orig_ax; regs->ip -= 2; break; case -ERESTART_RESTARTBLOCK: regs->ax = get_nr_restart_syscall(regs); regs->ip -= 2; break; } } /* * If there's no signal to deliver, we just put the saved sigmask * back. */ restore_saved_sigmask(); } void signal_fault(struct pt_regs *regs, void __user *frame, char *where) { struct task_struct *me = current; if (show_unhandled_signals && printk_ratelimit()) { printk("%s" "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, me->comm, me->pid, where, frame, regs->ip, regs->sp, regs->orig_ax); print_vma_addr(KERN_CONT " in ", regs->ip); pr_cont("\n"); } force_sig(SIGSEGV); } #ifdef CONFIG_DYNAMIC_SIGFRAME #ifdef CONFIG_STRICT_SIGALTSTACK_SIZE static bool strict_sigaltstack_size __ro_after_init = true; #else static bool strict_sigaltstack_size __ro_after_init = false; #endif static int __init strict_sas_size(char *arg) { return kstrtobool(arg, &strict_sigaltstack_size) == 0; } __setup("strict_sas_size", strict_sas_size); /* * MINSIGSTKSZ is 2048 and can't be changed despite the fact that AVX512 * exceeds that size already. As such programs might never use the * sigaltstack they just continued to work. While always checking against * the real size would be correct, this might be considered a regression. * * Therefore avoid the sanity check, unless enforced by kernel * configuration or command line option. * * When dynamic FPU features are supported, the check is also enforced when * the task has permissions to use dynamic features. Tasks which have no * permission are checked against the size of the non-dynamic feature set * if strict checking is enabled. This avoids forcing all tasks on the * system to allocate large sigaltstacks even if they are never going * to use a dynamic feature. As this is serialized via sighand::siglock * any permission request for a dynamic feature either happened already * or will see the newly install sigaltstack size in the permission checks. */ bool sigaltstack_size_valid(size_t ss_size) { unsigned long fsize = max_frame_size - fpu_default_state_size; u64 mask; lockdep_assert_held(¤t->sighand->siglock); if (!fpu_state_size_dynamic() && !strict_sigaltstack_size) return true; fsize += current->group_leader->thread.fpu.perm.__user_state_size; if (likely(ss_size > fsize)) return true; if (strict_sigaltstack_size) return ss_size > fsize; mask = current->group_leader->thread.fpu.perm.__state_perm; if (mask & XFEATURE_MASK_USER_DYNAMIC) return ss_size > fsize; return true; } #endif /* CONFIG_DYNAMIC_SIGFRAME */ |
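/*
 * Editor's illustrative sketch (user space, not part of the kernel file
 * above): sizing a sigaltstack from the kernel-reported worst-case signal
 * frame rather than the legacy MINSIGSTKSZ constant, which -- as the comment
 * above notes -- is already smaller than an AVX-512 frame. On kernels that
 * export it, the AT_MINSIGSTKSZ auxv entry reflects the worst-case frame
 * size (cf. get_sigframe_size() above); everything named example_* is a
 * hypothetical placeholder.
 */
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/auxv.h>

#ifndef AT_MINSIGSTKSZ
#define AT_MINSIGSTKSZ	51	/* include/uapi/linux/auxvec.h */
#endif

static void example_handler(int sig)
{
	(void)sig;	/* hypothetical handler body */
}

static int example_install_altstack_handler(void)
{
	unsigned long minsz = getauxval(AT_MINSIGSTKSZ);
	struct sigaction sa;
	stack_t ss;

	/* Fall back to SIGSTKSZ when the kernel does not report a size. */
	ss.ss_size = minsz > SIGSTKSZ ? minsz : SIGSTKSZ;
	ss.ss_flags = 0;
	ss.ss_sp = malloc(ss.ss_size);
	if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0)
		return -1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = example_handler;
	sa.sa_flags = SA_ONSTACK;	/* deliver on the alternate stack */
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}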
905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 | // SPDX-License-Identifier: GPL-2.0 #include <linux/mm.h> #include <linux/gfp.h> #include <linux/hugetlb.h> #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/fixmap.h> #include <asm/mtrr.h> #ifdef CONFIG_DYNAMIC_PHYSICAL_MASK phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1; EXPORT_SYMBOL(physical_mask); #endif #ifdef CONFIG_HIGHPTE #define PGTABLE_HIGHMEM __GFP_HIGHMEM #else #define PGTABLE_HIGHMEM 0 #endif #ifndef CONFIG_PARAVIRT static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table) { tlb_remove_page(tlb, table); } #endif gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM; pgtable_t pte_alloc_one(struct mm_struct *mm) { return __pte_alloc_one(mm, __userpte_alloc_gfp); } static int __init setup_userpte(char *arg) { if (!arg) return -EINVAL; /* * "userpte=nohigh" disables allocation of user pagetables in * high memory. */ if (strcmp(arg, "nohigh") == 0) __userpte_alloc_gfp &= ~__GFP_HIGHMEM; else return -EINVAL; return 0; } early_param("userpte", setup_userpte); void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) { pagetable_pte_dtor(page_ptdesc(pte)); paravirt_release_pte(page_to_pfn(pte)); paravirt_tlb_remove_table(tlb, pte); } #if CONFIG_PGTABLE_LEVELS > 2 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) { struct ptdesc *ptdesc = virt_to_ptdesc(pmd); paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); /* * NOTE! For PAE, any changes to the top page-directory-pointer-table * entries need a full cr3 reload to flush. */ #ifdef CONFIG_X86_PAE tlb->need_flush_all = 1; #endif pagetable_pmd_dtor(ptdesc); paravirt_tlb_remove_table(tlb, ptdesc_page(ptdesc)); } #if CONFIG_PGTABLE_LEVELS > 3 void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) { struct ptdesc *ptdesc = virt_to_ptdesc(pud); pagetable_pud_dtor(ptdesc); paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); paravirt_tlb_remove_table(tlb, virt_to_page(pud)); } #if CONFIG_PGTABLE_LEVELS > 4 void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d) { paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT); paravirt_tlb_remove_table(tlb, virt_to_page(p4d)); } #endif /* CONFIG_PGTABLE_LEVELS > 4 */ #endif /* CONFIG_PGTABLE_LEVELS > 3 */ #endif /* CONFIG_PGTABLE_LEVELS > 2 */ static inline void pgd_list_add(pgd_t *pgd) { struct ptdesc *ptdesc = virt_to_ptdesc(pgd); list_add(&ptdesc->pt_list, &pgd_list); } static inline void pgd_list_del(pgd_t *pgd) { struct ptdesc *ptdesc = virt_to_ptdesc(pgd); list_del(&ptdesc->pt_list); } #define UNSHARED_PTRS_PER_PGD \ (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) #define MAX_UNSHARED_PTRS_PER_PGD \ MAX_T(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD) static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) { virt_to_ptdesc(pgd)->pt_mm = mm; } struct mm_struct *pgd_page_get_mm(struct page *page) { return page_ptdesc(page)->pt_mm; } static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) { /* If the pgd points to a shared pagetable level (either the ptes in non-PAE, or shared PMD in PAE), then just copy the references from swapper_pg_dir. 
*/ if (CONFIG_PGTABLE_LEVELS == 2 || (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) || CONFIG_PGTABLE_LEVELS >= 4) { clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY, swapper_pg_dir + KERNEL_PGD_BOUNDARY, KERNEL_PGD_PTRS); } /* list required to sync kernel mapping updates */ if (!SHARED_KERNEL_PMD) { pgd_set_mm(pgd, mm); pgd_list_add(pgd); } } static void pgd_dtor(pgd_t *pgd) { if (SHARED_KERNEL_PMD) return; spin_lock(&pgd_lock); pgd_list_del(pgd); spin_unlock(&pgd_lock); } /* * List of all pgd's needed for non-PAE so it can invalidate entries * in both cached and uncached pgd's; not needed for PAE since the * kernel pmd is shared. If PAE were not to share the pmd a similar * tactic would be needed. This is essentially codepath-based locking * against pageattr.c; it is the unique case in which a valid change * of kernel pagetables can't be lazily synchronized by vmalloc faults. * vmalloc faults work because attached pagetables are never freed. * -- nyc */ #ifdef CONFIG_X86_PAE /* * In PAE mode, we need to do a cr3 reload (=tlb flush) when * updating the top-level pagetable entries to guarantee the * processor notices the update. Since this is expensive, and * all 4 top-level entries are used almost immediately in a * new process's life, we just pre-populate them here. * * Also, if we're in a paravirt environment where the kernel pmd is * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate * and initialize the kernel pmds here. */ #define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD #define MAX_PREALLOCATED_PMDS MAX_UNSHARED_PTRS_PER_PGD /* * We allocate separate PMDs for the kernel part of the user page-table * when PTI is enabled. We need them to map the per-process LDT into the * user-space page-table. */ #define PREALLOCATED_USER_PMDS (boot_cpu_has(X86_FEATURE_PTI) ? \ KERNEL_PGD_PTRS : 0) #define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) { paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); /* Note: almost everything apart from _PAGE_PRESENT is reserved at the pmd (PDPT) level. */ set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT)); /* * According to Intel App note "TLBs, Paging-Structure Caches, * and Their Invalidation", April 2007, document 317080-001, * section 8.1: in PAE mode we explicitly have to flush the * TLB via cr3 if the top-level pgd is changed... */ flush_tlb_mm(mm); } #else /* !CONFIG_X86_PAE */ /* No need to prepopulate any pagetable entries in non-PAE modes. */ #define PREALLOCATED_PMDS 0 #define MAX_PREALLOCATED_PMDS 0 #define PREALLOCATED_USER_PMDS 0 #define MAX_PREALLOCATED_USER_PMDS 0 #endif /* CONFIG_X86_PAE */ static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count) { int i; struct ptdesc *ptdesc; for (i = 0; i < count; i++) if (pmds[i]) { ptdesc = virt_to_ptdesc(pmds[i]); pagetable_pmd_dtor(ptdesc); pagetable_free(ptdesc); mm_dec_nr_pmds(mm); } } static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count) { int i; bool failed = false; gfp_t gfp = GFP_PGTABLE_USER; if (mm == &init_mm) gfp &= ~__GFP_ACCOUNT; gfp &= ~__GFP_HIGHMEM; for (i = 0; i < count; i++) { pmd_t *pmd = NULL; struct ptdesc *ptdesc = pagetable_alloc(gfp, 0); if (!ptdesc) failed = true; if (ptdesc && !pagetable_pmd_ctor(ptdesc)) { pagetable_free(ptdesc); ptdesc = NULL; failed = true; } if (ptdesc) { mm_inc_nr_pmds(mm); pmd = ptdesc_address(ptdesc); } pmds[i] = pmd; } if (failed) { free_pmds(mm, pmds, count); return -ENOMEM; } return 0; } /* * Mop up any pmd pages which may still be attached to the pgd. 
* Normally they will be freed by munmap/exit_mmap, but any pmd we * preallocate which never got a corresponding vma will need to be * freed manually. */ static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp) { pgd_t pgd = *pgdp; if (pgd_val(pgd) != 0) { pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); pgd_clear(pgdp); paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); pmd_free(mm, pmd); mm_dec_nr_pmds(mm); } } static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) { int i; for (i = 0; i < PREALLOCATED_PMDS; i++) mop_up_one_pmd(mm, &pgdp[i]); #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION if (!boot_cpu_has(X86_FEATURE_PTI)) return; pgdp = kernel_to_user_pgdp(pgdp); for (i = 0; i < PREALLOCATED_USER_PMDS; i++) mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]); #endif } static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) { p4d_t *p4d; pud_t *pud; int i; p4d = p4d_offset(pgd, 0); pud = pud_offset(p4d, 0); for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) { pmd_t *pmd = pmds[i]; if (i >= KERNEL_PGD_BOUNDARY) memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), sizeof(pmd_t) * PTRS_PER_PMD); pud_populate(mm, pud, pmd); } } #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION static void pgd_prepopulate_user_pmd(struct mm_struct *mm, pgd_t *k_pgd, pmd_t *pmds[]) { pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir); pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd); p4d_t *u_p4d; pud_t *u_pud; int i; u_p4d = p4d_offset(u_pgd, 0); u_pud = pud_offset(u_p4d, 0); s_pgd += KERNEL_PGD_BOUNDARY; u_pud += KERNEL_PGD_BOUNDARY; for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) { pmd_t *pmd = pmds[i]; memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd), sizeof(pmd_t) * PTRS_PER_PMD); pud_populate(mm, u_pud, pmd); } } #else static void pgd_prepopulate_user_pmd(struct mm_struct *mm, pgd_t *k_pgd, pmd_t *pmds[]) { } #endif /* * Xen paravirt assumes pgd table should be in one page. 64 bit kernel also * assumes that pgd should be in one page. * * But kernel with PAE paging that is not running as a Xen domain * only needs to allocate 32 bytes for pgd instead of one page. */ #ifdef CONFIG_X86_PAE #include <linux/slab.h> #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) #define PGD_ALIGN 32 static struct kmem_cache *pgd_cache; void __init pgtable_cache_init(void) { /* * When PAE kernel is running as a Xen domain, it does not use * shared kernel pmd. And this requires a whole page for pgd. */ if (!SHARED_KERNEL_PMD) return; /* * when PAE kernel is not running as a Xen domain, it uses * shared kernel pmd. Shared kernel pmd does not require a whole * page for pgd. We are able to just allocate a 32-byte for pgd. * During boot time, we create a 32-byte slab for pgd table allocation. */ pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN, SLAB_PANIC, NULL); } static inline pgd_t *_pgd_alloc(void) { /* * If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen domain. * We allocate one page for pgd. */ if (!SHARED_KERNEL_PMD) return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER, PGD_ALLOCATION_ORDER); /* * Now PAE kernel is not running as a Xen domain. We can allocate * a 32-byte slab for pgd to save memory space. 
*/ return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER); } static inline void _pgd_free(pgd_t *pgd) { if (!SHARED_KERNEL_PMD) free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER); else kmem_cache_free(pgd_cache, pgd); } #else static inline pgd_t *_pgd_alloc(void) { return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER, PGD_ALLOCATION_ORDER); } static inline void _pgd_free(pgd_t *pgd) { free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER); } #endif /* CONFIG_X86_PAE */ pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *pgd; pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS]; pmd_t *pmds[MAX_PREALLOCATED_PMDS]; pgd = _pgd_alloc(); if (pgd == NULL) goto out; mm->pgd = pgd; if (sizeof(pmds) != 0 && preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0) goto out_free_pgd; if (sizeof(u_pmds) != 0 && preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0) goto out_free_pmds; if (paravirt_pgd_alloc(mm) != 0) goto out_free_user_pmds; /* * Make sure that pre-populating the pmds is atomic with * respect to anything walking the pgd_list, so that they * never see a partially populated pgd. */ spin_lock(&pgd_lock); pgd_ctor(mm, pgd); if (sizeof(pmds) != 0) pgd_prepopulate_pmd(mm, pgd, pmds); if (sizeof(u_pmds) != 0) pgd_prepopulate_user_pmd(mm, pgd, u_pmds); spin_unlock(&pgd_lock); return pgd; out_free_user_pmds: if (sizeof(u_pmds) != 0) free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS); out_free_pmds: if (sizeof(pmds) != 0) free_pmds(mm, pmds, PREALLOCATED_PMDS); out_free_pgd: _pgd_free(pgd); out: return NULL; } void pgd_free(struct mm_struct *mm, pgd_t *pgd) { pgd_mop_up_pmds(mm, pgd); pgd_dtor(pgd); paravirt_pgd_free(mm, pgd); _pgd_free(pgd); } /* * Used to set accessed or dirty bits in the page table entries * on other architectures. On x86, the accessed and dirty bits * are tracked by hardware. However, do_wp_page calls this function * to also make the pte writeable at the same time the dirty bit is * set. In that case we do actually need to write the PTE. */ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty) { int changed = !pte_same(*ptep, entry); if (changed && dirty) set_pte(ptep, entry); return changed; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty) { int changed = !pmd_same(*pmdp, entry); VM_BUG_ON(address & ~HPAGE_PMD_MASK); if (changed && dirty) { set_pmd(pmdp, entry); /* * We had a write-protection fault here and changed the pmd * to to more permissive. No need to flush the TLB for that, * #PF is architecturally guaranteed to do that and in the * worst-case we'll generate a spurious fault. */ } return changed; } int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, pud_t entry, int dirty) { int changed = !pud_same(*pudp, entry); VM_BUG_ON(address & ~HPAGE_PUD_MASK); if (changed && dirty) { set_pud(pudp, entry); /* * We had a write-protection fault here and changed the pud * to to more permissive. No need to flush the TLB for that, * #PF is architecturally guaranteed to do that and in the * worst-case we'll generate a spurious fault. 
*/ } return changed; } #endif int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { int ret = 0; if (pte_young(*ptep)) ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, (unsigned long *) &ptep->pte); return ret; } #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) { int ret = 0; if (pmd_young(*pmdp)) ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, (unsigned long *)pmdp); return ret; } #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE int pudp_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pud_t *pudp) { int ret = 0; if (pud_young(*pudp)) ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, (unsigned long *)pudp); return ret; } #endif int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { /* * On x86 CPUs, clearing the accessed bit without a TLB flush * doesn't cause data corruption. [ It could cause incorrect * page aging and the (mistaken) reclaim of hot pages, but the * chance of that should be relatively low. ] * * So as a performance optimization don't flush the TLB when * clearing the accessed bit, it will eventually be flushed by * a context switch or a VM operation anyway. [ In the rare * event of it not getting flushed for a long time the delay * shouldn't really matter because there's no real memory * pressure for swapout to react to. ] */ return ptep_test_and_clear_young(vma, address, ptep); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { int young; VM_BUG_ON(address & ~HPAGE_PMD_MASK); young = pmdp_test_and_clear_young(vma, address, pmdp); if (young) flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return young; } pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { VM_WARN_ON_ONCE(!pmd_present(*pmdp)); /* * No flush is necessary. Once an invalid PTE is established, the PTE's * access and dirty bits cannot be updated. */ return pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp)); } #endif #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address, pud_t *pudp) { VM_WARN_ON_ONCE(!pud_present(*pudp)); pud_t old = pudp_establish(vma, address, pudp, pud_mkinvalid(*pudp)); flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE); return old; } #endif /** * reserve_top_address - reserves a hole in the top of kernel address space * @reserve - size of hole to reserve * * Can be used to relocate the fixmap area and poke a hole in the top * of kernel address space to make room for a hypervisor. */ void __init reserve_top_address(unsigned long reserve) { #ifdef CONFIG_X86_32 BUG_ON(fixmaps_set > 0); __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE; printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n", -reserve, __FIXADDR_TOP + PAGE_SIZE); #endif } int fixmaps_set; void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) { unsigned long address = __fix_to_virt(idx); #ifdef CONFIG_X86_64 /* * Ensure that the static initial page tables are covering the * fixmap completely. 
*/ BUILD_BUG_ON(__end_of_permanent_fixed_addresses > (FIXMAP_PMD_NUM * PTRS_PER_PTE)); #endif if (idx >= __end_of_fixed_addresses) { BUG(); return; } set_pte_vaddr(address, pte); fixmaps_set++; } void native_set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags) { /* Sanitize 'prot' against any unsupported bits: */ pgprot_val(flags) &= __default_kernel_pte_mask; __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags)); } #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP #ifdef CONFIG_X86_5LEVEL /** * p4d_set_huge - setup kernel P4D mapping * * No 512GB pages yet -- always return 0 */ int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) { return 0; } /** * p4d_clear_huge - clear kernel P4D mapping when it is set * * No 512GB pages yet -- always return 0 */ void p4d_clear_huge(p4d_t *p4d) { } #endif /** * pud_set_huge - setup kernel PUD mapping * * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this * function sets up a huge page only if the complete range has the same MTRR * caching mode. * * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger * page mapping attempt fails. * * Returns 1 on success and 0 on failure. */ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) { u8 uniform; mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform); if (!uniform) return 0; /* Bail out if we are we on a populated non-leaf entry: */ if (pud_present(*pud) && !pud_leaf(*pud)) return 0; set_pte((pte_t *)pud, pfn_pte( (u64)addr >> PAGE_SHIFT, __pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE))); return 1; } /** * pmd_set_huge - setup kernel PMD mapping * * See text over pud_set_huge() above. * * Returns 1 on success and 0 on failure. */ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) { u8 uniform; mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform); if (!uniform) { pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n", __func__, addr, addr + PMD_SIZE); return 0; } /* Bail out if we are we on a populated non-leaf entry: */ if (pmd_present(*pmd) && !pmd_leaf(*pmd)) return 0; set_pte((pte_t *)pmd, pfn_pte( (u64)addr >> PAGE_SHIFT, __pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE))); return 1; } /** * pud_clear_huge - clear kernel PUD mapping when it is set * * Returns 1 on success and 0 on failure (no PUD map is found). */ int pud_clear_huge(pud_t *pud) { if (pud_leaf(*pud)) { pud_clear(pud); return 1; } return 0; } /** * pmd_clear_huge - clear kernel PMD mapping when it is set * * Returns 1 on success and 0 on failure (no PMD map is found). */ int pmd_clear_huge(pmd_t *pmd) { if (pmd_leaf(*pmd)) { pmd_clear(pmd); return 1; } return 0; } #ifdef CONFIG_X86_64 /** * pud_free_pmd_page - Clear pud entry and free pmd page. * @pud: Pointer to a PUD. * @addr: Virtual address associated with pud. * * Context: The pud range has been unmapped and TLB purged. * Return: 1 if clearing the entry succeeded. 0 otherwise. * * NOTE: Callers must allow a single page allocation. 
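 * (The single page is needed for a temporary snapshot of the pmd entries,
 * taken before the pud is cleared and used afterwards to free the
 * underlying pte pages; see pmd_sv below.)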
*/ int pud_free_pmd_page(pud_t *pud, unsigned long addr) { pmd_t *pmd, *pmd_sv; pte_t *pte; int i; pmd = pud_pgtable(*pud); pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL); if (!pmd_sv) return 0; for (i = 0; i < PTRS_PER_PMD; i++) { pmd_sv[i] = pmd[i]; if (!pmd_none(pmd[i])) pmd_clear(&pmd[i]); } pud_clear(pud); /* INVLPG to clear all paging-structure caches */ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1); for (i = 0; i < PTRS_PER_PMD; i++) { if (!pmd_none(pmd_sv[i])) { pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]); free_page((unsigned long)pte); } } free_page((unsigned long)pmd_sv); pagetable_pmd_dtor(virt_to_ptdesc(pmd)); free_page((unsigned long)pmd); return 1; } /** * pmd_free_pte_page - Clear pmd entry and free pte page. * @pmd: Pointer to a PMD. * @addr: Virtual address associated with pmd. * * Context: The pmd range has been unmapped and TLB purged. * Return: 1 if clearing the entry succeeded. 0 otherwise. */ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) { pte_t *pte; pte = (pte_t *)pmd_page_vaddr(*pmd); pmd_clear(pmd); /* INVLPG to clear all paging-structure caches */ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1); free_page((unsigned long)pte); return 1; } #else /* !CONFIG_X86_64 */ /* * Disable free page handling on x86-PAE. This assures that ioremap() * does not update sync'd pmd entries. See vmalloc_sync_one(). */ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) { return pmd_none(*pmd); } #endif /* CONFIG_X86_64 */ #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma) { if (vma->vm_flags & VM_SHADOW_STACK) return pte_mkwrite_shstk(pte); pte = pte_mkwrite_novma(pte); return pte_clear_saveddirty(pte); } pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) { if (vma->vm_flags & VM_SHADOW_STACK) return pmd_mkwrite_shstk(pmd); pmd = pmd_mkwrite_novma(pmd); return pmd_clear_saveddirty(pmd); } void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte) { /* * Hardware before shadow stack can (rarely) set Dirty=1 * on a Write=0 PTE. So the below condition * only indicates a software bug when shadow stack is * supported by the HW. This checking is covered in * pte_shstk(). */ VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && pte_shstk(pte)); } void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd) { /* See note in arch_check_zapped_pte() */ VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && pmd_shstk(pmd)); } void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud) { /* See note in arch_check_zapped_pte() */ VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && pud_shstk(pud)); } |
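/*
 * Illustrative sketch only, not part of the file above: the condition that
 * pte_shstk() tests for, as referenced by the arch_check_zapped_pte()
 * comment, is the shadow-stack PTE encoding of Dirty=1 with Write=0.
 * The helper name here is hypothetical.
 */
static inline bool example_pte_is_shadow_stack(pte_t pte)
{
	/* Dirty set while Write is clear marks a shadow-stack page */
	return (pte_flags(pte) & (_PAGE_RW | _PAGE_DIRTY)) == _PAGE_DIRTY;
}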
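/*
 * Illustrative sketch only: the fallback order described above
 * pud_set_huge() - try a 1 GiB leaf mapping first, then 2 MiB, and leave
 * the range to 4 KiB PTEs if neither is possible. The helper name and
 * simplified signature are hypothetical; pud_set_huge()/pmd_set_huge()
 * are the functions defined earlier in this file.
 */
static bool example_try_huge_kernel_mapping(pud_t *pud, pmd_t *pmd,
					    phys_addr_t phys, pgprot_t prot)
{
	if (pud_set_huge(pud, phys, prot))
		return true;	/* mapped with a 1 GiB leaf entry */

	if (pmd_set_huge(pmd, phys, prot))
		return true;	/* mapped with a 2 MiB leaf entry */

	return false;		/* caller falls back to 4 KiB pages */
}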
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH * Copyright (C) 2018-2023 Intel Corporation */ #include <net/mac80211.h> #include <linux/module.h> #include <linux/fips.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/rtnetlink.h> #include <linux/bitmap.h> #include <linux/inetdevice.h> #include <net/net_namespace.h> #include <net/dropreason.h> #include <net/cfg80211.h> #include <net/addrconf.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "mesh.h" #include "wep.h" #include "led.h" #include "debugfs.h" void ieee80211_configure_filter(struct ieee80211_local *local) { u64 mc; unsigned int changed_flags; unsigned int new_flags = 0; if (atomic_read(&local->iff_allmultis)) new_flags |= FIF_ALLMULTI; if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning) || test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) new_flags |= FIF_BCN_PRBRESP_PROMISC; if (local->fif_probe_req || local->probe_req_reg) new_flags |= FIF_PROBE_REQ; if (local->fif_fcsfail) new_flags |= FIF_FCSFAIL; if (local->fif_plcpfail) new_flags |= FIF_PLCPFAIL; if (local->fif_control) new_flags |= FIF_CONTROL; if (local->fif_other_bss) new_flags |= FIF_OTHER_BSS; if (local->fif_pspoll) new_flags |= FIF_PSPOLL; if (local->rx_mcast_action_reg) new_flags |= FIF_MCAST_ACTION; spin_lock_bh(&local->filter_lock); changed_flags = local->filter_flags ^ new_flags; mc = drv_prepare_multicast(local, &local->mc_list); spin_unlock_bh(&local->filter_lock); /* be a bit nasty */ new_flags |= (1<<31); drv_configure_filter(local, changed_flags, &new_flags, mc); WARN_ON(new_flags & (1<<31)); local->filter_flags = new_flags & ~(1<<31); } static void ieee80211_reconfig_filter(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, reconfig_filter); ieee80211_configure_filter(local); } static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local, struct
ieee80211_chanctx_conf *ctx) { struct ieee80211_sub_if_data *sdata; struct cfg80211_chan_def chandef = {}; struct cfg80211_chan_def *oper = NULL; enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_STATIC; u32 changed = 0; int power; u32 offchannel_flag; if (!local->emulate_chanctx) return 0; offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; if (ctx && !WARN_ON(!ctx->def.chan)) { oper = &ctx->def; if (ctx->rx_chains_static > 1) smps_mode = IEEE80211_SMPS_OFF; else if (ctx->rx_chains_dynamic > 1) smps_mode = IEEE80211_SMPS_DYNAMIC; else smps_mode = IEEE80211_SMPS_STATIC; } if (local->scan_chandef.chan) { chandef = local->scan_chandef; } else if (local->tmp_channel) { chandef.chan = local->tmp_channel; chandef.width = NL80211_CHAN_WIDTH_20_NOHT; chandef.center_freq1 = chandef.chan->center_freq; chandef.freq1_offset = chandef.chan->freq_offset; } else if (oper) { chandef = *oper; } else { chandef = local->dflt_chandef; } if (WARN(!cfg80211_chandef_valid(&chandef), "control:%d.%03d MHz width:%d center: %d.%03d/%d MHz", chandef.chan ? chandef.chan->center_freq : -1, chandef.chan ? chandef.chan->freq_offset : 0, chandef.width, chandef.center_freq1, chandef.freq1_offset, chandef.center_freq2)) return 0; if (!oper || !cfg80211_chandef_identical(&chandef, oper)) local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL; else local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL; offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; /* force it also for scanning, since drivers might config differently */ if (offchannel_flag || local->scanning || local->in_reconfig || !cfg80211_chandef_identical(&local->hw.conf.chandef, &chandef)) { local->hw.conf.chandef = chandef; changed |= IEEE80211_CONF_CHANGE_CHANNEL; } if (!conf_is_ht(&local->hw.conf)) { /* * mac80211.h documents that this is only valid * when the channel is set to an HT type, and * that otherwise STATIC is used. */ local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC; } else if (local->hw.conf.smps_mode != smps_mode) { local->hw.conf.smps_mode = smps_mode; changed |= IEEE80211_CONF_CHANGE_SMPS; } power = ieee80211_chandef_max_power(&chandef); rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (!rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf)) continue; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) continue; if (sdata->vif.bss_conf.txpower == INT_MIN) continue; power = min(power, sdata->vif.bss_conf.txpower); } rcu_read_unlock(); if (local->hw.conf.power_level != power) { changed |= IEEE80211_CONF_CHANGE_POWER; local->hw.conf.power_level = power; } return changed; } int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) { int ret = 0; might_sleep(); WARN_ON(changed & (IEEE80211_CONF_CHANGE_CHANNEL | IEEE80211_CONF_CHANGE_POWER | IEEE80211_CONF_CHANGE_SMPS)); if (changed && local->open_count) { ret = drv_config(local, changed); /* * Goal: * HW reconfiguration should never fail, the driver has told * us what it can support so it should live up to that promise. 
* * Current status: * rfkill is not integrated with mac80211 and a * configuration command can thus fail if hardware rfkill * is enabled * * FIXME: integrate rfkill with mac80211 and then add this * WARN_ON() back * */ /* WARN_ON(ret); */ } return ret; } /* for scanning, offchannel and chanctx emulation only */ static int _ieee80211_hw_conf_chan(struct ieee80211_local *local, struct ieee80211_chanctx_conf *ctx) { u32 changed; if (!local->open_count) return 0; changed = ieee80211_calc_hw_conf_chan(local, ctx); if (!changed) return 0; return drv_config(local, changed); } int ieee80211_hw_conf_chan(struct ieee80211_local *local) { struct ieee80211_chanctx *ctx; ctx = list_first_entry_or_null(&local->chanctx_list, struct ieee80211_chanctx, list); return _ieee80211_hw_conf_chan(local, ctx ? &ctx->conf : NULL); } void ieee80211_hw_conf_init(struct ieee80211_local *local) { u32 changed = ~(IEEE80211_CONF_CHANGE_CHANNEL | IEEE80211_CONF_CHANGE_POWER | IEEE80211_CONF_CHANGE_SMPS); if (WARN_ON(!local->open_count)) return; if (local->emulate_chanctx) { struct ieee80211_chanctx *ctx; ctx = list_first_entry_or_null(&local->chanctx_list, struct ieee80211_chanctx, list); changed |= ieee80211_calc_hw_conf_chan(local, ctx ? &ctx->conf : NULL); } WARN_ON(drv_config(local, changed)); } int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct ieee80211_local *local = hw_to_local(hw); local->hw.conf.radar_enabled = ctx->radar_enabled; return _ieee80211_hw_conf_chan(local, ctx); } EXPORT_SYMBOL(ieee80211_emulate_add_chanctx); void ieee80211_emulate_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct ieee80211_local *local = hw_to_local(hw); local->hw.conf.radar_enabled = false; _ieee80211_hw_conf_chan(local, NULL); } EXPORT_SYMBOL(ieee80211_emulate_remove_chanctx); void ieee80211_emulate_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { struct ieee80211_local *local = hw_to_local(hw); local->hw.conf.radar_enabled = ctx->radar_enabled; _ieee80211_hw_conf_chan(local, ctx); } EXPORT_SYMBOL(ieee80211_emulate_change_chanctx); int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs, enum ieee80211_chanctx_switch_mode mode) { struct ieee80211_local *local = hw_to_local(hw); if (n_vifs <= 0) return -EINVAL; local->hw.conf.radar_enabled = vifs[0].new_ctx->radar_enabled; _ieee80211_hw_conf_chan(local, vifs[0].new_ctx); return 0; } EXPORT_SYMBOL(ieee80211_emulate_switch_vif_chanctx); #define BSS_CHANGED_VIF_CFG_FLAGS (BSS_CHANGED_ASSOC |\ BSS_CHANGED_IDLE |\ BSS_CHANGED_PS |\ BSS_CHANGED_IBSS |\ BSS_CHANGED_ARP_FILTER |\ BSS_CHANGED_SSID |\ BSS_CHANGED_MLD_VALID_LINKS |\ BSS_CHANGED_MLD_TTLM) void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, u64 changed) { struct ieee80211_local *local = sdata->local; might_sleep(); WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif)); if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) return; if (WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED) && sdata->vif.type != NL80211_IFTYPE_AP && sdata->vif.type != NL80211_IFTYPE_ADHOC && sdata->vif.type != NL80211_IFTYPE_MESH_POINT && sdata->vif.type != NL80211_IFTYPE_OCB)) return; if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE || sdata->vif.type == NL80211_IFTYPE_NAN || (sdata->vif.type == NL80211_IFTYPE_MONITOR && !sdata->vif.bss_conf.mu_mimo_owner && !(changed & BSS_CHANGED_TXPOWER)))) return; if 
(!check_sdata_in_driver(sdata)) return; if (changed & BSS_CHANGED_VIF_CFG_FLAGS) { u64 ch = changed & BSS_CHANGED_VIF_CFG_FLAGS; trace_drv_vif_cfg_changed(local, sdata, changed); if (local->ops->vif_cfg_changed) local->ops->vif_cfg_changed(&local->hw, &sdata->vif, ch); } if (changed & ~BSS_CHANGED_VIF_CFG_FLAGS) { u64 ch = changed & ~BSS_CHANGED_VIF_CFG_FLAGS; trace_drv_link_info_changed(local, sdata, &sdata->vif.bss_conf, changed); if (local->ops->link_info_changed) local->ops->link_info_changed(&local->hw, &sdata->vif, &sdata->vif.bss_conf, ch); } if (local->ops->bss_info_changed) local->ops->bss_info_changed(&local->hw, &sdata->vif, &sdata->vif.bss_conf, changed); trace_drv_return_void(local); } void ieee80211_vif_cfg_change_notify(struct ieee80211_sub_if_data *sdata, u64 changed) { struct ieee80211_local *local = sdata->local; WARN_ON_ONCE(changed & ~BSS_CHANGED_VIF_CFG_FLAGS); if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) return; drv_vif_cfg_changed(local, sdata, changed); } void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, u64 changed) { struct ieee80211_local *local = sdata->local; WARN_ON_ONCE(changed & BSS_CHANGED_VIF_CFG_FLAGS); if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) return; if (!check_sdata_in_driver(sdata)) return; drv_link_info_changed(local, sdata, link->conf, link->link_id, changed); } u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) { sdata->vif.bss_conf.use_cts_prot = false; sdata->vif.bss_conf.use_short_preamble = false; sdata->vif.bss_conf.use_short_slot = false; return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_ERP_SLOT; } /* context: requires softirqs disabled */ void ieee80211_handle_queued_frames(struct ieee80211_local *local) { struct sk_buff *skb; while ((skb = skb_dequeue(&local->skb_queue)) || (skb = skb_dequeue(&local->skb_queue_unreliable))) { switch (skb->pkt_type) { case IEEE80211_RX_MSG: /* Clear skb->pkt_type in order to not confuse kernel * netstack. */ skb->pkt_type = 0; ieee80211_rx(&local->hw, skb); break; case IEEE80211_TX_STATUS_MSG: skb->pkt_type = 0; ieee80211_tx_status_skb(&local->hw, skb); break; default: WARN(1, "mac80211: Packet is of unknown type %d\n", skb->pkt_type); dev_kfree_skb(skb); break; } } } static void ieee80211_tasklet_handler(struct tasklet_struct *t) { struct ieee80211_local *local = from_tasklet(local, t, tasklet); ieee80211_handle_queued_frames(local); } static void ieee80211_restart_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, restart_work); struct ieee80211_sub_if_data *sdata; int ret; flush_workqueue(local->workqueue); rtnl_lock(); /* we might do interface manipulations, so need both */ wiphy_lock(local->hw.wiphy); wiphy_work_flush(local->hw.wiphy, NULL); WARN(test_bit(SCAN_HW_SCANNING, &local->scanning), "%s called with hardware scan in progress\n", __func__); list_for_each_entry(sdata, &local->interfaces, list) { /* * XXX: there may be more work for other vif types and even * for station mode: a good thing would be to run most of * the iface type's dependent _stop (ieee80211_mg_stop, * ieee80211_ibss_stop) etc... * For now, fix only the specific bug that was seen: race * between csa_connection_drop_work and us. */ if (sdata->vif.type == NL80211_IFTYPE_STATION) { /* * This worker is scheduled from the iface worker that * runs on mac80211's workqueue, so we can't be * scheduling this worker after the cancel right here. 
* The exception is ieee80211_chswitch_done. * Then we can have a race... */ wiphy_work_cancel(local->hw.wiphy, &sdata->u.mgd.csa_connection_drop_work); if (sdata->vif.bss_conf.csa_active) ieee80211_sta_connection_lost(sdata, WLAN_REASON_UNSPECIFIED, false); } wiphy_delayed_work_flush(local->hw.wiphy, &sdata->dec_tailroom_needed_wk); } ieee80211_scan_cancel(local); /* make sure any new ROC will consider local->in_reconfig */ wiphy_delayed_work_flush(local->hw.wiphy, &local->roc_work); wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done); /* wait for all packet processing to be done */ synchronize_net(); ret = ieee80211_reconfig(local); wiphy_unlock(local->hw.wiphy); if (ret) cfg80211_shutdown_all_interfaces(local->hw.wiphy); rtnl_unlock(); } void ieee80211_restart_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); trace_api_restart_hw(local); wiphy_info(hw->wiphy, "Hardware restart was requested\n"); /* use this reason, ieee80211_reconfig will unblock it */ ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_SUSPEND, false); /* * Stop all Rx during the reconfig. We don't want state changes * or driver callbacks while this is in progress. */ local->in_reconfig = true; barrier(); queue_work(system_freezable_wq, &local->restart_work); } EXPORT_SYMBOL(ieee80211_restart_hw); #ifdef CONFIG_INET static int ieee80211_ifa_changed(struct notifier_block *nb, unsigned long data, void *arg) { struct in_ifaddr *ifa = arg; struct ieee80211_local *local = container_of(nb, struct ieee80211_local, ifa_notifier); struct net_device *ndev = ifa->ifa_dev->dev; struct wireless_dev *wdev = ndev->ieee80211_ptr; struct in_device *idev; struct ieee80211_sub_if_data *sdata; struct ieee80211_vif_cfg *vif_cfg; struct ieee80211_if_managed *ifmgd; int c = 0; /* Make sure it's our interface that got changed */ if (!wdev) return NOTIFY_DONE; if (wdev->wiphy != local->hw.wiphy || !wdev->registered) return NOTIFY_DONE; sdata = IEEE80211_DEV_TO_SUB_IF(ndev); vif_cfg = &sdata->vif.cfg; /* ARP filtering is only supported in managed mode */ if (sdata->vif.type != NL80211_IFTYPE_STATION) return NOTIFY_DONE; idev = __in_dev_get_rtnl(sdata->dev); if (!idev) return NOTIFY_DONE; ifmgd = &sdata->u.mgd; /* * The nested here is needed to convince lockdep that this is * all OK. Yes, we lock the wiphy mutex here while we already * hold the notifier rwsem, that's the normal case. And yes, * we also acquire the notifier rwsem again when unregistering * a netdev while we already hold the wiphy mutex, so it does * look like a typical ABBA deadlock. * * However, both of these things happen with the RTNL held * already. Therefore, they can't actually happen, since the * lock orders really are ABC and ACB, which is fine due to * the RTNL (A). * * We still need to prevent recursion, which is accomplished * by the !wdev->registered check above. 
*/ mutex_lock_nested(&local->hw.wiphy->mtx, 1); __acquire(&local->hw.wiphy->mtx); /* Copy the addresses to the vif config list */ ifa = rtnl_dereference(idev->ifa_list); while (ifa) { if (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN) vif_cfg->arp_addr_list[c] = ifa->ifa_address; ifa = rtnl_dereference(ifa->ifa_next); c++; } vif_cfg->arp_addr_cnt = c; /* Configure driver only if associated (which also implies it is up) */ if (ifmgd->associated) ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_ARP_FILTER); wiphy_unlock(local->hw.wiphy); return NOTIFY_OK; } #endif #if IS_ENABLED(CONFIG_IPV6) static int ieee80211_ifa6_changed(struct notifier_block *nb, unsigned long data, void *arg) { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)arg; struct inet6_dev *idev = ifa->idev; struct net_device *ndev = ifa->idev->dev; struct ieee80211_local *local = container_of(nb, struct ieee80211_local, ifa6_notifier); struct wireless_dev *wdev = ndev->ieee80211_ptr; struct ieee80211_sub_if_data *sdata; /* Make sure it's our interface that got changed */ if (!wdev || wdev->wiphy != local->hw.wiphy) return NOTIFY_DONE; sdata = IEEE80211_DEV_TO_SUB_IF(ndev); /* * For now only support station mode. This is mostly because * doing AP would have to handle AP_VLAN in some way ... */ if (sdata->vif.type != NL80211_IFTYPE_STATION) return NOTIFY_DONE; drv_ipv6_addr_change(local, sdata, idev); return NOTIFY_OK; } #endif /* There isn't a lot of sense in it, but you can transmit anything you like */ static const struct ieee80211_txrx_stypes ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { [NL80211_IFTYPE_ADHOC] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, [NL80211_IFTYPE_STATION] = { .tx = 0xffff, /* * To support Pre Association Security Negotiation (PASN) while * already associated to one AP, allow user space to register to * Rx authentication frames, so that the user space logic would * be able to receive/handle authentication frames from a * different AP as part of PASN. * It is expected that user space would intelligently register * for Rx authentication frames, i.e., only when PASN is used * and configure a match filter only for PASN authentication * algorithm, as otherwise the MLME functionality of mac80211 * would be broken. 
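 * (Each bit in these .rx/.tx masks corresponds to one management frame
 * subtype: the IEEE80211_STYPE_* values sit in bits 4-7 of the frame
 * control field, so shifting right by 4 yields the bit index. For
 * example, IEEE80211_STYPE_AUTH is 0x00b0, and 0xb0 >> 4 == 11, i.e.
 * BIT(11).)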
*/ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, [NL80211_IFTYPE_AP] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4), }, [NL80211_IFTYPE_AP_VLAN] = { /* copy AP */ .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4), }, [NL80211_IFTYPE_P2P_CLIENT] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, [NL80211_IFTYPE_P2P_GO] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4), }, [NL80211_IFTYPE_MESH_POINT] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4), }, [NL80211_IFTYPE_P2P_DEVICE] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, }; static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = { .ampdu_params_info = IEEE80211_HT_AMPDU_PARM_FACTOR | IEEE80211_HT_AMPDU_PARM_DENSITY, .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_MAX_AMSDU | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_TX_STBC | IEEE80211_HT_CAP_RX_STBC | IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_40MHZ_INTOLERANT), .mcs = { .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, }, }; static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = { .vht_cap_info = cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_SHORT_GI_160 | IEEE80211_VHT_CAP_RXSTBC_MASK | IEEE80211_VHT_CAP_TXSTBC | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK), .supp_mcs = { .rx_mcs_map = cpu_to_le16(~0), .tx_mcs_map = cpu_to_le16(~0), }, }; struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, const struct ieee80211_ops *ops, const char *requested_name) { struct ieee80211_local *local; int priv_size, i; struct wiphy *wiphy; bool emulate_chanctx; if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config || !ops->add_interface || !ops->remove_interface || !ops->configure_filter || !ops->wake_tx_queue)) return NULL; if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove))) return NULL; if (WARN_ON(!!ops->link_info_changed != !!ops->vif_cfg_changed || (ops->link_info_changed && ops->bss_info_changed))) return NULL; /* check all or no channel context operations exist */ if (ops->add_chanctx == ieee80211_emulate_add_chanctx && ops->remove_chanctx == ieee80211_emulate_remove_chanctx && ops->change_chanctx == ieee80211_emulate_change_chanctx) { if (WARN_ON(ops->assign_vif_chanctx || ops->unassign_vif_chanctx)) return NULL; emulate_chanctx = true; } else { if (WARN_ON(ops->add_chanctx == ieee80211_emulate_add_chanctx || ops->remove_chanctx == 
ieee80211_emulate_remove_chanctx || ops->change_chanctx == ieee80211_emulate_change_chanctx)) return NULL; if (WARN_ON(!ops->add_chanctx || !ops->remove_chanctx || !ops->change_chanctx || !ops->assign_vif_chanctx || !ops->unassign_vif_chanctx)) return NULL; emulate_chanctx = false; } /* Ensure 32-byte alignment of our private data and hw private data. * We use the wiphy priv data for both our ieee80211_local and for * the driver's private data * * In memory it'll be like this: * * +-------------------------+ * | struct wiphy | * +-------------------------+ * | struct ieee80211_local | * +-------------------------+ * | driver's private data | * +-------------------------+ * */ priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len; wiphy = wiphy_new_nm(&mac80211_config_ops, priv_size, requested_name); if (!wiphy) return NULL; wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes; wiphy->privid = mac80211_wiphy_privid; wiphy->flags |= WIPHY_FLAG_NETNS_OK | WIPHY_FLAG_4ADDR_AP | WIPHY_FLAG_4ADDR_STATION | WIPHY_FLAG_REPORTS_OBSS | WIPHY_FLAG_OFFCHAN_TX; if (emulate_chanctx || ops->remain_on_channel) wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wiphy->features |= NL80211_FEATURE_SK_TX_STATUS | NL80211_FEATURE_SAE | NL80211_FEATURE_HT_IBSS | NL80211_FEATURE_VIF_TXPOWER | NL80211_FEATURE_MAC_ON_CREATE | NL80211_FEATURE_USERSPACE_MPM | NL80211_FEATURE_FULL_AP_CLIENT_STATE; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_STA); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_FREQ_KHZ); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE); if (!ops->hw_scan) { wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_AP_SCAN; /* * if the driver behaves correctly using the probe request * (template) from mac80211, then both of these should be * supported even with hw scan - but let drivers opt in. */ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT); } if (!ops->set_key) { wiphy->flags |= WIPHY_FLAG_IBSS_RSN; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT); } wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_TXQS); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM); wiphy->bss_priv_size = sizeof(struct ieee80211_bss); local = wiphy_priv(wiphy); if (sta_info_init(local)) goto err_free; local->hw.wiphy = wiphy; local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); local->ops = ops; local->emulate_chanctx = emulate_chanctx; if (emulate_chanctx) ieee80211_hw_set(&local->hw, CHANCTX_STA_CSA); /* * We need a bit of data queued to build aggregates properly, so * instruct the TCP stack to allow more than a single ms of data * to be queued in the stack. The value is a bit-shift of 1 * second, so 7 is ~8ms of queued data. Only affects local TCP * sockets. * This is the default, anyhow - drivers may need to override it * for local reasons (longer buffers, longer completion time, or * similar). 
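 * (For example, the shift of 7 used below corresponds to 1 s >> 7, i.e.
 * 1000 ms / 128, which is roughly 7.8 ms worth of queued data.)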
*/ local->hw.tx_sk_pacing_shift = 7; /* set up some defaults */ local->hw.queues = 1; local->hw.max_rates = 1; local->hw.max_report_rates = 0; local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE; local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS | IEEE80211_RADIOTAP_MCS_HAVE_GI | IEEE80211_RADIOTAP_MCS_HAVE_BW; local->hw.radiotap_vht_details = IEEE80211_RADIOTAP_VHT_KNOWN_GI | IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH; local->hw.uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES; local->hw.uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; local->hw.max_mtu = IEEE80211_MAX_DATA_LEN; local->user_power_level = IEEE80211_UNSET_POWER_LEVEL; wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; wiphy->vht_capa_mod_mask = &mac80211_vht_capa_mod_mask; local->ext_capa[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF; wiphy->extended_capabilities = local->ext_capa; wiphy->extended_capabilities_mask = local->ext_capa; wiphy->extended_capabilities_len = ARRAY_SIZE(local->ext_capa); INIT_LIST_HEAD(&local->interfaces); INIT_LIST_HEAD(&local->mon_list); __hw_addr_init(&local->mc_list); mutex_init(&local->iflist_mtx); spin_lock_init(&local->filter_lock); spin_lock_init(&local->rx_path_lock); spin_lock_init(&local->queue_stop_reason_lock); for (i = 0; i < IEEE80211_NUM_ACS; i++) { INIT_LIST_HEAD(&local->active_txqs[i]); spin_lock_init(&local->active_txq_lock[i]); local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L; local->aql_txq_limit_high[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H; atomic_set(&local->aql_ac_pending_airtime[i], 0); } local->airtime_flags = AIRTIME_USE_TX | AIRTIME_USE_RX; local->aql_threshold = IEEE80211_AQL_THRESHOLD; atomic_set(&local->aql_total_pending_airtime, 0); spin_lock_init(&local->handle_wake_tx_queue_lock); INIT_LIST_HEAD(&local->chanctx_list); wiphy_delayed_work_init(&local->scan_work, ieee80211_scan_work); INIT_WORK(&local->restart_work, ieee80211_restart_work); wiphy_work_init(&local->radar_detected_work, ieee80211_dfs_radar_detected_work); wiphy_work_init(&local->reconfig_filter, ieee80211_reconfig_filter); wiphy_work_init(&local->dynamic_ps_enable_work, ieee80211_dynamic_ps_enable_work); wiphy_work_init(&local->dynamic_ps_disable_work, ieee80211_dynamic_ps_disable_work); timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0); wiphy_work_init(&local->sched_scan_stopped_work, ieee80211_sched_scan_stopped_work); spin_lock_init(&local->ack_status_lock); idr_init(&local->ack_status_frames); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { skb_queue_head_init(&local->pending[i]); atomic_set(&local->agg_queue_stop[i], 0); } tasklet_setup(&local->tx_pending_tasklet, ieee80211_tx_pending); tasklet_setup(&local->wake_txqs_tasklet, ieee80211_wake_txqs); tasklet_setup(&local->tasklet, ieee80211_tasklet_handler); skb_queue_head_init(&local->skb_queue); skb_queue_head_init(&local->skb_queue_unreliable); ieee80211_alloc_led_names(local); ieee80211_roc_setup(local); local->hw.radiotap_timestamp.units_pos = -1; local->hw.radiotap_timestamp.accuracy = -1; return &local->hw; err_free: wiphy_free(wiphy); return NULL; } EXPORT_SYMBOL(ieee80211_alloc_hw_nm); static int ieee80211_init_cipher_suites(struct ieee80211_local *local) { bool have_wep = !fips_enabled; /* FIPS does not permit the use of RC4 */ bool have_mfp = 
ieee80211_hw_check(&local->hw, MFP_CAPABLE); int r = 0, w = 0; u32 *suites; static const u32 cipher_suites[] = { /* keep WEP first, it may be removed below */ WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, WLAN_CIPHER_SUITE_CCMP_256, WLAN_CIPHER_SUITE_GCMP, WLAN_CIPHER_SUITE_GCMP_256, /* keep last -- depends on hw flags! */ WLAN_CIPHER_SUITE_AES_CMAC, WLAN_CIPHER_SUITE_BIP_CMAC_256, WLAN_CIPHER_SUITE_BIP_GMAC_128, WLAN_CIPHER_SUITE_BIP_GMAC_256, }; if (ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL) || local->hw.wiphy->cipher_suites) { /* If the driver advertises, or doesn't support SW crypto, * we only need to remove WEP if necessary. */ if (have_wep) return 0; /* well if it has _no_ ciphers ... fine */ if (!local->hw.wiphy->n_cipher_suites) return 0; /* Driver provides cipher suites, but we need to exclude WEP */ suites = kmemdup_array(local->hw.wiphy->cipher_suites, local->hw.wiphy->n_cipher_suites, sizeof(u32), GFP_KERNEL); if (!suites) return -ENOMEM; for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) { u32 suite = local->hw.wiphy->cipher_suites[r]; if (suite == WLAN_CIPHER_SUITE_WEP40 || suite == WLAN_CIPHER_SUITE_WEP104) continue; suites[w++] = suite; } } else { /* assign the (software supported and perhaps offloaded) * cipher suites */ local->hw.wiphy->cipher_suites = cipher_suites; local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); if (!have_mfp) local->hw.wiphy->n_cipher_suites -= 4; if (!have_wep) { local->hw.wiphy->cipher_suites += 2; local->hw.wiphy->n_cipher_suites -= 2; } /* not dynamically allocated, so just return */ return 0; } local->hw.wiphy->cipher_suites = suites; local->hw.wiphy->n_cipher_suites = w; local->wiphy_ciphers_allocated = true; return 0; } static bool ieee80211_ifcomb_check(const struct ieee80211_iface_combination *c, int n_comb) { int i, j; for (i = 0; i < n_comb; i++, c++) { /* DFS is not supported with multi-channel combinations yet */ if (c->radar_detect_widths && c->num_different_channels > 1) return false; /* mac80211 doesn't support more than one IBSS interface */ for (j = 0; j < c->n_limits; j++) if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && c->limits[j].max > 1) return false; } return true; } int ieee80211_register_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); int result, i; enum nl80211_band band; int channels, max_bitrates; bool supp_ht, supp_vht, supp_he, supp_eht; struct cfg80211_chan_def dflt_chandef = {}; if (ieee80211_hw_check(hw, QUEUE_CONTROL) && (local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE || local->hw.offchannel_tx_hw_queue >= local->hw.queues)) return -EINVAL; if ((hw->wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH) && (!local->ops->tdls_channel_switch || !local->ops->tdls_cancel_channel_switch || !local->ops->tdls_recv_channel_switch)) return -EOPNOTSUPP; if (WARN_ON(ieee80211_hw_check(hw, SUPPORTS_TX_FRAG) && !local->ops->set_frag_threshold)) return -EINVAL; if (WARN_ON(local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_NAN) && (!local->ops->start_nan || !local->ops->stop_nan))) return -EINVAL; if (hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO) { /* * For drivers capable of doing MLO, assume modern driver * or firmware facilities, so software doesn't have to do * as much, e.g. monitoring beacons would be hard if we * might not even know which link is active at which time. 
*/ if (WARN_ON(local->emulate_chanctx)) return -EINVAL; if (WARN_ON(!local->ops->link_info_changed)) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, HAS_RATE_CONTROL))) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, AMPDU_AGGREGATION))) return -EINVAL; if (WARN_ON(ieee80211_hw_check(hw, HOST_BROADCAST_PS_BUFFERING))) return -EINVAL; if (WARN_ON(ieee80211_hw_check(hw, SUPPORTS_PS) && (!ieee80211_hw_check(hw, SUPPORTS_DYNAMIC_PS) || ieee80211_hw_check(hw, PS_NULLFUNC_STACK)))) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, MFP_CAPABLE))) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, CONNECTION_MONITOR))) return -EINVAL; if (WARN_ON(ieee80211_hw_check(hw, NEED_DTIM_BEFORE_ASSOC))) return -EINVAL; if (WARN_ON(ieee80211_hw_check(hw, TIMING_BEACON_ONLY))) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, AP_LINK_PS))) return -EINVAL; } #ifdef CONFIG_PM if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume)) return -EINVAL; #endif if (local->emulate_chanctx) { for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) { const struct ieee80211_iface_combination *comb; comb = &local->hw.wiphy->iface_combinations[i]; if (comb->num_different_channels > 1) return -EINVAL; } } if (hw->wiphy->n_radio) { for (i = 0; i < hw->wiphy->n_radio; i++) { const struct wiphy_radio *radio = &hw->wiphy->radio[i]; if (!ieee80211_ifcomb_check(radio->iface_combinations, radio->n_iface_combinations)) return -EINVAL; } } else { if (!ieee80211_ifcomb_check(hw->wiphy->iface_combinations, hw->wiphy->n_iface_combinations)) return -EINVAL; } /* Only HW csum features are currently compatible with mac80211 */ if (WARN_ON(hw->netdev_features & ~MAC80211_SUPPORTED_FEATURES)) return -EINVAL; if (hw->max_report_rates == 0) hw->max_report_rates = hw->max_rates; local->rx_chains = 1; /* * generic code guarantees at least one band, * set this very early because much code assumes * that hw.conf.channel is assigned */ channels = 0; max_bitrates = 0; supp_ht = false; supp_vht = false; supp_he = false; supp_eht = false; for (band = 0; band < NUM_NL80211_BANDS; band++) { const struct ieee80211_sband_iftype_data *iftd; struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[band]; if (!sband) continue; if (!dflt_chandef.chan) { /* * Assign the first enabled channel to dflt_chandef * from the list of channels */ for (i = 0; i < sband->n_channels; i++) if (!(sband->channels[i].flags & IEEE80211_CHAN_DISABLED)) break; /* if none found then use the first anyway */ if (i == sband->n_channels) i = 0; cfg80211_chandef_create(&dflt_chandef, &sband->channels[i], NL80211_CHAN_NO_HT); /* init channel we're on */ local->monitor_chanreq.oper = dflt_chandef; if (local->emulate_chanctx) { local->dflt_chandef = dflt_chandef; local->hw.conf.chandef = dflt_chandef; } } channels += sband->n_channels; /* * Due to the way the aggregation code handles this and it * being an HT capability, we can't really support delayed * BA in MLO (yet). 
*/ if (WARN_ON(sband->ht_cap.ht_supported && (sband->ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA) && hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) return -EINVAL; if (max_bitrates < sband->n_bitrates) max_bitrates = sband->n_bitrates; supp_ht = supp_ht || sband->ht_cap.ht_supported; supp_vht = supp_vht || sband->vht_cap.vht_supported; for_each_sband_iftype_data(sband, i, iftd) { u8 he_40_mhz_cap; supp_he = supp_he || iftd->he_cap.has_he; supp_eht = supp_eht || iftd->eht_cap.has_eht; if (band == NL80211_BAND_2GHZ) he_40_mhz_cap = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; else he_40_mhz_cap = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G; /* currently no support for HE client where HT has 40 MHz but not HT */ if (iftd->he_cap.has_he && iftd->types_mask & (BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT)) && sband->ht_cap.ht_supported && sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && !(iftd->he_cap.he_cap_elem.phy_cap_info[0] & he_40_mhz_cap)) return -EINVAL; } /* HT, VHT, HE require QoS, thus >= 4 queues */ if (WARN_ON(local->hw.queues < IEEE80211_NUM_ACS && (supp_ht || supp_vht || supp_he))) return -EINVAL; /* EHT requires HE support */ if (WARN_ON(supp_eht && !supp_he)) return -EINVAL; if (!sband->ht_cap.ht_supported) continue; /* TODO: consider VHT for RX chains, hopefully it's the same */ local->rx_chains = max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), local->rx_chains); /* no need to mask, SM_PS_DISABLED has all bits set */ sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; } /* if low-level driver supports AP, we also support VLAN. * drivers advertising SW_CRYPTO_CONTROL should enable AP_VLAN * based on their support to transmit SW encrypted packets. */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP) && !ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL)) { hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN); } /* mac80211 always supports monitor */ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR); local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) + sizeof(void *) * channels, GFP_KERNEL); if (!local->int_scan_req) return -ENOMEM; eth_broadcast_addr(local->int_scan_req->bssid); for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!local->hw.wiphy->bands[band]) continue; local->int_scan_req->rates[band] = (u32) -1; } #ifndef CONFIG_MAC80211_MESH /* mesh depends on Kconfig, but drivers should set it if they want */ local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT); #endif /* if the underlying driver supports mesh, mac80211 will (at least) * provide routing of mesh authentication frames to userspace */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)) local->hw.wiphy->flags |= WIPHY_FLAG_MESH_AUTH; /* mac80211 supports control port protocol changing */ local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL; if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) { local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; } else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC)) { local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; if (hw->max_signal <= 0) { result = -EINVAL; goto fail_workqueue; } } /* Mac80211 and therefore all drivers using SW crypto only * are able to handle PTK rekeys and Extended Key ID. 
*/ if (!local->ops->set_key) { wiphy_ext_feature_set(local->hw.wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); wiphy_ext_feature_set(local->hw.wiphy, NL80211_EXT_FEATURE_EXT_KEY_ID); } if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_ADHOC)) wiphy_ext_feature_set(local->hw.wiphy, NL80211_EXT_FEATURE_DEL_IBSS_STA); /* * Calculate scan IE length -- we need this to alloc * memory and to subtract from the driver limit. It * includes the DS Params, (extended) supported rates, and HT * information -- SSID is the driver's responsibility. */ local->scan_ies_len = 4 + max_bitrates /* (ext) supp rates */ + 3 /* DS Params */; if (supp_ht) local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap); if (supp_vht) local->scan_ies_len += 2 + sizeof(struct ieee80211_vht_cap); /* * HE cap element is variable in size - set len to allow max size */ if (supp_he) { local->scan_ies_len += 3 + sizeof(struct ieee80211_he_cap_elem) + sizeof(struct ieee80211_he_mcs_nss_supp) + IEEE80211_HE_PPE_THRES_MAX_LEN; if (supp_eht) local->scan_ies_len += 3 + sizeof(struct ieee80211_eht_cap_elem) + sizeof(struct ieee80211_eht_mcs_nss_supp) + IEEE80211_EHT_PPE_THRES_MAX_LEN; } if (!local->ops->hw_scan) { /* For hw_scan, driver needs to set these up. */ local->hw.wiphy->max_scan_ssids = 4; local->hw.wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; } /* * If the driver supports any scan IEs, then assume the * limit includes the IEs mac80211 will add, otherwise * leave it at zero and let the driver sort it out; we * still pass our IEs to the driver but userspace will * not be allowed to in that case. */ if (local->hw.wiphy->max_scan_ie_len) local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len; result = ieee80211_init_cipher_suites(local); if (result < 0) goto fail_workqueue; if (!local->ops->remain_on_channel) local->hw.wiphy->max_remain_on_channel_duration = 5000; /* mac80211 based drivers don't support internal TDLS setup */ if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP; /* mac80211 supports eCSA, if the driver supports STA CSA at all */ if (ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) local->ext_capa[0] |= WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING; /* mac80211 supports multi BSSID, if the driver supports it */ if (ieee80211_hw_check(&local->hw, SUPPORTS_MULTI_BSSID)) { local->hw.wiphy->support_mbssid = true; if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID)) local->hw.wiphy->support_only_he_mbssid = true; else local->ext_capa[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT; } local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CNTDWN_COUNTERS_NUM; /* * We use the number of queues for feature tests (QoS, HT) internally * so restrict them appropriately. */ if (hw->queues > IEEE80211_MAX_QUEUES) hw->queues = IEEE80211_MAX_QUEUES; local->workqueue = alloc_ordered_workqueue("%s", 0, wiphy_name(local->hw.wiphy)); if (!local->workqueue) { result = -ENOMEM; goto fail_workqueue; } /* * The hardware needs headroom for sending the frame, * and we need some headroom for passing the frame to monitor * interfaces, but never both at the same time. 
*/ local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom, IEEE80211_TX_STATUS_HEADROOM); /* * if the driver doesn't specify a max listen interval we * use 5 which should be a safe default */ if (local->hw.max_listen_interval == 0) local->hw.max_listen_interval = 5; local->hw.conf.listen_interval = local->hw.max_listen_interval; local->dynamic_ps_forced_timeout = -1; if (!local->hw.max_nan_de_entries) local->hw.max_nan_de_entries = IEEE80211_MAX_NAN_INSTANCE_ID; if (!local->hw.weight_multiplier) local->hw.weight_multiplier = 1; ieee80211_wep_init(local); local->hw.conf.flags = IEEE80211_CONF_IDLE; ieee80211_led_init(local); result = ieee80211_txq_setup_flows(local); if (result) goto fail_flows; rtnl_lock(); result = ieee80211_init_rate_ctrl_alg(local, hw->rate_control_algorithm); rtnl_unlock(); if (result < 0) { wiphy_debug(local->hw.wiphy, "Failed to initialize rate control algorithm\n"); goto fail_rate; } if (local->rate_ctrl) { clear_bit(IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, hw->flags); if (local->rate_ctrl->ops->capa & RATE_CTRL_CAPA_VHT_EXT_NSS_BW) ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); } /* * If the VHT capabilities don't have IEEE80211_VHT_EXT_NSS_BW_CAPABLE, * or have it when we don't, copy the sband structure and set/clear it. * This is necessary because rate scaling algorithms could be switched * and have different support values. * Print a message so that in the common case the reallocation can be * avoided. */ BUILD_BUG_ON(NUM_NL80211_BANDS > 8 * sizeof(local->sband_allocated)); for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; bool local_cap, ie_cap; local_cap = ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW); sband = local->hw.wiphy->bands[band]; if (!sband || !sband->vht_cap.vht_supported) continue; ie_cap = !!(sband->vht_cap.vht_mcs.tx_highest & cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE)); if (local_cap == ie_cap) continue; sband = kmemdup(sband, sizeof(*sband), GFP_KERNEL); if (!sband) { result = -ENOMEM; goto fail_rate; } wiphy_dbg(hw->wiphy, "copying sband (band %d) due to VHT EXT NSS BW flag\n", band); sband->vht_cap.vht_mcs.tx_highest ^= cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); local->hw.wiphy->bands[band] = sband; local->sband_allocated |= BIT(band); } result = wiphy_register(local->hw.wiphy); if (result < 0) goto fail_wiphy_register; debugfs_hw_add(local); rate_control_add_debugfs(local); ieee80211_check_wbrf_support(local); rtnl_lock(); wiphy_lock(hw->wiphy); /* add one default STA interface if supported */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) && !ieee80211_hw_check(hw, NO_AUTO_VIF)) { struct vif_params params = {0}; result = ieee80211_if_add(local, "wlan%d", NET_NAME_ENUM, NULL, NL80211_IFTYPE_STATION, &params); if (result) wiphy_warn(local->hw.wiphy, "Failed to add default virtual iface\n"); } wiphy_unlock(hw->wiphy); rtnl_unlock(); #ifdef CONFIG_INET local->ifa_notifier.notifier_call = ieee80211_ifa_changed; result = register_inetaddr_notifier(&local->ifa_notifier); if (result) goto fail_ifa; #endif #if IS_ENABLED(CONFIG_IPV6) local->ifa6_notifier.notifier_call = ieee80211_ifa6_changed; result = register_inet6addr_notifier(&local->ifa6_notifier); if (result) goto fail_ifa6; #endif return 0; #if IS_ENABLED(CONFIG_IPV6) fail_ifa6: #ifdef CONFIG_INET unregister_inetaddr_notifier(&local->ifa_notifier); #endif #endif #if defined(CONFIG_INET) || defined(CONFIG_IPV6) fail_ifa: #endif wiphy_unregister(local->hw.wiphy); fail_wiphy_register: rtnl_lock();
rate_control_deinitialize(local); ieee80211_remove_interfaces(local); rtnl_unlock(); fail_rate: ieee80211_txq_teardown_flows(local); fail_flows: ieee80211_led_exit(local); destroy_workqueue(local->workqueue); fail_workqueue: if (local->wiphy_ciphers_allocated) { kfree(local->hw.wiphy->cipher_suites); local->wiphy_ciphers_allocated = false; } kfree(local->int_scan_req); return result; } EXPORT_SYMBOL(ieee80211_register_hw); void ieee80211_unregister_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); tasklet_kill(&local->tx_pending_tasklet); tasklet_kill(&local->tasklet); #ifdef CONFIG_INET unregister_inetaddr_notifier(&local->ifa_notifier); #endif #if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&local->ifa6_notifier); #endif rtnl_lock(); /* * At this point, interface list manipulations are fine * because the driver cannot be handing us frames any * more and the tasklet is killed. */ ieee80211_remove_interfaces(local); ieee80211_txq_teardown_flows(local); wiphy_lock(local->hw.wiphy); wiphy_delayed_work_cancel(local->hw.wiphy, &local->roc_work); wiphy_work_cancel(local->hw.wiphy, &local->reconfig_filter); wiphy_work_cancel(local->hw.wiphy, &local->sched_scan_stopped_work); wiphy_work_cancel(local->hw.wiphy, &local->radar_detected_work); wiphy_unlock(local->hw.wiphy); rtnl_unlock(); cancel_work_sync(&local->restart_work); ieee80211_clear_tx_pending(local); rate_control_deinitialize(local); if (skb_queue_len(&local->skb_queue) || skb_queue_len(&local->skb_queue_unreliable)) wiphy_warn(local->hw.wiphy, "skb_queue not empty\n"); skb_queue_purge(&local->skb_queue); skb_queue_purge(&local->skb_queue_unreliable); wiphy_unregister(local->hw.wiphy); destroy_workqueue(local->workqueue); ieee80211_led_exit(local); kfree(local->int_scan_req); } EXPORT_SYMBOL(ieee80211_unregister_hw); static int ieee80211_free_ack_frame(int id, void *p, void *data) { WARN_ONCE(1, "Have pending ack frames!\n"); kfree_skb(p); return 0; } void ieee80211_free_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); enum nl80211_band band; mutex_destroy(&local->iflist_mtx); if (local->wiphy_ciphers_allocated) { kfree(local->hw.wiphy->cipher_suites); local->wiphy_ciphers_allocated = false; } idr_for_each(&local->ack_status_frames, ieee80211_free_ack_frame, NULL); idr_destroy(&local->ack_status_frames); sta_info_stop(local); ieee80211_free_led_names(local); for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!(local->sband_allocated & BIT(band))) continue; kfree(local->hw.wiphy->bands[band]); } wiphy_free(local->hw.wiphy); } EXPORT_SYMBOL(ieee80211_free_hw); static const char * const drop_reasons_monitor[] = { #define V(x) #x, [0] = "RX_DROP_MONITOR", MAC80211_DROP_REASONS_MONITOR(V) }; static struct drop_reason_list drop_reason_list_monitor = { .reasons = drop_reasons_monitor, .n_reasons = ARRAY_SIZE(drop_reasons_monitor), }; static const char * const drop_reasons_unusable[] = { [0] = "RX_DROP_UNUSABLE", MAC80211_DROP_REASONS_UNUSABLE(V) #undef V }; static struct drop_reason_list drop_reason_list_unusable = { .reasons = drop_reasons_unusable, .n_reasons = ARRAY_SIZE(drop_reasons_unusable), }; static int __init ieee80211_init(void) { struct sk_buff *skb; int ret; BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb)); BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) + IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb)); ret = rc80211_minstrel_init(); if (ret) return ret; ret = ieee80211_iface_init(); if (ret) goto err_netdev; 
drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR, &drop_reason_list_monitor); drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE, &drop_reason_list_unusable); return 0; err_netdev: rc80211_minstrel_exit(); return ret; } static void __exit ieee80211_exit(void) { rc80211_minstrel_exit(); ieee80211s_stop(); ieee80211_iface_exit(); drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR); drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE); rcu_barrier(); } subsys_initcall(ieee80211_init); module_exit(ieee80211_exit); MODULE_DESCRIPTION("IEEE 802.11 subsystem"); MODULE_LICENSE("GPL"); |
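/*
 * Editor's illustrative sketch (not part of the file above): the exported
 * ieee80211_register_hw() / ieee80211_unregister_hw() / ieee80211_free_hw()
 * calls above are normally driven from a driver's probe/remove path roughly
 * as follows. Names such as mywifi_probe, mywifi_remove, mywifi_ops and
 * struct mywifi_priv are hypothetical placeholders; a real driver must also
 * provide a fully populated struct ieee80211_ops (.tx, .start, .stop,
 * .add_interface, .config, ...) for the capability checks above to pass.
 */
#if 0	/* example only, never compiled */
#include <net/mac80211.h>

struct mywifi_priv { int dummy; };			/* placeholder private data */
extern const struct ieee80211_ops mywifi_ops;		/* assumed defined elsewhere */

static int mywifi_probe(struct device *dev)
{
	struct ieee80211_hw *hw;
	int err;

	/* allocate hw plus driver private area, then describe capabilities */
	hw = ieee80211_alloc_hw(sizeof(struct mywifi_priv), &mywifi_ops);
	if (!hw)
		return -ENOMEM;

	SET_IEEE80211_DEV(hw, dev);
	ieee80211_hw_set(hw, SIGNAL_DBM);

	/* runs the capability/combination checks seen in ieee80211_register_hw() */
	err = ieee80211_register_hw(hw);
	if (err) {
		ieee80211_free_hw(hw);
		return err;
	}
	return 0;
}

static void mywifi_remove(struct ieee80211_hw *hw)
{
	ieee80211_unregister_hw(hw);	/* tears down interfaces and the wiphy */
	ieee80211_free_hw(hw);		/* frees copied sbands and the hw itself */
}
#endif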
// SPDX-License-Identifier: GPL-2.0 /* * NETLINK Netlink attributes * * Authors: Thomas Graf <tgraf@suug.ch> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/jiffies.h> #include <linux/nospec.h> #include <linux/skbuff.h> #include <linux/string.h> #include <linux/types.h> #include <net/netlink.h> /* For these data types, attribute length should be exactly the given * size. However, to maintain compatibility with broken commands, if the * attribute length does not match the expected size a warning is emitted * to the user that the command is sending invalid data and needs to be fixed. */ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = { [NLA_U8] = sizeof(u8), [NLA_U16] = sizeof(u16), [NLA_U32] = sizeof(u32), [NLA_U64] = sizeof(u64), [NLA_S8] = sizeof(s8), [NLA_S16] = sizeof(s16), [NLA_S32] = sizeof(s32), [NLA_S64] = sizeof(s64), [NLA_BE16] = sizeof(__be16), [NLA_BE32] = sizeof(__be32), }; static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = { [NLA_U8] = sizeof(u8), [NLA_U16] = sizeof(u16), [NLA_U32] = sizeof(u32), [NLA_U64] = sizeof(u64), [NLA_MSECS] = sizeof(u64), [NLA_NESTED] = NLA_HDRLEN, [NLA_S8] = sizeof(s8), [NLA_S16] = sizeof(s16), [NLA_S32] = sizeof(s32), [NLA_S64] = sizeof(s64), [NLA_BE16] = sizeof(__be16), [NLA_BE32] = sizeof(__be32), }; /* * Nested policies might refer back to the original * policy in some cases, and userspace could try to * abuse that and recurse by nesting in the right * ways. Limit recursion to avoid this problem.
*/ #define MAX_POLICY_RECURSION_DEPTH 10 static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, unsigned int validate, struct netlink_ext_ack *extack, struct nlattr **tb, unsigned int depth); static int validate_nla_bitfield32(const struct nlattr *nla, const u32 valid_flags_mask) { const struct nla_bitfield32 *bf = nla_data(nla); if (!valid_flags_mask) return -EINVAL; /*disallow invalid bit selector */ if (bf->selector & ~valid_flags_mask) return -EINVAL; /*disallow invalid bit values */ if (bf->value & ~valid_flags_mask) return -EINVAL; /*disallow valid bit values that are not selected*/ if (bf->value & ~bf->selector) return -EINVAL; return 0; } static int nla_validate_array(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack, unsigned int validate, unsigned int depth) { const struct nlattr *entry; int rem; nla_for_each_attr(entry, head, len, rem) { int ret; if (nla_len(entry) == 0) continue; if (nla_len(entry) < NLA_HDRLEN) { NL_SET_ERR_MSG_ATTR_POL(extack, entry, policy, "Array element too short"); return -ERANGE; } ret = __nla_validate_parse(nla_data(entry), nla_len(entry), maxtype, policy, validate, extack, NULL, depth + 1); if (ret < 0) return ret; } return 0; } void nla_get_range_unsigned(const struct nla_policy *pt, struct netlink_range_validation *range) { WARN_ON_ONCE(pt->validation_type != NLA_VALIDATE_RANGE_PTR && (pt->min < 0 || pt->max < 0)); range->min = 0; switch (pt->type) { case NLA_U8: range->max = U8_MAX; break; case NLA_U16: case NLA_BE16: case NLA_BINARY: range->max = U16_MAX; break; case NLA_U32: case NLA_BE32: range->max = U32_MAX; break; case NLA_U64: case NLA_UINT: case NLA_MSECS: range->max = U64_MAX; break; default: WARN_ON_ONCE(1); return; } switch (pt->validation_type) { case NLA_VALIDATE_RANGE: case NLA_VALIDATE_RANGE_WARN_TOO_LONG: range->min = pt->min; range->max = pt->max; break; case NLA_VALIDATE_RANGE_PTR: *range = *pt->range; break; case NLA_VALIDATE_MIN: range->min = pt->min; break; case NLA_VALIDATE_MAX: range->max = pt->max; break; default: break; } } static int nla_validate_range_unsigned(const struct nla_policy *pt, const struct nlattr *nla, struct netlink_ext_ack *extack, unsigned int validate) { struct netlink_range_validation range; u64 value; switch (pt->type) { case NLA_U8: value = nla_get_u8(nla); break; case NLA_U16: value = nla_get_u16(nla); break; case NLA_U32: value = nla_get_u32(nla); break; case NLA_U64: value = nla_get_u64(nla); break; case NLA_UINT: value = nla_get_uint(nla); break; case NLA_MSECS: value = nla_get_u64(nla); break; case NLA_BINARY: value = nla_len(nla); break; case NLA_BE16: value = ntohs(nla_get_be16(nla)); break; case NLA_BE32: value = ntohl(nla_get_be32(nla)); break; default: return -EINVAL; } nla_get_range_unsigned(pt, &range); if (pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG && pt->type == NLA_BINARY && value > range.max) { pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n", current->comm, pt->type); if (validate & NL_VALIDATE_STRICT_ATTRS) { NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "invalid attribute length"); return -EINVAL; } /* this assumes min <= max (don't validate against min) */ return 0; } if (value < range.min || value > range.max) { bool binary = pt->type == NLA_BINARY; if (binary) NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "binary attribute size out of range"); else NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "integer out of range"); return -ERANGE; 
} return 0; } void nla_get_range_signed(const struct nla_policy *pt, struct netlink_range_validation_signed *range) { switch (pt->type) { case NLA_S8: range->min = S8_MIN; range->max = S8_MAX; break; case NLA_S16: range->min = S16_MIN; range->max = S16_MAX; break; case NLA_S32: range->min = S32_MIN; range->max = S32_MAX; break; case NLA_S64: case NLA_SINT: range->min = S64_MIN; range->max = S64_MAX; break; default: WARN_ON_ONCE(1); return; } switch (pt->validation_type) { case NLA_VALIDATE_RANGE: range->min = pt->min; range->max = pt->max; break; case NLA_VALIDATE_RANGE_PTR: *range = *pt->range_signed; break; case NLA_VALIDATE_MIN: range->min = pt->min; break; case NLA_VALIDATE_MAX: range->max = pt->max; break; default: break; } } static int nla_validate_int_range_signed(const struct nla_policy *pt, const struct nlattr *nla, struct netlink_ext_ack *extack) { struct netlink_range_validation_signed range; s64 value; switch (pt->type) { case NLA_S8: value = nla_get_s8(nla); break; case NLA_S16: value = nla_get_s16(nla); break; case NLA_S32: value = nla_get_s32(nla); break; case NLA_S64: value = nla_get_s64(nla); break; case NLA_SINT: value = nla_get_sint(nla); break; default: return -EINVAL; } nla_get_range_signed(pt, &range); if (value < range.min || value > range.max) { NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "integer out of range"); return -ERANGE; } return 0; } static int nla_validate_int_range(const struct nla_policy *pt, const struct nlattr *nla, struct netlink_ext_ack *extack, unsigned int validate) { switch (pt->type) { case NLA_U8: case NLA_U16: case NLA_U32: case NLA_U64: case NLA_UINT: case NLA_MSECS: case NLA_BINARY: case NLA_BE16: case NLA_BE32: return nla_validate_range_unsigned(pt, nla, extack, validate); case NLA_S8: case NLA_S16: case NLA_S32: case NLA_S64: case NLA_SINT: return nla_validate_int_range_signed(pt, nla, extack); default: WARN_ON(1); return -EINVAL; } } static int nla_validate_mask(const struct nla_policy *pt, const struct nlattr *nla, struct netlink_ext_ack *extack) { u64 value; switch (pt->type) { case NLA_U8: value = nla_get_u8(nla); break; case NLA_U16: value = nla_get_u16(nla); break; case NLA_U32: value = nla_get_u32(nla); break; case NLA_U64: value = nla_get_u64(nla); break; case NLA_UINT: value = nla_get_uint(nla); break; case NLA_BE16: value = ntohs(nla_get_be16(nla)); break; case NLA_BE32: value = ntohl(nla_get_be32(nla)); break; default: return -EINVAL; } if (value & ~(u64)pt->mask) { NL_SET_ERR_MSG_ATTR(extack, nla, "reserved bit set"); return -EINVAL; } return 0; } static int validate_nla(const struct nlattr *nla, int maxtype, const struct nla_policy *policy, unsigned int validate, struct netlink_ext_ack *extack, unsigned int depth) { u16 strict_start_type = policy[0].strict_start_type; const struct nla_policy *pt; int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla); int err = -ERANGE; if (strict_start_type && type >= strict_start_type) validate |= NL_VALIDATE_STRICT; if (type <= 0 || type > maxtype) return 0; type = array_index_nospec(type, maxtype + 1); pt = &policy[type]; BUG_ON(pt->type > NLA_TYPE_MAX); if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) { pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n", current->comm, type); if (validate & NL_VALIDATE_STRICT_ATTRS) { NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "invalid attribute length"); return -EINVAL; } } if (validate & NL_VALIDATE_NESTED) { if ((pt->type == NLA_NESTED || pt->type == NLA_NESTED_ARRAY) && !(nla->nla_type & NLA_F_NESTED)) { 
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "NLA_F_NESTED is missing"); return -EINVAL; } if (pt->type != NLA_NESTED && pt->type != NLA_NESTED_ARRAY && pt->type != NLA_UNSPEC && (nla->nla_type & NLA_F_NESTED)) { NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "NLA_F_NESTED not expected"); return -EINVAL; } } switch (pt->type) { case NLA_REJECT: if (extack && pt->reject_message) { NL_SET_BAD_ATTR(extack, nla); extack->_msg = pt->reject_message; return -EINVAL; } err = -EINVAL; goto out_err; case NLA_FLAG: if (attrlen > 0) goto out_err; break; case NLA_SINT: case NLA_UINT: if (attrlen != sizeof(u32) && attrlen != sizeof(u64)) { NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "invalid attribute length"); return -EINVAL; } break; case NLA_BITFIELD32: if (attrlen != sizeof(struct nla_bitfield32)) goto out_err; err = validate_nla_bitfield32(nla, pt->bitfield32_valid); if (err) goto out_err; break; case NLA_NUL_STRING: if (pt->len) minlen = min_t(int, attrlen, pt->len + 1); else minlen = attrlen; if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) { err = -EINVAL; goto out_err; } fallthrough; case NLA_STRING: if (attrlen < 1) goto out_err; if (pt->len) { char *buf = nla_data(nla); if (buf[attrlen - 1] == '\0') attrlen--; if (attrlen > pt->len) goto out_err; } break; case NLA_BINARY: if (pt->len && attrlen > pt->len) goto out_err; break; case NLA_NESTED: /* a nested attributes is allowed to be empty; if its not, * it must have a size of at least NLA_HDRLEN. */ if (attrlen == 0) break; if (attrlen < NLA_HDRLEN) goto out_err; if (pt->nested_policy) { err = __nla_validate_parse(nla_data(nla), nla_len(nla), pt->len, pt->nested_policy, validate, extack, NULL, depth + 1); if (err < 0) { /* * return directly to preserve the inner * error message/attribute pointer */ return err; } } break; case NLA_NESTED_ARRAY: /* a nested array attribute is allowed to be empty; if its not, * it must have a size of at least NLA_HDRLEN. 
*/ if (attrlen == 0) break; if (attrlen < NLA_HDRLEN) goto out_err; if (pt->nested_policy) { int err; err = nla_validate_array(nla_data(nla), nla_len(nla), pt->len, pt->nested_policy, extack, validate, depth); if (err < 0) { /* * return directly to preserve the inner * error message/attribute pointer */ return err; } } break; case NLA_UNSPEC: if (validate & NL_VALIDATE_UNSPEC) { NL_SET_ERR_MSG_ATTR(extack, nla, "Unsupported attribute"); return -EINVAL; } if (attrlen < pt->len) goto out_err; break; default: if (pt->len) minlen = pt->len; else minlen = nla_attr_minlen[pt->type]; if (attrlen < minlen) goto out_err; } /* further validation */ switch (pt->validation_type) { case NLA_VALIDATE_NONE: /* nothing to do */ break; case NLA_VALIDATE_RANGE_PTR: case NLA_VALIDATE_RANGE: case NLA_VALIDATE_RANGE_WARN_TOO_LONG: case NLA_VALIDATE_MIN: case NLA_VALIDATE_MAX: err = nla_validate_int_range(pt, nla, extack, validate); if (err) return err; break; case NLA_VALIDATE_MASK: err = nla_validate_mask(pt, nla, extack); if (err) return err; break; case NLA_VALIDATE_FUNCTION: if (pt->validate) { err = pt->validate(nla, extack); if (err) return err; } break; } return 0; out_err: NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "Attribute failed policy validation"); return err; } static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, unsigned int validate, struct netlink_ext_ack *extack, struct nlattr **tb, unsigned int depth) { const struct nlattr *nla; int rem; if (depth >= MAX_POLICY_RECURSION_DEPTH) { NL_SET_ERR_MSG(extack, "allowed policy recursion depth exceeded"); return -EINVAL; } if (tb) memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); nla_for_each_attr(nla, head, len, rem) { u16 type = nla_type(nla); if (type == 0 || type > maxtype) { if (validate & NL_VALIDATE_MAXTYPE) { NL_SET_ERR_MSG_ATTR(extack, nla, "Unknown attribute type"); return -EINVAL; } continue; } type = array_index_nospec(type, maxtype + 1); if (policy) { int err = validate_nla(nla, maxtype, policy, validate, extack, depth); if (err < 0) return err; } if (tb) tb[type] = (struct nlattr *)nla; } if (unlikely(rem > 0)) { pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n", rem, current->comm); NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes"); if (validate & NL_VALIDATE_TRAILING) return -EINVAL; } return 0; } /** * __nla_validate - Validate a stream of attributes * @head: head of attribute stream * @len: length of attribute stream * @maxtype: maximum attribute type to be expected * @policy: validation policy * @validate: validation strictness * @extack: extended ACK report struct * * Validates all attributes in the specified attribute stream against the * specified policy. Validation depends on the validate flags passed, see * &enum netlink_validation for more details on that. * See documentation of struct nla_policy for more details. * * Returns 0 on success or a negative error code. */ int __nla_validate(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, unsigned int validate, struct netlink_ext_ack *extack) { return __nla_validate_parse(head, len, maxtype, policy, validate, extack, NULL, 0); } EXPORT_SYMBOL(__nla_validate); /** * nla_policy_len - Determine the max. length of a policy * @p: policy to use * @n: number of policies * * Determines the max. length of the policy. It is currently used * to allocated Netlink buffers roughly the size of the actual * message. 
* * Returns 0 on success or a negative error code. */ int nla_policy_len(const struct nla_policy *p, int n) { int i, len = 0; for (i = 0; i < n; i++, p++) { if (p->len) len += nla_total_size(p->len); else if (nla_attr_len[p->type]) len += nla_total_size(nla_attr_len[p->type]); else if (nla_attr_minlen[p->type]) len += nla_total_size(nla_attr_minlen[p->type]); } return len; } EXPORT_SYMBOL(nla_policy_len); /** * __nla_parse - Parse a stream of attributes into a tb buffer * @tb: destination array with maxtype+1 elements * @maxtype: maximum attribute type to be expected * @head: head of attribute stream * @len: length of attribute stream * @policy: validation policy * @validate: validation strictness * @extack: extended ACK pointer * * Parses a stream of attributes and stores a pointer to each attribute in * the tb array accessible via the attribute type. * Validation is controlled by the @validate parameter. * * Returns 0 on success or a negative error code. */ int __nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, int len, const struct nla_policy *policy, unsigned int validate, struct netlink_ext_ack *extack) { return __nla_validate_parse(head, len, maxtype, policy, validate, extack, tb, 0); } EXPORT_SYMBOL(__nla_parse); /** * nla_find - Find a specific attribute in a stream of attributes * @head: head of attribute stream * @len: length of attribute stream * @attrtype: type of attribute to look for * * Returns the first attribute in the stream matching the specified type. */ struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype) { const struct nlattr *nla; int rem; nla_for_each_attr(nla, head, len, rem) if (nla_type(nla) == attrtype) return (struct nlattr *)nla; return NULL; } EXPORT_SYMBOL(nla_find); /** * nla_strscpy - Copy string attribute payload into a sized buffer * @dst: Where to copy the string to. * @nla: Attribute to copy the string from. * @dstsize: Size of destination buffer. * * Copies at most dstsize - 1 bytes into the destination buffer. * Unlike strscpy() the destination buffer is always padded out. * * Return: * * srclen - Returns @nla length (not including the trailing %NUL). * * -E2BIG - If @dstsize is 0 or greater than U16_MAX or @nla length greater * than @dstsize. */ ssize_t nla_strscpy(char *dst, const struct nlattr *nla, size_t dstsize) { size_t srclen = nla_len(nla); char *src = nla_data(nla); ssize_t ret; size_t len; if (dstsize == 0 || WARN_ON_ONCE(dstsize > U16_MAX)) return -E2BIG; if (srclen > 0 && src[srclen - 1] == '\0') srclen--; if (srclen >= dstsize) { len = dstsize - 1; ret = -E2BIG; } else { len = srclen; ret = len; } memcpy(dst, src, len); /* Zero pad end of dst. */ memset(dst + len, 0, dstsize - len); return ret; } EXPORT_SYMBOL(nla_strscpy); /** * nla_strdup - Copy string attribute payload into a newly allocated buffer * @nla: attribute to copy the string from * @flags: the type of memory to allocate (see kmalloc). * * Returns a pointer to the allocated buffer or NULL on error. 
*/ char *nla_strdup(const struct nlattr *nla, gfp_t flags) { size_t srclen = nla_len(nla); char *src = nla_data(nla), *dst; if (srclen > 0 && src[srclen - 1] == '\0') srclen--; dst = kmalloc(srclen + 1, flags); if (dst != NULL) { memcpy(dst, src, srclen); dst[srclen] = '\0'; } return dst; } EXPORT_SYMBOL(nla_strdup); /** * nla_memcpy - Copy a netlink attribute into another memory area * @dest: where to copy to memcpy * @src: netlink attribute to copy from * @count: size of the destination area * * Note: The number of bytes copied is limited by the length of * attribute's payload. memcpy * * Returns the number of bytes copied. */ int nla_memcpy(void *dest, const struct nlattr *src, int count) { int minlen = min_t(int, count, nla_len(src)); memcpy(dest, nla_data(src), minlen); if (count > minlen) memset(dest + minlen, 0, count - minlen); return minlen; } EXPORT_SYMBOL(nla_memcpy); /** * nla_memcmp - Compare an attribute with sized memory area * @nla: netlink attribute * @data: memory area * @size: size of memory area */ int nla_memcmp(const struct nlattr *nla, const void *data, size_t size) { int d = nla_len(nla) - size; if (d == 0) d = memcmp(nla_data(nla), data, size); return d; } EXPORT_SYMBOL(nla_memcmp); /** * nla_strcmp - Compare a string attribute against a string * @nla: netlink string attribute * @str: another string */ int nla_strcmp(const struct nlattr *nla, const char *str) { int len = strlen(str); char *buf = nla_data(nla); int attrlen = nla_len(nla); int d; while (attrlen > 0 && buf[attrlen - 1] == '\0') attrlen--; d = attrlen - len; if (d == 0) d = memcmp(nla_data(nla), str, len); return d; } EXPORT_SYMBOL(nla_strcmp); #ifdef CONFIG_NET /** * __nla_reserve - reserve room for attribute on the skb * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload. */ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen) { struct nlattr *nla; nla = skb_put(skb, nla_total_size(attrlen)); nla->nla_type = attrtype; nla->nla_len = nla_attr_size(attrlen); memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen)); return nla; } EXPORT_SYMBOL(__nla_reserve); /** * __nla_reserve_64bit - reserve room for attribute on the skb and align it * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload * @padattr: attribute type for the padding * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. It also ensure that this * attribute will have a 64-bit aligned nla_data() area. * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload. */ struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen, int padattr) { nla_align_64bit(skb, padattr); return __nla_reserve(skb, attrtype, attrlen); } EXPORT_SYMBOL(__nla_reserve_64bit); /** * __nla_reserve_nohdr - reserve room for attribute without header * @skb: socket buffer to reserve room on * @attrlen: length of attribute payload * * Reserves room for attribute payload without a header. * * The caller is responsible to ensure that the skb provides enough * tailroom for the payload. 
*/ void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen) { return skb_put_zero(skb, NLA_ALIGN(attrlen)); } EXPORT_SYMBOL(__nla_reserve_nohdr); /** * nla_reserve - reserve room for attribute on the skb * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. * * Returns NULL if the tailroom of the skb is insufficient to store * the attribute header and payload. */ struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen) { if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen))) return NULL; return __nla_reserve(skb, attrtype, attrlen); } EXPORT_SYMBOL(nla_reserve); /** * nla_reserve_64bit - reserve room for attribute on the skb and align it * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload * @padattr: attribute type for the padding * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. It also ensure that this * attribute will have a 64-bit aligned nla_data() area. * * Returns NULL if the tailroom of the skb is insufficient to store * the attribute header and payload. */ struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen, int padattr) { size_t len; if (nla_need_padding_for_64bit(skb)) len = nla_total_size_64bit(attrlen); else len = nla_total_size(attrlen); if (unlikely(skb_tailroom(skb) < len)) return NULL; return __nla_reserve_64bit(skb, attrtype, attrlen, padattr); } EXPORT_SYMBOL(nla_reserve_64bit); /** * nla_reserve_nohdr - reserve room for attribute without header * @skb: socket buffer to reserve room on * @attrlen: length of attribute payload * * Reserves room for attribute payload without a header. * * Returns NULL if the tailroom of the skb is insufficient to store * the attribute payload. */ void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen) { if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) return NULL; return __nla_reserve_nohdr(skb, attrlen); } EXPORT_SYMBOL(nla_reserve_nohdr); /** * __nla_put - Add a netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload. */ void __nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data) { struct nlattr *nla; nla = __nla_reserve(skb, attrtype, attrlen); memcpy(nla_data(nla), data, attrlen); } EXPORT_SYMBOL(__nla_put); /** * __nla_put_64bit - Add a netlink attribute to a socket buffer and align it * @skb: socket buffer to add attribute to * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload * @padattr: attribute type for the padding * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload. 
*/ void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen, const void *data, int padattr) { struct nlattr *nla; nla = __nla_reserve_64bit(skb, attrtype, attrlen, padattr); memcpy(nla_data(nla), data, attrlen); } EXPORT_SYMBOL(__nla_put_64bit); /** * __nla_put_nohdr - Add a netlink attribute without header * @skb: socket buffer to add attribute to * @attrlen: length of attribute payload * @data: head of attribute payload * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute payload. */ void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data) { void *start; start = __nla_reserve_nohdr(skb, attrlen); memcpy(start, data, attrlen); } EXPORT_SYMBOL(__nla_put_nohdr); /** * nla_put - Add a netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload * * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store * the attribute header and payload. */ int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data) { if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen))) return -EMSGSIZE; __nla_put(skb, attrtype, attrlen, data); return 0; } EXPORT_SYMBOL(nla_put); /** * nla_put_64bit - Add a netlink attribute to a socket buffer and align it * @skb: socket buffer to add attribute to * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload * @padattr: attribute type for the padding * * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store * the attribute header and payload. */ int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen, const void *data, int padattr) { size_t len; if (nla_need_padding_for_64bit(skb)) len = nla_total_size_64bit(attrlen); else len = nla_total_size(attrlen); if (unlikely(skb_tailroom(skb) < len)) return -EMSGSIZE; __nla_put_64bit(skb, attrtype, attrlen, data, padattr); return 0; } EXPORT_SYMBOL(nla_put_64bit); /** * nla_put_nohdr - Add a netlink attribute without header * @skb: socket buffer to add attribute to * @attrlen: length of attribute payload * @data: head of attribute payload * * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store * the attribute payload. */ int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data) { if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) return -EMSGSIZE; __nla_put_nohdr(skb, attrlen, data); return 0; } EXPORT_SYMBOL(nla_put_nohdr); /** * nla_append - Add a netlink attribute without header or padding * @skb: socket buffer to add attribute to * @attrlen: length of attribute payload * @data: head of attribute payload * * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store * the attribute payload. */ int nla_append(struct sk_buff *skb, int attrlen, const void *data) { if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) return -EMSGSIZE; skb_put_data(skb, data, attrlen); return 0; } EXPORT_SYMBOL(nla_append); #endif |
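/*
 * Editor's illustrative sketch (not part of the file above): how the nla_*
 * helpers and a struct nla_policy are typically combined by a user of this
 * API. The attribute enum, ex_policy, ex_fill and ex_parse below are
 * hypothetical placeholders; only the nla_put_*/nla_parse/nla_get_* and
 * NLA_POLICY_RANGE calls are existing netlink API.
 */
#if 0	/* example only, never compiled */
#include <net/netlink.h>

enum {
	EX_ATTR_UNSPEC,
	EX_ATTR_MTU,		/* NLA_U32, range-checked by the policy */
	EX_ATTR_NAME,		/* NUL-terminated string, at most 15 chars */
	__EX_ATTR_MAX,
};
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
	[EX_ATTR_MTU]  = NLA_POLICY_RANGE(NLA_U32, 68, 65535),
	[EX_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = 15 },
};

/* sender side: put attributes into an skb, bounded by its tailroom */
static int ex_fill(struct sk_buff *skb)
{
	if (nla_put_u32(skb, EX_ATTR_MTU, 1500) ||
	    nla_put_string(skb, EX_ATTR_NAME, "eth0"))
		return -EMSGSIZE;
	return 0;
}

/* receiver side: validate against the policy and parse into tb[] */
static int ex_parse(const struct nlattr *head, int len,
		    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[EX_ATTR_MAX + 1];
	int err;

	err = nla_parse(tb, EX_ATTR_MAX, head, len, ex_policy, extack);
	if (err)
		return err;

	if (tb[EX_ATTR_MTU])
		pr_debug("mtu=%u\n", nla_get_u32(tb[EX_ATTR_MTU]));
	return 0;
}
#endif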
// SPDX-License-Identifier: GPL-2.0+ /* * mxuport.c - MOXA UPort series driver * * Copyright (c) 2006 Moxa Technologies Co., Ltd.
* Copyright (c) 2013 Andrew Lunn <andrew@lunn.ch> * * Supports the following Moxa USB to serial converters: * 2 ports : UPort 1250, UPort 1250I * 4 ports : UPort 1410, UPort 1450, UPort 1450I * 8 ports : UPort 1610-8, UPort 1650-8 * 16 ports : UPort 1610-16, UPort 1650-16 */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/jiffies.h> #include <linux/serial.h> #include <linux/serial_reg.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/unaligned.h> /* Definitions for the vendor ID and device ID */ #define MX_USBSERIAL_VID 0x110A #define MX_UPORT1250_PID 0x1250 #define MX_UPORT1251_PID 0x1251 #define MX_UPORT1410_PID 0x1410 #define MX_UPORT1450_PID 0x1450 #define MX_UPORT1451_PID 0x1451 #define MX_UPORT1618_PID 0x1618 #define MX_UPORT1658_PID 0x1658 #define MX_UPORT1613_PID 0x1613 #define MX_UPORT1653_PID 0x1653 /* Definitions for USB info */ #define HEADER_SIZE 4 #define EVENT_LENGTH 8 #define DOWN_BLOCK_SIZE 64 /* Definitions for firmware info */ #define VER_ADDR_1 0x20 #define VER_ADDR_2 0x24 #define VER_ADDR_3 0x28 /* Definitions for USB vendor request */ #define RQ_VENDOR_NONE 0x00 #define RQ_VENDOR_SET_BAUD 0x01 /* Set baud rate */ #define RQ_VENDOR_SET_LINE 0x02 /* Set line status */ #define RQ_VENDOR_SET_CHARS 0x03 /* Set Xon/Xoff chars */ #define RQ_VENDOR_SET_RTS 0x04 /* Set RTS */ #define RQ_VENDOR_SET_DTR 0x05 /* Set DTR */ #define RQ_VENDOR_SET_XONXOFF 0x06 /* Set auto Xon/Xoff */ #define RQ_VENDOR_SET_RX_HOST_EN 0x07 /* Set RX host enable */ #define RQ_VENDOR_SET_OPEN 0x08 /* Set open/close port */ #define RQ_VENDOR_PURGE 0x09 /* Purge Rx/Tx buffer */ #define RQ_VENDOR_SET_MCR 0x0A /* Set MCR register */ #define RQ_VENDOR_SET_BREAK 0x0B /* Set Break signal */ #define RQ_VENDOR_START_FW_DOWN 0x0C /* Start firmware download */ #define RQ_VENDOR_STOP_FW_DOWN 0x0D /* Stop firmware download */ #define RQ_VENDOR_QUERY_FW_READY 0x0E /* Query if new firmware ready */ #define RQ_VENDOR_SET_FIFO_DISABLE 0x0F /* Set fifo disable */ #define RQ_VENDOR_SET_INTERFACE 0x10 /* Set interface */ #define RQ_VENDOR_SET_HIGH_PERFOR 0x11 /* Set hi-performance */ #define RQ_VENDOR_ERASE_BLOCK 0x12 /* Erase flash block */ #define RQ_VENDOR_WRITE_PAGE 0x13 /* Write flash page */ #define RQ_VENDOR_PREPARE_WRITE 0x14 /* Prepare write flash */ #define RQ_VENDOR_CONFIRM_WRITE 0x15 /* Confirm write flash */ #define RQ_VENDOR_LOCATE 0x16 /* Locate the device */ #define RQ_VENDOR_START_ROM_DOWN 0x17 /* Start firmware download */ #define RQ_VENDOR_ROM_DATA 0x18 /* Rom file data */ #define RQ_VENDOR_STOP_ROM_DOWN 0x19 /* Stop firmware download */ #define RQ_VENDOR_FW_DATA 0x20 /* Firmware data */ #define RQ_VENDOR_RESET_DEVICE 0x23 /* Try to reset the device */ #define RQ_VENDOR_QUERY_FW_CONFIG 0x24 #define RQ_VENDOR_GET_VERSION 0x81 /* Get firmware version */ #define RQ_VENDOR_GET_PAGE 0x82 /* Read flash page */ #define RQ_VENDOR_GET_ROM_PROC 0x83 /* Get ROM process state */ #define RQ_VENDOR_GET_INQUEUE 0x84 /* Data in input buffer */ #define RQ_VENDOR_GET_OUTQUEUE 0x85 /* Data in output buffer */ #define RQ_VENDOR_GET_MSR 0x86 /* Get modem status register */ /* Definitions for UPort event type */ #define UPORT_EVENT_NONE 0 /* None */ #define UPORT_EVENT_TXBUF_THRESHOLD 1 /* Tx buffer threshold */ #define UPORT_EVENT_SEND_NEXT 2 /* Send next */ #define UPORT_EVENT_MSR 3 /* Modem status */ #define UPORT_EVENT_LSR 
4 /* Line status */ #define UPORT_EVENT_MCR 5 /* Modem control */ /* Definitions for serial event type */ #define SERIAL_EV_CTS 0x0008 /* CTS changed state */ #define SERIAL_EV_DSR 0x0010 /* DSR changed state */ #define SERIAL_EV_RLSD 0x0020 /* RLSD changed state */ /* Definitions for modem control event type */ #define SERIAL_EV_XOFF 0x40 /* XOFF received */ /* Definitions for line control of communication */ #define MX_WORDLENGTH_5 5 #define MX_WORDLENGTH_6 6 #define MX_WORDLENGTH_7 7 #define MX_WORDLENGTH_8 8 #define MX_PARITY_NONE 0 #define MX_PARITY_ODD 1 #define MX_PARITY_EVEN 2 #define MX_PARITY_MARK 3 #define MX_PARITY_SPACE 4 #define MX_STOP_BITS_1 0 #define MX_STOP_BITS_1_5 1 #define MX_STOP_BITS_2 2 #define MX_RTS_DISABLE 0x0 #define MX_RTS_ENABLE 0x1 #define MX_RTS_HW 0x2 #define MX_RTS_NO_CHANGE 0x3 /* Flag, not valid register value*/ #define MX_INT_RS232 0 #define MX_INT_2W_RS485 1 #define MX_INT_RS422 2 #define MX_INT_4W_RS485 3 /* Definitions for holding reason */ #define MX_WAIT_FOR_CTS 0x0001 #define MX_WAIT_FOR_DSR 0x0002 #define MX_WAIT_FOR_DCD 0x0004 #define MX_WAIT_FOR_XON 0x0008 #define MX_WAIT_FOR_START_TX 0x0010 #define MX_WAIT_FOR_UNTHROTTLE 0x0020 #define MX_WAIT_FOR_LOW_WATER 0x0040 #define MX_WAIT_FOR_SEND_NEXT 0x0080 #define MX_UPORT_2_PORT BIT(0) #define MX_UPORT_4_PORT BIT(1) #define MX_UPORT_8_PORT BIT(2) #define MX_UPORT_16_PORT BIT(3) /* This structure holds all of the local port information */ struct mxuport_port { u8 mcr_state; /* Last MCR state */ u8 msr_state; /* Last MSR state */ struct mutex mutex; /* Protects mcr_state */ spinlock_t spinlock; /* Protects msr_state */ }; /* Table of devices that work with this driver */ static const struct usb_device_id mxuport_idtable[] = { { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1250_PID), .driver_info = MX_UPORT_2_PORT }, { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1251_PID), .driver_info = MX_UPORT_2_PORT }, { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1410_PID), .driver_info = MX_UPORT_4_PORT }, { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1450_PID), .driver_info = MX_UPORT_4_PORT }, { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1451_PID), .driver_info = MX_UPORT_4_PORT }, { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1618_PID), .driver_info = MX_UPORT_8_PORT }, { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1658_PID), .driver_info = MX_UPORT_8_PORT }, { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1613_PID), .driver_info = MX_UPORT_16_PORT }, { USB_DEVICE(MX_USBSERIAL_VID, MX_UPORT1653_PID), .driver_info = MX_UPORT_16_PORT }, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, mxuport_idtable); /* * Add a four byte header containing the port number and the number of * bytes of data in the message. Return the number of bytes in the * buffer. */ static int mxuport_prepare_write_buffer(struct usb_serial_port *port, void *dest, size_t size) { u8 *buf = dest; int count; count = kfifo_out_locked(&port->write_fifo, buf + HEADER_SIZE, size - HEADER_SIZE, &port->lock); put_unaligned_be16(port->port_number, buf); put_unaligned_be16(count, buf + 2); dev_dbg(&port->dev, "%s - size %zd count %d\n", __func__, size, count); return count + HEADER_SIZE; } /* Read the given buffer in from the control pipe. 
*/ static int mxuport_recv_ctrl_urb(struct usb_serial *serial, u8 request, u16 value, u16 index, u8 *data, size_t size) { int status; status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), request, (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE), value, index, data, size, USB_CTRL_GET_TIMEOUT); if (status < 0) { dev_err(&serial->interface->dev, "%s - usb_control_msg failed (%d)\n", __func__, status); return status; } if (status != size) { dev_err(&serial->interface->dev, "%s - short read (%d / %zd)\n", __func__, status, size); return -EIO; } return status; } /* Write the given buffer out to the control pipe. */ static int mxuport_send_ctrl_data_urb(struct usb_serial *serial, u8 request, u16 value, u16 index, u8 *data, size_t size) { int status; status = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), request, (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE), value, index, data, size, USB_CTRL_SET_TIMEOUT); if (status < 0) { dev_err(&serial->interface->dev, "%s - usb_control_msg failed (%d)\n", __func__, status); return status; } return 0; } /* Send a vendor request without any data */ static int mxuport_send_ctrl_urb(struct usb_serial *serial, u8 request, u16 value, u16 index) { return mxuport_send_ctrl_data_urb(serial, request, value, index, NULL, 0); } /* * mxuport_throttle - throttle function of driver * * This function is called by the tty driver when it wants to stop the * data being read from the port. Since all the data comes over one * bulk in endpoint, we cannot stop submitting urbs by setting * port->throttle. Instead tell the device to stop sending us data for * the port. */ static void mxuport_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial = port->serial; dev_dbg(&port->dev, "%s\n", __func__); mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN, 0, port->port_number); } /* * mxuport_unthrottle - unthrottle function of driver * * This function is called by the tty driver when it wants to resume * the data being read from the port. Tell the device it can resume * sending us received data from the port. */ static void mxuport_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial = port->serial; dev_dbg(&port->dev, "%s\n", __func__); mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN, 1, port->port_number); } /* * Processes one chunk of data received for a port. Mostly a copy of * usb_serial_generic_process_read_urb(). 
*/ static void mxuport_process_read_urb_data(struct usb_serial_port *port, char *data, int size) { int i; if (port->sysrq) { for (i = 0; i < size; i++, data++) { if (!usb_serial_handle_sysrq_char(port, *data)) tty_insert_flip_char(&port->port, *data, TTY_NORMAL); } } else { tty_insert_flip_string(&port->port, data, size); } tty_flip_buffer_push(&port->port); } static void mxuport_msr_event(struct usb_serial_port *port, u8 buf[4]) { struct mxuport_port *mxport = usb_get_serial_port_data(port); u8 rcv_msr_hold = buf[2] & 0xF0; u16 rcv_msr_event = get_unaligned_be16(buf); unsigned long flags; if (rcv_msr_event == 0) return; /* Update MSR status */ spin_lock_irqsave(&mxport->spinlock, flags); dev_dbg(&port->dev, "%s - current MSR status = 0x%x\n", __func__, mxport->msr_state); if (rcv_msr_hold & UART_MSR_CTS) { mxport->msr_state |= UART_MSR_CTS; dev_dbg(&port->dev, "%s - CTS high\n", __func__); } else { mxport->msr_state &= ~UART_MSR_CTS; dev_dbg(&port->dev, "%s - CTS low\n", __func__); } if (rcv_msr_hold & UART_MSR_DSR) { mxport->msr_state |= UART_MSR_DSR; dev_dbg(&port->dev, "%s - DSR high\n", __func__); } else { mxport->msr_state &= ~UART_MSR_DSR; dev_dbg(&port->dev, "%s - DSR low\n", __func__); } if (rcv_msr_hold & UART_MSR_DCD) { mxport->msr_state |= UART_MSR_DCD; dev_dbg(&port->dev, "%s - DCD high\n", __func__); } else { mxport->msr_state &= ~UART_MSR_DCD; dev_dbg(&port->dev, "%s - DCD low\n", __func__); } spin_unlock_irqrestore(&mxport->spinlock, flags); if (rcv_msr_event & (SERIAL_EV_CTS | SERIAL_EV_DSR | SERIAL_EV_RLSD)) { if (rcv_msr_event & SERIAL_EV_CTS) { port->icount.cts++; dev_dbg(&port->dev, "%s - CTS change\n", __func__); } if (rcv_msr_event & SERIAL_EV_DSR) { port->icount.dsr++; dev_dbg(&port->dev, "%s - DSR change\n", __func__); } if (rcv_msr_event & SERIAL_EV_RLSD) { port->icount.dcd++; dev_dbg(&port->dev, "%s - DCD change\n", __func__); } wake_up_interruptible(&port->port.delta_msr_wait); } } static void mxuport_lsr_event(struct usb_serial_port *port, u8 buf[4]) { u8 lsr_event = buf[2]; if (lsr_event & UART_LSR_BI) { port->icount.brk++; dev_dbg(&port->dev, "%s - break error\n", __func__); } if (lsr_event & UART_LSR_FE) { port->icount.frame++; dev_dbg(&port->dev, "%s - frame error\n", __func__); } if (lsr_event & UART_LSR_PE) { port->icount.parity++; dev_dbg(&port->dev, "%s - parity error\n", __func__); } if (lsr_event & UART_LSR_OE) { port->icount.overrun++; dev_dbg(&port->dev, "%s - overrun error\n", __func__); } } /* * When something interesting happens, modem control lines XON/XOFF * etc, the device sends an event. Process these events. */ static void mxuport_process_read_urb_event(struct usb_serial_port *port, u8 buf[4], u32 event) { dev_dbg(&port->dev, "%s - receive event : %04x\n", __func__, event); switch (event) { case UPORT_EVENT_SEND_NEXT: /* * Sent as part of the flow control on device buffers. * Not currently used. */ break; case UPORT_EVENT_MSR: mxuport_msr_event(port, buf); break; case UPORT_EVENT_LSR: mxuport_lsr_event(port, buf); break; case UPORT_EVENT_MCR: /* * Event to indicate a change in XON/XOFF from the * peer. Currently not used. We just continue * sending the device data and it will buffer it if * needed. This event could be used for flow control * between the host and the device. */ break; default: dev_dbg(&port->dev, "Unexpected event\n"); break; } } /* * One URB can contain data for multiple ports. Demultiplex the data, * checking the port exists, is opened and the message is valid. 
*/ static void mxuport_process_read_urb_demux_data(struct urb *urb) { struct usb_serial_port *port = urb->context; struct usb_serial *serial = port->serial; u8 *data = urb->transfer_buffer; u8 *end = data + urb->actual_length; struct usb_serial_port *demux_port; u8 *ch; u16 rcv_port; u16 rcv_len; while (data < end) { if (data + HEADER_SIZE > end) { dev_warn(&port->dev, "%s - message with short header\n", __func__); return; } rcv_port = get_unaligned_be16(data); if (rcv_port >= serial->num_ports) { dev_warn(&port->dev, "%s - message for invalid port\n", __func__); return; } demux_port = serial->port[rcv_port]; rcv_len = get_unaligned_be16(data + 2); if (!rcv_len || data + HEADER_SIZE + rcv_len > end) { dev_warn(&port->dev, "%s - short data\n", __func__); return; } if (tty_port_initialized(&demux_port->port)) { ch = data + HEADER_SIZE; mxuport_process_read_urb_data(demux_port, ch, rcv_len); } else { dev_dbg(&demux_port->dev, "%s - data for closed port\n", __func__); } data += HEADER_SIZE + rcv_len; } } /* * One URB can contain events for multiple ports. Demultiplex the event, * checking the port exists, and is opened. */ static void mxuport_process_read_urb_demux_event(struct urb *urb) { struct usb_serial_port *port = urb->context; struct usb_serial *serial = port->serial; u8 *data = urb->transfer_buffer; u8 *end = data + urb->actual_length; struct usb_serial_port *demux_port; u8 *ch; u16 rcv_port; u16 rcv_event; while (data < end) { if (data + EVENT_LENGTH > end) { dev_warn(&port->dev, "%s - message with short event\n", __func__); return; } rcv_port = get_unaligned_be16(data); if (rcv_port >= serial->num_ports) { dev_warn(&port->dev, "%s - message for invalid port\n", __func__); return; } demux_port = serial->port[rcv_port]; if (tty_port_initialized(&demux_port->port)) { ch = data + HEADER_SIZE; rcv_event = get_unaligned_be16(data + 2); mxuport_process_read_urb_event(demux_port, ch, rcv_event); } else { dev_dbg(&demux_port->dev, "%s - event for closed port\n", __func__); } data += EVENT_LENGTH; } } /* * This is called when we have received data on the bulk in * endpoint. Depending on which port it was received on, it can * contain serial data or events. */ static void mxuport_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; struct usb_serial *serial = port->serial; if (port == serial->port[0]) mxuport_process_read_urb_demux_data(urb); if (port == serial->port[1]) mxuport_process_read_urb_demux_event(urb); } /* * Ask the device how many bytes it has queued to be sent out. If * there are none, return true. 
*/ static bool mxuport_tx_empty(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; bool is_empty = true; u32 txlen; u8 *len_buf; int err; len_buf = kzalloc(4, GFP_KERNEL); if (!len_buf) goto out; err = mxuport_recv_ctrl_urb(serial, RQ_VENDOR_GET_OUTQUEUE, 0, port->port_number, len_buf, 4); if (err < 0) goto out; txlen = get_unaligned_be32(len_buf); dev_dbg(&port->dev, "%s - tx len = %u\n", __func__, txlen); if (txlen != 0) is_empty = false; out: kfree(len_buf); return is_empty; } static int mxuport_set_mcr(struct usb_serial_port *port, u8 mcr_state) { struct usb_serial *serial = port->serial; int err; dev_dbg(&port->dev, "%s - %02x\n", __func__, mcr_state); err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_MCR, mcr_state, port->port_number); if (err) dev_err(&port->dev, "%s - failed to change MCR\n", __func__); return err; } static int mxuport_set_dtr(struct usb_serial_port *port, int on) { struct mxuport_port *mxport = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; int err; mutex_lock(&mxport->mutex); err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_DTR, !!on, port->port_number); if (!err) { if (on) mxport->mcr_state |= UART_MCR_DTR; else mxport->mcr_state &= ~UART_MCR_DTR; } mutex_unlock(&mxport->mutex); return err; } static int mxuport_set_rts(struct usb_serial_port *port, u8 state) { struct mxuport_port *mxport = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; int err; u8 mcr_state; mutex_lock(&mxport->mutex); mcr_state = mxport->mcr_state; switch (state) { case MX_RTS_DISABLE: mcr_state &= ~UART_MCR_RTS; break; case MX_RTS_ENABLE: mcr_state |= UART_MCR_RTS; break; case MX_RTS_HW: /* * Do not update mxport->mcr_state when doing hardware * flow control. */ break; default: /* * Should not happen, but somebody might try passing * MX_RTS_NO_CHANGE, which is not valid. 
*/ err = -EINVAL; goto out; } err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RTS, state, port->port_number); if (!err) mxport->mcr_state = mcr_state; out: mutex_unlock(&mxport->mutex); return err; } static void mxuport_dtr_rts(struct usb_serial_port *port, int on) { struct mxuport_port *mxport = usb_get_serial_port_data(port); u8 mcr_state; int err; mutex_lock(&mxport->mutex); mcr_state = mxport->mcr_state; if (on) mcr_state |= (UART_MCR_RTS | UART_MCR_DTR); else mcr_state &= ~(UART_MCR_RTS | UART_MCR_DTR); err = mxuport_set_mcr(port, mcr_state); if (!err) mxport->mcr_state = mcr_state; mutex_unlock(&mxport->mutex); } static int mxuport_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct mxuport_port *mxport = usb_get_serial_port_data(port); int err; u8 mcr_state; mutex_lock(&mxport->mutex); mcr_state = mxport->mcr_state; if (set & TIOCM_RTS) mcr_state |= UART_MCR_RTS; if (set & TIOCM_DTR) mcr_state |= UART_MCR_DTR; if (clear & TIOCM_RTS) mcr_state &= ~UART_MCR_RTS; if (clear & TIOCM_DTR) mcr_state &= ~UART_MCR_DTR; err = mxuport_set_mcr(port, mcr_state); if (!err) mxport->mcr_state = mcr_state; mutex_unlock(&mxport->mutex); return err; } static int mxuport_tiocmget(struct tty_struct *tty) { struct mxuport_port *mxport; struct usb_serial_port *port = tty->driver_data; unsigned int result; unsigned long flags; unsigned int msr; unsigned int mcr; mxport = usb_get_serial_port_data(port); mutex_lock(&mxport->mutex); spin_lock_irqsave(&mxport->spinlock, flags); msr = mxport->msr_state; mcr = mxport->mcr_state; spin_unlock_irqrestore(&mxport->spinlock, flags); mutex_unlock(&mxport->mutex); result = (((mcr & UART_MCR_DTR) ? TIOCM_DTR : 0) | /* 0x002 */ ((mcr & UART_MCR_RTS) ? TIOCM_RTS : 0) | /* 0x004 */ ((msr & UART_MSR_CTS) ? TIOCM_CTS : 0) | /* 0x020 */ ((msr & UART_MSR_DCD) ? TIOCM_CAR : 0) | /* 0x040 */ ((msr & UART_MSR_RI) ? TIOCM_RI : 0) | /* 0x080 */ ((msr & UART_MSR_DSR) ? 
TIOCM_DSR : 0)); /* 0x100 */ dev_dbg(&port->dev, "%s - 0x%04x\n", __func__, result); return result; } static int mxuport_set_termios_flow(struct tty_struct *tty, const struct ktermios *old_termios, struct usb_serial_port *port, struct usb_serial *serial) { u8 xon = START_CHAR(tty); u8 xoff = STOP_CHAR(tty); int enable; int err; u8 *buf; u8 rts; buf = kmalloc(2, GFP_KERNEL); if (!buf) return -ENOMEM; /* S/W flow control settings */ if (I_IXOFF(tty) || I_IXON(tty)) { enable = 1; buf[0] = xon; buf[1] = xoff; err = mxuport_send_ctrl_data_urb(serial, RQ_VENDOR_SET_CHARS, 0, port->port_number, buf, 2); if (err) goto out; dev_dbg(&port->dev, "%s - XON = 0x%02x, XOFF = 0x%02x\n", __func__, xon, xoff); } else { enable = 0; } err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_XONXOFF, enable, port->port_number); if (err) goto out; rts = MX_RTS_NO_CHANGE; /* H/W flow control settings */ if (!old_termios || C_CRTSCTS(tty) != (old_termios->c_cflag & CRTSCTS)) { if (C_CRTSCTS(tty)) rts = MX_RTS_HW; else rts = MX_RTS_ENABLE; } if (C_BAUD(tty)) { if (old_termios && (old_termios->c_cflag & CBAUD) == B0) { /* Raise DTR and RTS */ if (C_CRTSCTS(tty)) rts = MX_RTS_HW; else rts = MX_RTS_ENABLE; mxuport_set_dtr(port, 1); } } else { /* Drop DTR and RTS */ rts = MX_RTS_DISABLE; mxuport_set_dtr(port, 0); } if (rts != MX_RTS_NO_CHANGE) err = mxuport_set_rts(port, rts); out: kfree(buf); return err; } static void mxuport_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct usb_serial *serial = port->serial; u8 *buf; u8 data_bits; u8 stop_bits; u8 parity; int baud; int err; if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios) && tty->termios.c_iflag == old_termios->c_iflag) { dev_dbg(&port->dev, "%s - nothing to change\n", __func__); return; } buf = kmalloc(4, GFP_KERNEL); if (!buf) return; /* Set data bit of termios */ switch (C_CSIZE(tty)) { case CS5: data_bits = MX_WORDLENGTH_5; break; case CS6: data_bits = MX_WORDLENGTH_6; break; case CS7: data_bits = MX_WORDLENGTH_7; break; case CS8: default: data_bits = MX_WORDLENGTH_8; break; } /* Set parity of termios */ if (C_PARENB(tty)) { if (C_CMSPAR(tty)) { if (C_PARODD(tty)) parity = MX_PARITY_MARK; else parity = MX_PARITY_SPACE; } else { if (C_PARODD(tty)) parity = MX_PARITY_ODD; else parity = MX_PARITY_EVEN; } } else { parity = MX_PARITY_NONE; } /* Set stop bit of termios */ if (C_CSTOPB(tty)) stop_bits = MX_STOP_BITS_2; else stop_bits = MX_STOP_BITS_1; buf[0] = data_bits; buf[1] = parity; buf[2] = stop_bits; buf[3] = 0; err = mxuport_send_ctrl_data_urb(serial, RQ_VENDOR_SET_LINE, 0, port->port_number, buf, 4); if (err) goto out; err = mxuport_set_termios_flow(tty, old_termios, port, serial); if (err) goto out; baud = tty_get_baud_rate(tty); if (!baud) baud = 9600; /* Note: Little Endian */ put_unaligned_le32(baud, buf); err = mxuport_send_ctrl_data_urb(serial, RQ_VENDOR_SET_BAUD, 0, port->port_number, buf, 4); if (err) goto out; dev_dbg(&port->dev, "baud_rate : %d\n", baud); dev_dbg(&port->dev, "data_bits : %d\n", data_bits); dev_dbg(&port->dev, "parity : %d\n", parity); dev_dbg(&port->dev, "stop_bits : %d\n", stop_bits); out: kfree(buf); } /* * Determine how many ports this device has dynamically. It will be * called after the probe() callback is called, but before attach(). 
*/ static int mxuport_calc_num_ports(struct usb_serial *serial, struct usb_serial_endpoints *epds) { unsigned long features = (unsigned long)usb_get_serial_data(serial); int num_ports; int i; if (features & MX_UPORT_2_PORT) { num_ports = 2; } else if (features & MX_UPORT_4_PORT) { num_ports = 4; } else if (features & MX_UPORT_8_PORT) { num_ports = 8; } else if (features & MX_UPORT_16_PORT) { num_ports = 16; } else { dev_warn(&serial->interface->dev, "unknown device, assuming two ports\n"); num_ports = 2; } /* * Setup bulk-out endpoint multiplexing. All ports share the same * bulk-out endpoint. */ BUILD_BUG_ON(ARRAY_SIZE(epds->bulk_out) < 16); for (i = 1; i < num_ports; ++i) epds->bulk_out[i] = epds->bulk_out[0]; epds->num_bulk_out = num_ports; return num_ports; } /* Get the version of the firmware currently running. */ static int mxuport_get_fw_version(struct usb_serial *serial, u32 *version) { u8 *ver_buf; int err; ver_buf = kzalloc(4, GFP_KERNEL); if (!ver_buf) return -ENOMEM; /* Get firmware version from SDRAM */ err = mxuport_recv_ctrl_urb(serial, RQ_VENDOR_GET_VERSION, 0, 0, ver_buf, 4); if (err != 4) { err = -EIO; goto out; } *version = (ver_buf[0] << 16) | (ver_buf[1] << 8) | ver_buf[2]; err = 0; out: kfree(ver_buf); return err; } /* Given a firmware blob, download it to the device. */ static int mxuport_download_fw(struct usb_serial *serial, const struct firmware *fw_p) { u8 *fw_buf; size_t txlen; size_t fwidx; int err; fw_buf = kmalloc(DOWN_BLOCK_SIZE, GFP_KERNEL); if (!fw_buf) return -ENOMEM; dev_dbg(&serial->interface->dev, "Starting firmware download...\n"); err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_START_FW_DOWN, 0, 0); if (err) goto out; fwidx = 0; do { txlen = min_t(size_t, (fw_p->size - fwidx), DOWN_BLOCK_SIZE); memcpy(fw_buf, &fw_p->data[fwidx], txlen); err = mxuport_send_ctrl_data_urb(serial, RQ_VENDOR_FW_DATA, 0, 0, fw_buf, txlen); if (err) { mxuport_send_ctrl_urb(serial, RQ_VENDOR_STOP_FW_DOWN, 0, 0); goto out; } fwidx += txlen; usleep_range(1000, 2000); } while (fwidx < fw_p->size); msleep(1000); err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_STOP_FW_DOWN, 0, 0); if (err) goto out; msleep(1000); err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_QUERY_FW_READY, 0, 0); out: kfree(fw_buf); return err; } static int mxuport_probe(struct usb_serial *serial, const struct usb_device_id *id) { u16 productid = le16_to_cpu(serial->dev->descriptor.idProduct); const struct firmware *fw_p = NULL; u32 version; int local_ver; char buf[32]; int err; /* Load our firmware */ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_QUERY_FW_CONFIG, 0, 0); if (err) { mxuport_send_ctrl_urb(serial, RQ_VENDOR_RESET_DEVICE, 0, 0); return err; } err = mxuport_get_fw_version(serial, &version); if (err < 0) return err; dev_dbg(&serial->interface->dev, "Device firmware version v%x.%x.%x\n", (version & 0xff0000) >> 16, (version & 0xff00) >> 8, (version & 0xff)); snprintf(buf, sizeof(buf) - 1, "moxa/moxa-%04x.fw", productid); err = request_firmware(&fw_p, buf, &serial->interface->dev); if (err) { dev_warn(&serial->interface->dev, "Firmware %s not found\n", buf); /* Use the firmware already in the device */ err = 0; } else { local_ver = ((fw_p->data[VER_ADDR_1] << 16) | (fw_p->data[VER_ADDR_2] << 8) | fw_p->data[VER_ADDR_3]); dev_dbg(&serial->interface->dev, "Available firmware version v%x.%x.%x\n", fw_p->data[VER_ADDR_1], fw_p->data[VER_ADDR_2], fw_p->data[VER_ADDR_3]); if (local_ver > version) { err = mxuport_download_fw(serial, fw_p); if (err) goto out; err = mxuport_get_fw_version(serial, &version); if (err 
< 0) goto out; } } dev_info(&serial->interface->dev, "Using device firmware version v%x.%x.%x\n", (version & 0xff0000) >> 16, (version & 0xff00) >> 8, (version & 0xff)); /* * Contains the features of this hardware. Store away for * later use, eg, number of ports. */ usb_set_serial_data(serial, (void *)id->driver_info); out: if (fw_p) release_firmware(fw_p); return err; } static int mxuport_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct mxuport_port *mxport; int err; mxport = devm_kzalloc(&port->dev, sizeof(struct mxuport_port), GFP_KERNEL); if (!mxport) return -ENOMEM; mutex_init(&mxport->mutex); spin_lock_init(&mxport->spinlock); /* Set the port private data */ usb_set_serial_port_data(port, mxport); /* Set FIFO (Enable) */ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_FIFO_DISABLE, 0, port->port_number); if (err) return err; /* Set transmission mode (Hi-Performance) */ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_HIGH_PERFOR, 0, port->port_number); if (err) return err; /* Set interface (RS-232) */ return mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_INTERFACE, MX_INT_RS232, port->port_number); } static int mxuport_attach(struct usb_serial *serial) { struct usb_serial_port *port0 = serial->port[0]; struct usb_serial_port *port1 = serial->port[1]; int err; /* * All data from the ports is received on the first bulk in * endpoint, with a multiplex header. The second bulk in is * used for events. * * Start to read from the device. */ err = usb_serial_generic_submit_read_urbs(port0, GFP_KERNEL); if (err) return err; err = usb_serial_generic_submit_read_urbs(port1, GFP_KERNEL); if (err) { usb_serial_generic_close(port0); return err; } return 0; } static void mxuport_release(struct usb_serial *serial) { struct usb_serial_port *port0 = serial->port[0]; struct usb_serial_port *port1 = serial->port[1]; usb_serial_generic_close(port1); usb_serial_generic_close(port0); } static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port) { struct mxuport_port *mxport = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; int err; /* Set receive host (enable) */ err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN, 1, port->port_number); if (err) return err; err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_OPEN, 1, port->port_number); if (err) { mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN, 0, port->port_number); return err; } /* Initial port termios */ if (tty) mxuport_set_termios(tty, port, NULL); /* * TODO: use RQ_VENDOR_GET_MSR, once we know what it * returns. */ mxport->msr_state = 0; return err; } static void mxuport_close(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_OPEN, 0, port->port_number); mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_RX_HOST_EN, 0, port->port_number); } /* Send a break to the port. 
*/ static int mxuport_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial = port->serial; int enable; if (break_state == -1) { enable = 1; dev_dbg(&port->dev, "%s - sending break\n", __func__); } else { enable = 0; dev_dbg(&port->dev, "%s - clearing break\n", __func__); } return mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_BREAK, enable, port->port_number); } static int mxuport_resume(struct usb_serial *serial) { struct usb_serial_port *port; int c = 0; int i; int r; for (i = 0; i < 2; i++) { port = serial->port[i]; r = usb_serial_generic_submit_read_urbs(port, GFP_NOIO); if (r < 0) c++; } for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; if (!tty_port_initialized(&port->port)) continue; r = usb_serial_generic_write_start(port, GFP_NOIO); if (r < 0) c++; } return c ? -EIO : 0; } static struct usb_serial_driver mxuport_device = { .driver = { .name = "mxuport", }, .description = "MOXA UPort", .id_table = mxuport_idtable, .num_bulk_in = 2, .num_bulk_out = 1, .probe = mxuport_probe, .port_probe = mxuport_port_probe, .attach = mxuport_attach, .release = mxuport_release, .calc_num_ports = mxuport_calc_num_ports, .open = mxuport_open, .close = mxuport_close, .set_termios = mxuport_set_termios, .break_ctl = mxuport_break_ctl, .tx_empty = mxuport_tx_empty, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .throttle = mxuport_throttle, .unthrottle = mxuport_unthrottle, .tiocmget = mxuport_tiocmget, .tiocmset = mxuport_tiocmset, .dtr_rts = mxuport_dtr_rts, .process_read_urb = mxuport_process_read_urb, .prepare_write_buffer = mxuport_prepare_write_buffer, .resume = mxuport_resume, }; static struct usb_serial_driver *const serial_drivers[] = { &mxuport_device, NULL }; module_usb_serial_driver(serial_drivers, mxuport_idtable); MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>"); MODULE_AUTHOR("<support@moxa.com>"); MODULE_DESCRIPTION("Moxa UPORT USB Serial driver"); MODULE_LICENSE("GPL"); |
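/*
 * Editor's note (illustrative sketch, not part of the driver): the UPort
 * framing used above is a four-byte big-endian header -- a u16 port number
 * followed by a u16 payload length -- prepended in
 * mxuport_prepare_write_buffer() and parsed again in
 * mxuport_process_read_urb_demux_data(). The standalone userspace example
 * below shows the same packing and walking logic; the helper names
 * (mx_put_be16, mx_pack_frame, mx_walk_frames) are hypothetical and only
 * mirror the kernel's put/get_unaligned_be16 usage.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MX_HEADER_SIZE 4

static void mx_put_be16(uint8_t *p, uint16_t v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

static uint16_t mx_get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* Prepend the per-port header, as done for the shared bulk-out endpoint. */
static size_t mx_pack_frame(uint8_t *dst, uint16_t port,
			    const uint8_t *data, uint16_t len)
{
	mx_put_be16(dst, port);
	mx_put_be16(dst + 2, len);
	memcpy(dst + MX_HEADER_SIZE, data, len);
	return MX_HEADER_SIZE + len;
}

/* Walk a multiplexed buffer the same way the bulk-in demux loop does. */
static void mx_walk_frames(const uint8_t *buf, size_t size)
{
	const uint8_t *data = buf, *end = buf + size;

	while (data + MX_HEADER_SIZE <= end) {
		uint16_t port = mx_get_be16(data);
		uint16_t len = mx_get_be16(data + 2);

		if (!len || data + MX_HEADER_SIZE + len > end)
			break;	/* short or malformed frame */
		printf("port %u: %u byte(s)\n", port, len);
		data += MX_HEADER_SIZE + len;
	}
}

int main(void)
{
	uint8_t buf[64];
	size_t used = 0;

	used += mx_pack_frame(buf + used, 0, (const uint8_t *)"hi", 2);
	used += mx_pack_frame(buf + used, 3, (const uint8_t *)"there", 5);
	mx_walk_frames(buf, used);
	return 0;
}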
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/processor.h>

static inline int phys_addr_valid(resource_size_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	return !(addr >> boot_cpu_data.x86_phys_bits);
#else
	return 1;
#endif
}
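/*
 * Editor's note (illustration only): phys_addr_valid() above simply checks
 * that no address bit above the CPU's physical-address width is set. A
 * minimal userspace sketch of the same test, assuming a 46-bit limit (the
 * real width comes from CPUID via boot_cpu_data.x86_phys_bits):
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool addr_valid(uint64_t addr, unsigned int phys_bits)
{
	/* shifting away the low phys_bits bits must leave nothing behind */
	return (addr >> phys_bits) == 0;
}

int main(void)
{
	const unsigned int phys_bits = 46;	/* assumed CPU limit */

	printf("%d\n", addr_valid(0x0000200000000000ULL, phys_bits)); /* 1: bit 45 set */
	printf("%d\n", addr_valid(0x0000400000000000ULL, phys_bits)); /* 0: bit 46 set */
	return 0;
}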
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 */

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/srcu.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

/*
 * Clear all of the marks on an inode when it is being evicted from core
 */
void __fsnotify_inode_delete(struct inode *inode)
{
	fsnotify_clear_marks_by_inode(inode);
}
EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);

void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{
	fsnotify_clear_marks_by_mount(mnt);
}

/**
 * fsnotify_unmount_inodes - an sb is unmounting. handle any watched inodes.
 * @sb: superblock being unmounted.
 *
 * Called during unmount with no locks held, so needs to be safe against
 * concurrent modifiers.
We temporarily drop sb->s_inode_list_lock and CAN block. */ static void fsnotify_unmount_inodes(struct super_block *sb) { struct inode *inode, *iput_inode = NULL; spin_lock(&sb->s_inode_list_lock); list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { /* * We cannot __iget() an inode in state I_FREEING, * I_WILL_FREE, or I_NEW which is fine because by that point * the inode cannot have any associated watches. */ spin_lock(&inode->i_lock); if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) { spin_unlock(&inode->i_lock); continue; } /* * If i_count is zero, the inode cannot have any watches and * doing an __iget/iput with SB_ACTIVE clear would actually * evict all inodes with zero i_count from icache which is * unnecessarily violent and may in fact be illegal to do. * However, we should have been called /after/ evict_inodes * removed all zero refcount inodes, in any case. Test to * be sure. */ if (!atomic_read(&inode->i_count)) { spin_unlock(&inode->i_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); spin_unlock(&sb->s_inode_list_lock); iput(iput_inode); /* for each watch, send FS_UNMOUNT and then remove it */ fsnotify_inode(inode, FS_UNMOUNT); fsnotify_inode_delete(inode); iput_inode = inode; cond_resched(); spin_lock(&sb->s_inode_list_lock); } spin_unlock(&sb->s_inode_list_lock); iput(iput_inode); } void fsnotify_sb_delete(struct super_block *sb) { struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb); /* Were any marks ever added to any object on this sb? */ if (!sbinfo) return; fsnotify_unmount_inodes(sb); fsnotify_clear_marks_by_sb(sb); /* Wait for outstanding object references from connectors */ wait_var_event(fsnotify_sb_watched_objects(sb), !atomic_long_read(fsnotify_sb_watched_objects(sb))); WARN_ON(fsnotify_sb_has_priority_watchers(sb, FSNOTIFY_PRIO_CONTENT)); WARN_ON(fsnotify_sb_has_priority_watchers(sb, FSNOTIFY_PRIO_PRE_CONTENT)); } void fsnotify_sb_free(struct super_block *sb) { kfree(sb->s_fsnotify_info); } /* * Given an inode, first check if we care what happens to our children. Inotify * and dnotify both tell their parents about events. If we care about any event * on a child we run all of our children and set a dentry flag saying that the * parent cares. Thus when an event happens on a child it can quickly tell * if there is a need to find a parent and send the event to the parent. */ void fsnotify_set_children_dentry_flags(struct inode *inode) { struct dentry *alias; if (!S_ISDIR(inode->i_mode)) return; spin_lock(&inode->i_lock); /* run all of the dentries associated with this inode. Since this is a * directory, there damn well better only be one item on this list */ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { struct dentry *child; /* run all of the children of the original inode and fix their * d_flags to indicate parental interest (their parent is the * original inode) */ spin_lock(&alias->d_lock); hlist_for_each_entry(child, &alias->d_children, d_sib) { if (!child->d_inode) continue; spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED); child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; spin_unlock(&child->d_lock); } spin_unlock(&alias->d_lock); } spin_unlock(&inode->i_lock); } /* * Lazily clear false positive PARENT_WATCHED flag for child whose parent had * stopped watching children. 
*/ static void fsnotify_clear_child_dentry_flag(struct inode *pinode, struct dentry *dentry) { spin_lock(&dentry->d_lock); /* * d_lock is a sufficient barrier to prevent observing a non-watched * parent state from before the fsnotify_set_children_dentry_flags() * or fsnotify_update_flags() call that had set PARENT_WATCHED. */ if (!fsnotify_inode_watches_children(pinode)) dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; spin_unlock(&dentry->d_lock); } /* Are inode/sb/mount interested in parent and name info with this event? */ static bool fsnotify_event_needs_parent(struct inode *inode, __u32 mnt_mask, __u32 mask) { __u32 marks_mask = 0; /* We only send parent/name to inode/sb/mount for events on non-dir */ if (mask & FS_ISDIR) return false; /* * All events that are possible on child can also may be reported with * parent/name info to inode/sb/mount. Otherwise, a watching parent * could result in events reported with unexpected name info to sb/mount. */ BUILD_BUG_ON(FS_EVENTS_POSS_ON_CHILD & ~FS_EVENTS_POSS_TO_PARENT); /* Did either inode/sb/mount subscribe for events with parent/name? */ marks_mask |= fsnotify_parent_needed_mask( READ_ONCE(inode->i_fsnotify_mask)); marks_mask |= fsnotify_parent_needed_mask( READ_ONCE(inode->i_sb->s_fsnotify_mask)); marks_mask |= fsnotify_parent_needed_mask(mnt_mask); /* Did they subscribe for this event with parent/name info? */ return mask & marks_mask; } /* Are there any inode/mount/sb objects that are interested in this event? */ static inline bool fsnotify_object_watched(struct inode *inode, __u32 mnt_mask, __u32 mask) { __u32 marks_mask = READ_ONCE(inode->i_fsnotify_mask) | mnt_mask | READ_ONCE(inode->i_sb->s_fsnotify_mask); return mask & marks_mask & ALL_FSNOTIFY_EVENTS; } /* * Notify this dentry's parent about a child's events with child name info * if parent is watching or if inode/sb/mount are interested in events with * parent and name info. * * Notify only the child without name info if parent is not watching and * inode/sb/mount are not interested in events with parent and name info. */ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type) { const struct path *path = fsnotify_data_path(data, data_type); __u32 mnt_mask = path ? READ_ONCE(real_mount(path->mnt)->mnt_fsnotify_mask) : 0; struct inode *inode = d_inode(dentry); struct dentry *parent; bool parent_watched = dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED; bool parent_needed, parent_interested; __u32 p_mask; struct inode *p_inode = NULL; struct name_snapshot name; struct qstr *file_name = NULL; int ret = 0; /* Optimize the likely case of nobody watching this path */ if (likely(!parent_watched && !fsnotify_object_watched(inode, mnt_mask, mask))) return 0; parent = NULL; parent_needed = fsnotify_event_needs_parent(inode, mnt_mask, mask); if (!parent_watched && !parent_needed) goto notify; /* Does parent inode care about events on children? */ parent = dget_parent(dentry); p_inode = parent->d_inode; p_mask = fsnotify_inode_watches_children(p_inode); if (unlikely(parent_watched && !p_mask)) fsnotify_clear_child_dentry_flag(p_inode, dentry); /* * Include parent/name in notification either if some notification * groups require parent info or the parent is interested in this event. 
*/ parent_interested = mask & p_mask & ALL_FSNOTIFY_EVENTS; if (parent_needed || parent_interested) { /* When notifying parent, child should be passed as data */ WARN_ON_ONCE(inode != fsnotify_data_inode(data, data_type)); /* Notify both parent and child with child name info */ take_dentry_name_snapshot(&name, dentry); file_name = &name.name; if (parent_interested) mask |= FS_EVENT_ON_CHILD; } notify: ret = fsnotify(mask, data, data_type, p_inode, file_name, inode, 0); if (file_name) release_dentry_name_snapshot(&name); dput(parent); return ret; } EXPORT_SYMBOL_GPL(__fsnotify_parent); static int fsnotify_handle_inode_event(struct fsnotify_group *group, struct fsnotify_mark *inode_mark, u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *name, u32 cookie) { const struct path *path = fsnotify_data_path(data, data_type); struct inode *inode = fsnotify_data_inode(data, data_type); const struct fsnotify_ops *ops = group->ops; if (WARN_ON_ONCE(!ops->handle_inode_event)) return 0; if (WARN_ON_ONCE(!inode && !dir)) return 0; if ((inode_mark->flags & FSNOTIFY_MARK_FLAG_EXCL_UNLINK) && path && d_unlinked(path->dentry)) return 0; /* Check interest of this mark in case event was sent with two marks */ if (!(mask & inode_mark->mask & ALL_FSNOTIFY_EVENTS)) return 0; return ops->handle_inode_event(inode_mark, mask, inode, dir, name, cookie); } static int fsnotify_handle_event(struct fsnotify_group *group, __u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *name, u32 cookie, struct fsnotify_iter_info *iter_info) { struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); struct fsnotify_mark *parent_mark = fsnotify_iter_parent_mark(iter_info); int ret; if (WARN_ON_ONCE(fsnotify_iter_sb_mark(iter_info)) || WARN_ON_ONCE(fsnotify_iter_vfsmount_mark(iter_info))) return 0; /* * For FS_RENAME, 'dir' is old dir and 'data' is new dentry. * The only ->handle_inode_event() backend that supports FS_RENAME is * dnotify, where it means file was renamed within same parent. */ if (mask & FS_RENAME) { struct dentry *moved = fsnotify_data_dentry(data, data_type); if (dir != moved->d_parent->d_inode) return 0; } if (parent_mark) { ret = fsnotify_handle_inode_event(group, parent_mark, mask, data, data_type, dir, name, 0); if (ret) return ret; } if (!inode_mark) return 0; if (mask & FS_EVENT_ON_CHILD) { /* * Some events can be sent on both parent dir and child marks * (e.g. FS_ATTRIB). If both parent dir and child are * watching, report the event once to parent dir with name (if * interested) and once to child without name (if interested). * The child watcher is expecting an event without a file name * and without the FS_EVENT_ON_CHILD flag. 
*/ mask &= ~FS_EVENT_ON_CHILD; dir = NULL; name = NULL; } return fsnotify_handle_inode_event(group, inode_mark, mask, data, data_type, dir, name, cookie); } static int send_to_group(__u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info) { struct fsnotify_group *group = NULL; __u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS); __u32 marks_mask = 0; __u32 marks_ignore_mask = 0; bool is_dir = mask & FS_ISDIR; struct fsnotify_mark *mark; int type; if (!iter_info->report_mask) return 0; /* clear ignored on inode modification */ if (mask & FS_MODIFY) { fsnotify_foreach_iter_mark_type(iter_info, mark, type) { if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) mark->ignore_mask = 0; } } /* Are any of the group marks interested in this event? */ fsnotify_foreach_iter_mark_type(iter_info, mark, type) { group = mark->group; marks_mask |= mark->mask; marks_ignore_mask |= fsnotify_effective_ignore_mask(mark, is_dir, type); } pr_debug("%s: group=%p mask=%x marks_mask=%x marks_ignore_mask=%x data=%p data_type=%d dir=%p cookie=%d\n", __func__, group, mask, marks_mask, marks_ignore_mask, data, data_type, dir, cookie); if (!(test_mask & marks_mask & ~marks_ignore_mask)) return 0; if (group->ops->handle_event) { return group->ops->handle_event(group, mask, data, data_type, dir, file_name, cookie, iter_info); } return fsnotify_handle_event(group, mask, data, data_type, dir, file_name, cookie, iter_info); } static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector **connp) { struct fsnotify_mark_connector *conn; struct hlist_node *node = NULL; conn = srcu_dereference(*connp, &fsnotify_mark_srcu); if (conn) node = srcu_dereference(conn->list.first, &fsnotify_mark_srcu); return hlist_entry_safe(node, struct fsnotify_mark, obj_list); } static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark) { struct hlist_node *node = NULL; if (mark) node = srcu_dereference(mark->obj_list.next, &fsnotify_mark_srcu); return hlist_entry_safe(node, struct fsnotify_mark, obj_list); } /* * iter_info is a multi head priority queue of marks. * Pick a subset of marks from queue heads, all with the same group * and set the report_mask to a subset of the selected marks. * Returns false if there are no more groups to iterate. */ static bool fsnotify_iter_select_report_types( struct fsnotify_iter_info *iter_info) { struct fsnotify_group *max_prio_group = NULL; struct fsnotify_mark *mark; int type; /* Choose max prio group among groups of all queue heads */ fsnotify_foreach_iter_type(type) { mark = iter_info->marks[type]; if (mark && fsnotify_compare_groups(max_prio_group, mark->group) > 0) max_prio_group = mark->group; } if (!max_prio_group) return false; /* Set the report mask for marks from same group as max prio group */ iter_info->current_group = max_prio_group; iter_info->report_mask = 0; fsnotify_foreach_iter_type(type) { mark = iter_info->marks[type]; if (mark && mark->group == iter_info->current_group) { /* * FSNOTIFY_ITER_TYPE_PARENT indicates that this inode * is watching children and interested in this event, * which is an event possible on child. * But is *this mark* watching children? */ if (type == FSNOTIFY_ITER_TYPE_PARENT && !(mark->mask & FS_EVENT_ON_CHILD) && !(fsnotify_ignore_mask(mark) & FS_EVENT_ON_CHILD)) continue; fsnotify_iter_set_report_type(iter_info, type); } } return true; } /* * Pop from iter_info multi head queue, the marks that belong to the group of * current iteration step. 
*/ static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info) { struct fsnotify_mark *mark; int type; /* * We cannot use fsnotify_foreach_iter_mark_type() here because we * may need to advance a mark of type X that belongs to current_group * but was not selected for reporting. */ fsnotify_foreach_iter_type(type) { mark = iter_info->marks[type]; if (mark && mark->group == iter_info->current_group) iter_info->marks[type] = fsnotify_next_mark(iter_info->marks[type]); } } /* * fsnotify - This is the main call to fsnotify. * * The VFS calls into hook specific functions in linux/fsnotify.h. * Those functions then in turn call here. Here will call out to all of the * registered fsnotify_group. Those groups can then use the notification event * in whatever means they feel necessary. * * @mask: event type and flags * @data: object that event happened on * @data_type: type of object for fanotify_data_XXX() accessors * @dir: optional directory associated with event - * if @file_name is not NULL, this is the directory that * @file_name is relative to * @file_name: optional file name associated with event * @inode: optional inode associated with event - * If @dir and @inode are both non-NULL, event may be * reported to both. * @cookie: inotify rename cookie */ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *file_name, struct inode *inode, u32 cookie) { const struct path *path = fsnotify_data_path(data, data_type); struct super_block *sb = fsnotify_data_sb(data, data_type); struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb); struct fsnotify_iter_info iter_info = {}; struct mount *mnt = NULL; struct inode *inode2 = NULL; struct dentry *moved; int inode2_type; int ret = 0; __u32 test_mask, marks_mask; if (path) mnt = real_mount(path->mnt); if (!inode) { /* Dirent event - report on TYPE_INODE to dir */ inode = dir; /* For FS_RENAME, inode is old_dir and inode2 is new_dir */ if (mask & FS_RENAME) { moved = fsnotify_data_dentry(data, data_type); inode2 = moved->d_parent->d_inode; inode2_type = FSNOTIFY_ITER_TYPE_INODE2; } } else if (mask & FS_EVENT_ON_CHILD) { /* * Event on child - report on TYPE_PARENT to dir if it is * watching children and on TYPE_INODE to child. */ inode2 = dir; inode2_type = FSNOTIFY_ITER_TYPE_PARENT; } /* * Optimization: srcu_read_lock() has a memory barrier which can * be expensive. It protects walking the *_fsnotify_marks lists. * However, if we do not walk the lists, we do not have to do * SRCU because we have no references to any objects and do not * need SRCU to keep them "alive". */ if ((!sbinfo || !sbinfo->sb_marks) && (!mnt || !mnt->mnt_fsnotify_marks) && (!inode || !inode->i_fsnotify_marks) && (!inode2 || !inode2->i_fsnotify_marks)) return 0; marks_mask = READ_ONCE(sb->s_fsnotify_mask); if (mnt) marks_mask |= READ_ONCE(mnt->mnt_fsnotify_mask); if (inode) marks_mask |= READ_ONCE(inode->i_fsnotify_mask); if (inode2) marks_mask |= READ_ONCE(inode2->i_fsnotify_mask); /* * If this is a modify event we may need to clear some ignore masks. * In that case, the object with ignore masks will have the FS_MODIFY * event in its mask. * Otherwise, return if none of the marks care about this type of event. 
*/ test_mask = (mask & ALL_FSNOTIFY_EVENTS); if (!(test_mask & marks_mask)) return 0; iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); if (sbinfo) { iter_info.marks[FSNOTIFY_ITER_TYPE_SB] = fsnotify_first_mark(&sbinfo->sb_marks); } if (mnt) { iter_info.marks[FSNOTIFY_ITER_TYPE_VFSMOUNT] = fsnotify_first_mark(&mnt->mnt_fsnotify_marks); } if (inode) { iter_info.marks[FSNOTIFY_ITER_TYPE_INODE] = fsnotify_first_mark(&inode->i_fsnotify_marks); } if (inode2) { iter_info.marks[inode2_type] = fsnotify_first_mark(&inode2->i_fsnotify_marks); } /* * We need to merge inode/vfsmount/sb mark lists so that e.g. inode mark * ignore masks are properly reflected for mount/sb mark notifications. * That's why this traversal is so complicated... */ while (fsnotify_iter_select_report_types(&iter_info)) { ret = send_to_group(mask, data, data_type, dir, file_name, cookie, &iter_info); if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS)) goto out; fsnotify_iter_next(&iter_info); } ret = 0; out: srcu_read_unlock(&fsnotify_mark_srcu, iter_info.srcu_idx); return ret; } EXPORT_SYMBOL_GPL(fsnotify); static __init int fsnotify_init(void) { int ret; BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 23); ret = init_srcu_struct(&fsnotify_mark_srcu); if (ret) panic("initializing fsnotify_mark_srcu"); fsnotify_mark_connector_cachep = KMEM_CACHE(fsnotify_mark_connector, SLAB_PANIC); return 0; } core_initcall(fsnotify_init); |
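/*
 * Editor's note (illustration, not part of fsnotify.c): the fsnotify()
 * dispatch above is what ultimately feeds backends such as inotify. A
 * minimal, hedged userspace consumer using the inotify interface -- the
 * watched path and event mask are arbitrary example values:
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	ssize_t len;
	char *p;
	int fd, wd;

	fd = inotify_init1(IN_CLOEXEC);
	if (fd < 0) {
		perror("inotify_init1");
		return EXIT_FAILURE;
	}

	wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE | IN_MODIFY);
	if (wd < 0) {
		perror("inotify_add_watch");
		return EXIT_FAILURE;
	}

	len = read(fd, buf, sizeof(buf));	/* blocks until events arrive */
	if (len <= 0) {
		perror("read");
		return EXIT_FAILURE;
	}

	for (p = buf; p < buf + len; ) {
		const struct inotify_event *ev = (const struct inotify_event *)p;

		printf("mask=0x%x name=%s\n", ev->mask, ev->len ? ev->name : "");
		p += sizeof(*ev) + ev->len;
	}

	close(fd);
	return EXIT_SUCCESS;
}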
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Diolan u2c-12 USB-I2C adapter
 *
 * Copyright (c) 2010-2011 Ericsson AB
 *
 * Derived from:
 *  i2c-tiny-usb.c
 *  Copyright (C) 2006-2007 Till Harbaum (Till@Harbaum.org)
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/i2c.h>

#define DRIVER_NAME		"i2c-diolan-u2c"

#define USB_VENDOR_ID_DIOLAN		0x0abf
#define USB_DEVICE_ID_DIOLAN_U2C	0x3370

/* commands via USB, must match command ids in the firmware */
#define CMD_I2C_READ		0x01
#define CMD_I2C_WRITE		0x02
#define CMD_I2C_SCAN		0x03	/* Returns list of detected devices */
#define CMD_I2C_RELEASE_SDA	0x04
#define CMD_I2C_RELEASE_SCL	0x05
#define CMD_I2C_DROP_SDA	0x06
#define CMD_I2C_DROP_SCL	0x07
#define CMD_I2C_READ_SDA	0x08
#define CMD_I2C_READ_SCL	0x09
#define CMD_GET_FW_VERSION	0x0a
#define CMD_GET_SERIAL		0x0b
#define CMD_I2C_START		0x0c
#define CMD_I2C_STOP		0x0d
#define CMD_I2C_REPEATED_START	0x0e
#define CMD_I2C_PUT_BYTE	0x0f
#define CMD_I2C_GET_BYTE	0x10
#define CMD_I2C_PUT_ACK		0x11
#define CMD_I2C_GET_ACK		0x12
#define CMD_I2C_PUT_BYTE_ACK	0x13
#define CMD_I2C_GET_BYTE_ACK	0x14
#define CMD_I2C_SET_SPEED	0x1b
#define CMD_I2C_GET_SPEED	0x1c
#define CMD_I2C_SET_CLK_SYNC	0x24
#define CMD_I2C_GET_CLK_SYNC	0x25
#define CMD_I2C_SET_CLK_SYNC_TO	0x26
#define CMD_I2C_GET_CLK_SYNC_TO	0x27

#define RESP_OK			0x00
#define RESP_FAILED		0x01
#define RESP_BAD_MEMADDR	0x04
#define RESP_DATA_ERR		0x05
#define RESP_NOT_IMPLEMENTED	0x06
#define RESP_NACK		0x07
#define RESP_TIMEOUT 0x09 #define U2C_I2C_SPEED_FAST 0 /* 400 kHz */ #define U2C_I2C_SPEED_STD 1 /* 100 kHz */ #define U2C_I2C_SPEED_2KHZ 242 /* 2 kHz, minimum speed */ #define U2C_I2C_SPEED(f) ((DIV_ROUND_UP(1000000, (f)) - 10) / 2 + 1) #define U2C_I2C_FREQ(s) (1000000 / (2 * (s - 1) + 10)) #define DIOLAN_USB_TIMEOUT 100 /* in ms */ #define DIOLAN_SYNC_TIMEOUT 20 /* in ms */ #define DIOLAN_OUTBUF_LEN 128 #define DIOLAN_FLUSH_LEN (DIOLAN_OUTBUF_LEN - 4) #define DIOLAN_INBUF_LEN 256 /* Maximum supported receive length */ /* Structure to hold all of our device specific stuff */ struct i2c_diolan_u2c { u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */ u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */ int ep_in, ep_out; /* Endpoints */ struct usb_device *usb_dev; /* the usb device for this device */ struct usb_interface *interface;/* the interface for this device */ struct i2c_adapter adapter; /* i2c related things */ int olen; /* Output buffer length */ int ocount; /* Number of enqueued messages */ }; static uint frequency = I2C_MAX_STANDARD_MODE_FREQ; /* I2C clock frequency in Hz */ module_param(frequency, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz"); /* usb layer */ /* Send command to device, and get response. */ static int diolan_usb_transfer(struct i2c_diolan_u2c *dev) { int ret = 0; int actual; int i; if (!dev->olen || !dev->ocount) return -EINVAL; ret = usb_bulk_msg(dev->usb_dev, usb_sndbulkpipe(dev->usb_dev, dev->ep_out), dev->obuffer, dev->olen, &actual, DIOLAN_USB_TIMEOUT); if (!ret) { for (i = 0; i < dev->ocount; i++) { int tmpret; tmpret = usb_bulk_msg(dev->usb_dev, usb_rcvbulkpipe(dev->usb_dev, dev->ep_in), dev->ibuffer, sizeof(dev->ibuffer), &actual, DIOLAN_USB_TIMEOUT); /* * Stop command processing if a previous command * returned an error. * Note that we still need to retrieve all messages. */ if (ret < 0) continue; ret = tmpret; if (ret == 0 && actual > 0) { switch (dev->ibuffer[actual - 1]) { case RESP_NACK: /* * Return ENXIO if NACK was received as * response to the address phase, * EIO otherwise */ ret = i == 1 ? -ENXIO : -EIO; break; case RESP_TIMEOUT: ret = -ETIMEDOUT; break; case RESP_OK: /* strip off return code */ ret = actual - 1; break; default: ret = -EIO; break; } } } } dev->olen = 0; dev->ocount = 0; return ret; } static int diolan_write_cmd(struct i2c_diolan_u2c *dev, bool flush) { if (flush || dev->olen >= DIOLAN_FLUSH_LEN) return diolan_usb_transfer(dev); return 0; } /* Send command (no data) */ static int diolan_usb_cmd(struct i2c_diolan_u2c *dev, u8 command, bool flush) { dev->obuffer[dev->olen++] = command; dev->ocount++; return diolan_write_cmd(dev, flush); } /* Send command with one byte of data */ static int diolan_usb_cmd_data(struct i2c_diolan_u2c *dev, u8 command, u8 data, bool flush) { dev->obuffer[dev->olen++] = command; dev->obuffer[dev->olen++] = data; dev->ocount++; return diolan_write_cmd(dev, flush); } /* Send command with two bytes of data */ static int diolan_usb_cmd_data2(struct i2c_diolan_u2c *dev, u8 command, u8 d1, u8 d2, bool flush) { dev->obuffer[dev->olen++] = command; dev->obuffer[dev->olen++] = d1; dev->obuffer[dev->olen++] = d2; dev->ocount++; return diolan_write_cmd(dev, flush); } /* * Flush input queue. * If we don't do this at startup and the controller has queued up * messages which were not retrieved, it will stop responding * at some point. 
*/ static void diolan_flush_input(struct i2c_diolan_u2c *dev) { int i; for (i = 0; i < 10; i++) { int actual = 0; int ret; ret = usb_bulk_msg(dev->usb_dev, usb_rcvbulkpipe(dev->usb_dev, dev->ep_in), dev->ibuffer, sizeof(dev->ibuffer), &actual, DIOLAN_USB_TIMEOUT); if (ret < 0 || actual == 0) break; } if (i == 10) dev_err(&dev->interface->dev, "Failed to flush input buffer\n"); } static int diolan_i2c_start(struct i2c_diolan_u2c *dev) { return diolan_usb_cmd(dev, CMD_I2C_START, false); } static int diolan_i2c_repeated_start(struct i2c_diolan_u2c *dev) { return diolan_usb_cmd(dev, CMD_I2C_REPEATED_START, false); } static int diolan_i2c_stop(struct i2c_diolan_u2c *dev) { return diolan_usb_cmd(dev, CMD_I2C_STOP, true); } static int diolan_i2c_get_byte_ack(struct i2c_diolan_u2c *dev, bool ack, u8 *byte) { int ret; ret = diolan_usb_cmd_data(dev, CMD_I2C_GET_BYTE_ACK, ack, true); if (ret > 0) *byte = dev->ibuffer[0]; else if (ret == 0) ret = -EIO; return ret; } static int diolan_i2c_put_byte_ack(struct i2c_diolan_u2c *dev, u8 byte) { return diolan_usb_cmd_data(dev, CMD_I2C_PUT_BYTE_ACK, byte, false); } static int diolan_set_speed(struct i2c_diolan_u2c *dev, u8 speed) { return diolan_usb_cmd_data(dev, CMD_I2C_SET_SPEED, speed, true); } /* Enable or disable clock synchronization (stretching) */ static int diolan_set_clock_synch(struct i2c_diolan_u2c *dev, bool enable) { return diolan_usb_cmd_data(dev, CMD_I2C_SET_CLK_SYNC, enable, true); } /* Set clock synchronization timeout in ms */ static int diolan_set_clock_synch_timeout(struct i2c_diolan_u2c *dev, int ms) { int to_val = ms * 10; return diolan_usb_cmd_data2(dev, CMD_I2C_SET_CLK_SYNC_TO, to_val & 0xff, (to_val >> 8) & 0xff, true); } static void diolan_fw_version(struct i2c_diolan_u2c *dev) { int ret; ret = diolan_usb_cmd(dev, CMD_GET_FW_VERSION, true); if (ret >= 2) dev_info(&dev->interface->dev, "Diolan U2C firmware version %u.%u\n", (unsigned int)dev->ibuffer[0], (unsigned int)dev->ibuffer[1]); } static void diolan_get_serial(struct i2c_diolan_u2c *dev) { int ret; u32 serial; ret = diolan_usb_cmd(dev, CMD_GET_SERIAL, true); if (ret >= 4) { serial = le32_to_cpu(*(u32 *)dev->ibuffer); dev_info(&dev->interface->dev, "Diolan U2C serial number %u\n", serial); } } static int diolan_init(struct i2c_diolan_u2c *dev) { int speed, ret; if (frequency >= 2 * I2C_MAX_STANDARD_MODE_FREQ) { speed = U2C_I2C_SPEED_FAST; frequency = I2C_MAX_FAST_MODE_FREQ; } else if (frequency >= I2C_MAX_STANDARD_MODE_FREQ || frequency == 0) { speed = U2C_I2C_SPEED_STD; frequency = I2C_MAX_STANDARD_MODE_FREQ; } else { speed = U2C_I2C_SPEED(frequency); if (speed > U2C_I2C_SPEED_2KHZ) speed = U2C_I2C_SPEED_2KHZ; frequency = U2C_I2C_FREQ(speed); } dev_info(&dev->interface->dev, "Diolan U2C at USB bus %03d address %03d speed %d Hz\n", dev->usb_dev->bus->busnum, dev->usb_dev->devnum, frequency); diolan_flush_input(dev); diolan_fw_version(dev); diolan_get_serial(dev); /* Set I2C speed */ ret = diolan_set_speed(dev, speed); if (ret < 0) return ret; /* Configure I2C clock synchronization */ ret = diolan_set_clock_synch(dev, speed != U2C_I2C_SPEED_FAST); if (ret < 0) return ret; if (speed != U2C_I2C_SPEED_FAST) ret = diolan_set_clock_synch_timeout(dev, DIOLAN_SYNC_TIMEOUT); return ret; } /* i2c layer */ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct i2c_diolan_u2c *dev = i2c_get_adapdata(adapter); struct i2c_msg *pmsg; int i, j; int ret, sret; ret = diolan_i2c_start(dev); if (ret < 0) return ret; for (i = 0; i < num; i++) { pmsg = 
&msgs[i]; if (i) { ret = diolan_i2c_repeated_start(dev); if (ret < 0) goto abort; } ret = diolan_i2c_put_byte_ack(dev, i2c_8bit_addr_from_msg(pmsg)); if (ret < 0) goto abort; if (pmsg->flags & I2C_M_RD) { for (j = 0; j < pmsg->len; j++) { u8 byte; bool ack = j < pmsg->len - 1; /* * Don't send NACK if this is the first byte * of a SMBUS_BLOCK message. */ if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN)) ack = true; ret = diolan_i2c_get_byte_ack(dev, ack, &byte); if (ret < 0) goto abort; /* * Adjust count if first received byte is length */ if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN)) { if (byte == 0 || byte > I2C_SMBUS_BLOCK_MAX) { ret = -EPROTO; goto abort; } pmsg->len += byte; } pmsg->buf[j] = byte; } } else { for (j = 0; j < pmsg->len; j++) { ret = diolan_i2c_put_byte_ack(dev, pmsg->buf[j]); if (ret < 0) goto abort; } } } ret = num; abort: sret = diolan_i2c_stop(dev); if (sret < 0 && ret >= 0) ret = sret; return ret; } /* * Return list of supported functionality. */ static u32 diolan_usb_func(struct i2c_adapter *a) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL; } static const struct i2c_algorithm diolan_usb_algorithm = { .xfer = diolan_usb_xfer, .functionality = diolan_usb_func, }; /* device layer */ static const struct usb_device_id diolan_u2c_table[] = { { USB_DEVICE(USB_VENDOR_ID_DIOLAN, USB_DEVICE_ID_DIOLAN_U2C) }, { } }; MODULE_DEVICE_TABLE(usb, diolan_u2c_table); static void diolan_u2c_free(struct i2c_diolan_u2c *dev) { usb_put_dev(dev->usb_dev); kfree(dev); } static int diolan_u2c_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_host_interface *hostif = interface->cur_altsetting; struct i2c_diolan_u2c *dev; int ret; if (hostif->desc.bInterfaceNumber != 0 || hostif->desc.bNumEndpoints < 2) return -ENODEV; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { ret = -ENOMEM; goto error; } dev->ep_out = hostif->endpoint[0].desc.bEndpointAddress; dev->ep_in = hostif->endpoint[1].desc.bEndpointAddress; dev->usb_dev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = interface; /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); /* setup i2c adapter description */ dev->adapter.owner = THIS_MODULE; dev->adapter.class = I2C_CLASS_HWMON; dev->adapter.algo = &diolan_usb_algorithm; i2c_set_adapdata(&dev->adapter, dev); snprintf(dev->adapter.name, sizeof(dev->adapter.name), DRIVER_NAME " at bus %03d device %03d", dev->usb_dev->bus->busnum, dev->usb_dev->devnum); dev->adapter.dev.parent = &dev->interface->dev; /* initialize diolan i2c interface */ ret = diolan_init(dev); if (ret < 0) { dev_err(&interface->dev, "failed to initialize adapter\n"); goto error_free; } /* and finally attach to i2c layer */ ret = i2c_add_adapter(&dev->adapter); if (ret < 0) goto error_free; dev_dbg(&interface->dev, "connected " DRIVER_NAME "\n"); return 0; error_free: usb_set_intfdata(interface, NULL); diolan_u2c_free(dev); error: return ret; } static void diolan_u2c_disconnect(struct usb_interface *interface) { struct i2c_diolan_u2c *dev = usb_get_intfdata(interface); i2c_del_adapter(&dev->adapter); usb_set_intfdata(interface, NULL); diolan_u2c_free(dev); dev_dbg(&interface->dev, "disconnected\n"); } static struct usb_driver diolan_u2c_driver = { .name = DRIVER_NAME, .probe = diolan_u2c_probe, .disconnect = diolan_u2c_disconnect, .id_table = diolan_u2c_table, }; module_usb_driver(diolan_u2c_driver); 
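/*
 * Editor's note (illustration only): the U2C_I2C_SPEED()/U2C_I2C_FREQ()
 * macros near the top of this file map a requested bus frequency to the
 * adapter's speed code and back, and diolan_init() clamps the result to
 * the 2 kHz minimum. A standalone sketch of the same arithmetic, with the
 * macros redefined locally and arbitrary example frequencies:
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define U2C_SPEED(f)		((DIV_ROUND_UP(1000000, (f)) - 10) / 2 + 1)
#define U2C_FREQ(s)		(1000000 / (2 * ((s) - 1) + 10))
#define U2C_SPEED_2KHZ		242	/* slowest supported code */

int main(void)
{
	unsigned int freqs[] = { 50000, 10000, 2000 };	/* Hz, below 100 kHz */
	unsigned int i;

	for (i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++) {
		unsigned int speed = U2C_SPEED(freqs[i]);

		if (speed > U2C_SPEED_2KHZ)
			speed = U2C_SPEED_2KHZ;	/* clamp like the driver */
		printf("%6u Hz -> code %3u -> %u Hz actual\n",
		       freqs[i], speed, U2C_FREQ(speed));
	}
	return 0;
}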
MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); MODULE_DESCRIPTION(DRIVER_NAME " driver"); MODULE_LICENSE("GPL"); |
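/*
 * Example (not part of the driver above): a minimal user-space sketch showing
 * how a device behind the Diolan adapter can be reached through the standard
 * i2c-dev interface once this driver has registered the bus. The bus number
 * (/dev/i2c-3) and the 0x50 slave address are illustrative assumptions only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char reg = 0x00, val;
	int fd = open("/dev/i2c-3", O_RDWR);	/* assumed bus number */

	if (fd < 0)
		return 1;
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0)	/* assumed slave address */
		goto out;
	/* write the register address, then read one byte back */
	if (write(fd, &reg, 1) == 1 && read(fd, &val, 1) == 1)
		printf("reg 0x%02x = 0x%02x\n", reg, val);
out:
	close(fd);
	return 0;
}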
// SPDX-License-Identifier: GPL-2.0-only /* Common methods for dibusb-based-receivers. * * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #include "dibusb.h" MODULE_DESCRIPTION("Common methods for DIB3000MC"); MODULE_LICENSE("GPL"); /* 3000MC/P stuff */ // Config Adjacent channels Perf -cal22 static struct dibx000_agc_config dib3000p_mt2060_agc_config = { .band_caps = BAND_VHF | BAND_UHF, .setup = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0), .agc1_max = 48497, .agc1_min = 23593, .agc2_max = 46531, .agc2_min = 24904, .agc1_pt1 = 0x65, .agc1_pt2 = 0x69, .agc1_slope1 = 0x51, .agc1_slope2 = 0x27, .agc2_pt1 = 0, .agc2_pt2 = 0x33, .agc2_slope1 = 0x35, .agc2_slope2 = 0x37, }; static struct dib3000mc_config stk3000p_dib3000p_config = { &dib3000p_mt2060_agc_config, .max_time = 0x196, .ln_adc_level = 0x1cc7, .output_mpeg2_in_188_bytes = 1, .agc_command1 = 1, .agc_command2 = 1, }; static struct dibx000_agc_config dib3000p_panasonic_agc_config = { .band_caps = BAND_VHF | BAND_UHF, .setup = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0), .agc1_max = 56361, .agc1_min = 22282, .agc2_max = 47841, .agc2_min = 36045, .agc1_pt1 = 0x3b, .agc1_pt2 = 0x6b, .agc1_slope1 = 0x55, .agc1_slope2 = 0x1d, .agc2_pt1 = 0, .agc2_pt2 = 0x0a, .agc2_slope1 = 0x95, .agc2_slope2 = 0x1e, }; static struct dib3000mc_config mod3000p_dib3000p_config = { &dib3000p_panasonic_agc_config, .max_time = 0x51, .ln_adc_level = 0x1cc7, .output_mpeg2_in_188_bytes = 1, .agc_command1 = 1, .agc_command2 = 1, }; int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap) { if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON && le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_LITEON_DVB_T_WARM) { msleep(1000); } adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, DEFAULT_DIB3000P_I2C_ADDRESS, &mod3000p_dib3000p_config); if ((adap->fe_adap[0].fe) == NULL) adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, DEFAULT_DIB3000MC_I2C_ADDRESS, &mod3000p_dib3000p_config); if ((adap->fe_adap[0].fe) != NULL) { if (adap->priv != NULL) { struct dibusb_state *st = adap->priv; st->ops.pid_parse = dib3000mc_pid_parse; st->ops.pid_ctrl = dib3000mc_pid_control; } return 0; } return -ENODEV; } EXPORT_SYMBOL(dibusb_dib3000mc_frontend_attach); static struct mt2060_config stk3000p_mt2060_config = { 0x60 }; int dibusb_dib3000mc_tuner_attach(struct dvb_usb_adapter *adap) { struct dibusb_state *st = adap->priv; u8 a,b; u16 if1 = 1220; struct i2c_adapter *tun_i2c; // First IF calibration for Liteon Sticks if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON && le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_LITEON_DVB_T_WARM) { dibusb_read_eeprom_byte(adap->dev,0x7E,&a); dibusb_read_eeprom_byte(adap->dev,0x7F,&b); if (a == 0x00) if1 += b; else if (a == 0x80)
if1 -= b; else warn("LITE-ON DVB-T: Strange IF1 calibration :%2X %2X\n", a, b); } else if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_DIBCOM && le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_DIBCOM_MOD3001_WARM) { u8 desc; dibusb_read_eeprom_byte(adap->dev, 7, &desc); if (desc == 2) { a = 127; do { dibusb_read_eeprom_byte(adap->dev, a, &desc); a--; } while (a > 7 && (desc == 0xff || desc == 0x00)); if (desc & 0x80) if1 -= (0xff - desc); else if1 += desc; } } tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe_adap[0].fe, 1); if (dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c, &stk3000p_mt2060_config, if1) == NULL) { /* not found - use panasonic pll parameters */ if (dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, tun_i2c, DVB_PLL_ENV57H1XD5) == NULL) return -ENOMEM; } else { st->mt2060_present = 1; /* set the correct parameters for the dib3000p */ dib3000mc_set_config(adap->fe_adap[0].fe, &stk3000p_dib3000p_config); } return 0; } EXPORT_SYMBOL(dibusb_dib3000mc_tuner_attach);
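/*
 * Illustration (not part of the driver above): how the MOD3001 branch turns
 * the EEPROM byte it finds into a correction of the default if1 value of
 * 1220. A byte with bit 7 set encodes a negative offset of (0xff - desc);
 * anything else is a positive offset. The sample bytes are made up purely
 * for demonstration.
 */
#include <stdio.h>

static unsigned int apply_if1_offset(unsigned int if1, unsigned char desc)
{
	if (desc & 0x80)
		if1 -= (0xff - desc);	/* e.g. 0xf6 -> subtract 9 */
	else
		if1 += desc;		/* e.g. 0x0c -> add 12 */
	return if1;
}

int main(void)
{
	printf("%u\n", apply_if1_offset(1220, 0xf6));	/* prints 1211 */
	printf("%u\n", apply_if1_offset(1220, 0x0c));	/* prints 1232 */
	return 0;
}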
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_UNALIGNED_H #define __LINUX_UNALIGNED_H /* * This is the most generic implementation of unaligned accesses * and should work almost anywhere. */ #include <linux/unaligned/packed_struct.h> #include <asm/byteorder.h> #include <vdso/unaligned.h> #define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr)) #define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr)) static inline u16 get_unaligned_le16(const void *p) { return le16_to_cpu(__get_unaligned_t(__le16, p)); } static inline u32 get_unaligned_le32(const void *p) { return le32_to_cpu(__get_unaligned_t(__le32, p)); } static inline u64 get_unaligned_le64(const void *p) { return le64_to_cpu(__get_unaligned_t(__le64, p)); } static inline void put_unaligned_le16(u16 val, void *p) { __put_unaligned_t(__le16, cpu_to_le16(val), p); } static inline void put_unaligned_le32(u32 val, void *p) { __put_unaligned_t(__le32, cpu_to_le32(val), p); } static inline void put_unaligned_le64(u64 val, void *p) { __put_unaligned_t(__le64, cpu_to_le64(val), p); } static inline u16 get_unaligned_be16(const void *p) { return be16_to_cpu(__get_unaligned_t(__be16, p)); } static inline u32 get_unaligned_be32(const void *p) { return be32_to_cpu(__get_unaligned_t(__be32, p)); } static inline u64 get_unaligned_be64(const void *p) { return be64_to_cpu(__get_unaligned_t(__be64, p)); } static inline void put_unaligned_be16(u16 val, void *p) { __put_unaligned_t(__be16, cpu_to_be16(val), p); } static inline void put_unaligned_be32(u32 val, void *p) { __put_unaligned_t(__be32, cpu_to_be32(val), p); } static inline void put_unaligned_be64(u64 val, void *p) { __put_unaligned_t(__be64, cpu_to_be64(val), p); } static inline u32 __get_unaligned_be24(const u8 *p) { return p[0] << 16 | p[1] << 8 | p[2]; } static inline u32 get_unaligned_be24(const void *p) { return __get_unaligned_be24(p); } static inline u32 __get_unaligned_le24(const u8 *p) { return p[0] | p[1] << 8 | p[2] << 16; } static inline u32 get_unaligned_le24(const void *p) { return __get_unaligned_le24(p); } static inline void __put_unaligned_be24(const u32 val, u8 *p) { *p++ = (val >> 16) & 0xff; *p++ = (val >> 8) & 0xff; *p++ = val & 0xff; } static inline void put_unaligned_be24(const u32 val, void *p) { __put_unaligned_be24(val, p); } static inline void __put_unaligned_le24(const u32 val, u8 *p) { *p++ = val & 0xff; *p++ = (val >> 8) & 0xff; *p++ = (val >> 16) & 0xff; } static inline void put_unaligned_le24(const u32 val, void *p) { __put_unaligned_le24(val, p); } static inline void __put_unaligned_be48(const u64 val, u8 *p) { *p++ = (val >> 40) & 0xff; *p++ = (val >> 32) & 0xff; *p++ = (val >> 24) & 0xff; *p++ = (val >> 16) & 0xff; *p++ = (val >> 8) & 0xff; *p++ = val & 0xff; } static inline void put_unaligned_be48(const u64 val, void *p) { __put_unaligned_be48(val, p); } static inline u64 __get_unaligned_be48(const u8 *p) { return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; } static inline u64
get_unaligned_be48(const void *p) { return __get_unaligned_be48(p); } #endif /* __LINUX_UNALIGNED_H */
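/*
 * Illustration (not from the header above): the same byte-order semantics
 * re-stated as a small user-space program, since the kernel-only types and
 * __get_unaligned_t() cannot be used directly here. On the sample buffer
 * below, get_unaligned_le32() would yield 0x04030201 and get_unaligned_be24()
 * at offset 4 would yield 0xaabbcc; the helpers mirror that composition.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sample_le32(const uint8_t *p)
{
	/* byte 0 is least significant, matching get_unaligned_le32() */
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint32_t sample_be24(const uint8_t *p)
{
	/* byte 0 is most significant, matching get_unaligned_be24() */
	return (uint32_t)p[0] << 16 | (uint32_t)p[1] << 8 | p[2];
}

int main(void)
{
	const uint8_t buf[] = { 0x01, 0x02, 0x03, 0x04, 0xaa, 0xbb, 0xcc };

	printf("le32: 0x%08x\n", sample_le32(buf));	/* 0x04030201 */
	printf("be24: 0x%06x\n", sample_be24(buf + 4));	/* 0xaabbcc */
	return 0;
}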
// SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/memfd.h> #include <linux/memremap.h> #include <linux/pagemap.h> #include
<linux/rmap.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/secretmem.h> #include <linux/sched/signal.h> #include <linux/rwsem.h> #include <linux/hugetlb.h> #include <linux/migrate.h> #include <linux/mm_inline.h> #include <linux/pagevec.h> #include <linux/sched/mm.h> #include <linux/shmem_fs.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include "internal.h" struct follow_page_context { struct dev_pagemap *pgmap; unsigned int page_mask; }; static inline void sanity_check_pinned_pages(struct page **pages, unsigned long npages) { if (!IS_ENABLED(CONFIG_DEBUG_VM)) return; /* * We only pin anonymous pages if they are exclusive. Once pinned, we * can no longer turn them possibly shared and PageAnonExclusive() will * stick around until the page is freed. * * We'd like to verify that our pinned anonymous pages are still mapped * exclusively. The issue with anon THP is that we don't know how * they are/were mapped when pinning them. However, for anon * THP we can assume that either the given page (PTE-mapped THP) or * the head page (PMD-mapped THP) should be PageAnonExclusive(). If * neither is the case, there is certainly something wrong. */ for (; npages; npages--, pages++) { struct page *page = *pages; struct folio *folio = page_folio(page); if (is_zero_page(page) || !folio_test_anon(folio)) continue; if (!folio_test_large(folio) || folio_test_hugetlb(folio)) VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page); else /* Either a PTE-mapped or a PMD-mapped THP. */ VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) && !PageAnonExclusive(page), page); } } /* * Return the folio with ref appropriately incremented, * or NULL if that failed. */ static inline struct folio *try_get_folio(struct page *page, int refs) { struct folio *folio; retry: folio = page_folio(page); if (WARN_ON_ONCE(folio_ref_count(folio) < 0)) return NULL; if (unlikely(!folio_ref_try_add(folio, refs))) return NULL; /* * At this point we have a stable reference to the folio; but it * could be that between calling page_folio() and the refcount * increment, the folio was split, in which case we'd end up * holding a reference on a folio that has nothing to do with the page * we were given anymore. * So now that the folio is stable, recheck that the page still * belongs to this folio. */ if (unlikely(page_folio(page) != folio)) { if (!put_devmap_managed_folio_refs(folio, refs)) folio_put_refs(folio, refs); goto retry; } return folio; } static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) { if (flags & FOLL_PIN) { if (is_zero_folio(folio)) return; node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs); if (folio_test_large(folio)) atomic_sub(refs, &folio->_pincount); else refs *= GUP_PIN_COUNTING_BIAS; } if (!put_devmap_managed_folio_refs(folio, refs)) folio_put_refs(folio, refs); } /** * try_grab_folio() - add a folio's refcount by a flag-dependent amount * @folio: pointer to folio to be grabbed * @refs: the value to (effectively) add to the folio's refcount * @flags: gup flags: these are the FOLL_* flag values * * This might not do anything at all, depending on the flags argument. * * "grab" names in this file mean, "look at flags to decide whether to use * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount. * * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same * time. * * Return: 0 for success, or if no action was required (if neither FOLL_PIN * nor FOLL_GET was set, nothing is done). 
A negative error code for failure: * * -ENOMEM FOLL_GET or FOLL_PIN was set, but the folio could not * be grabbed. * * It is called when we have a stable reference for the folio, typically in * GUP slow path. */ int __must_check try_grab_folio(struct folio *folio, int refs, unsigned int flags) { if (WARN_ON_ONCE(folio_ref_count(folio) <= 0)) return -ENOMEM; if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page))) return -EREMOTEIO; if (flags & FOLL_GET) folio_ref_add(folio, refs); else if (flags & FOLL_PIN) { /* * Don't take a pin on the zero page - it's not going anywhere * and it is used in a *lot* of places. */ if (is_zero_folio(folio)) return 0; /* * Increment the normal page refcount field at least once, * so that the page really is pinned. */ if (folio_test_large(folio)) { folio_ref_add(folio, refs); atomic_add(refs, &folio->_pincount); } else { folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS); } node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); } return 0; } /** * unpin_user_page() - release a dma-pinned page * @page: pointer to page to be released * * Pages that were pinned via pin_user_pages*() must be released via either * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so * that such pages can be separately tracked and uniquely handled. In * particular, interactions with RDMA and filesystems need special handling. */ void unpin_user_page(struct page *page) { sanity_check_pinned_pages(&page, 1); gup_put_folio(page_folio(page), 1, FOLL_PIN); } EXPORT_SYMBOL(unpin_user_page); /** * unpin_folio() - release a dma-pinned folio * @folio: pointer to folio to be released * * Folios that were pinned via memfd_pin_folios() or other similar routines * must be released either using unpin_folio() or unpin_folios(). */ void unpin_folio(struct folio *folio) { gup_put_folio(folio, 1, FOLL_PIN); } EXPORT_SYMBOL_GPL(unpin_folio); /** * folio_add_pin - Try to get an additional pin on a pinned folio * @folio: The folio to be pinned * * Get an additional pin on a folio we already have a pin on. Makes no change * if the folio is a zero_page. */ void folio_add_pin(struct folio *folio) { if (is_zero_folio(folio)) return; /* * Similar to try_grab_folio(): be sure to *also* increment the normal * page refcount field at least once, so that the page really is * pinned. */ if (folio_test_large(folio)) { WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1); folio_ref_inc(folio); atomic_inc(&folio->_pincount); } else { WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS); folio_ref_add(folio, GUP_PIN_COUNTING_BIAS); } } static inline struct folio *gup_folio_range_next(struct page *start, unsigned long npages, unsigned long i, unsigned int *ntails) { struct page *next = nth_page(start, i); struct folio *folio = page_folio(next); unsigned int nr = 1; if (folio_test_large(folio)) nr = min_t(unsigned int, npages - i, folio_nr_pages(folio) - folio_page_idx(folio, next)); *ntails = nr; return folio; } static inline struct folio *gup_folio_next(struct page **list, unsigned long npages, unsigned long i, unsigned int *ntails) { struct folio *folio = page_folio(list[i]); unsigned int nr; for (nr = i + 1; nr < npages; nr++) { if (page_folio(list[nr]) != folio) break; } *ntails = nr - i; return folio; } /** * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages * @pages: array of pages to be maybe marked dirty, and definitely released. * @npages: number of pages in the @pages array. 
* @make_dirty: whether to mark the pages dirty * * "gup-pinned page" refers to a page that has had one of the get_user_pages() * variants called on that page. * * For each page in the @pages array, make that page (or its head page, if a * compound page) dirty, if @make_dirty is true, and if the page was previously * listed as clean. In any case, releases all pages using unpin_user_page(), * possibly via unpin_user_pages(), for the non-dirty case. * * Please see the unpin_user_page() documentation for details. * * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is * required, then the caller should a) verify that this is really correct, * because _lock() is usually required, and b) hand code it: * set_page_dirty_lock(), unpin_user_page(). * */ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, bool make_dirty) { unsigned long i; struct folio *folio; unsigned int nr; if (!make_dirty) { unpin_user_pages(pages, npages); return; } sanity_check_pinned_pages(pages, npages); for (i = 0; i < npages; i += nr) { folio = gup_folio_next(pages, npages, i, &nr); /* * Checking PageDirty at this point may race with * clear_page_dirty_for_io(), but that's OK. Two key * cases: * * 1) This code sees the page as already dirty, so it * skips the call to set_page_dirty(). That could happen * because clear_page_dirty_for_io() called * folio_mkclean(), followed by set_page_dirty(). * However, now the page is going to get written back, * which meets the original intention of setting it * dirty, so all is well: clear_page_dirty_for_io() goes * on to call TestClearPageDirty(), and write the page * back. * * 2) This code sees the page as clean, so it calls * set_page_dirty(). The page stays dirty, despite being * written back, so it gets written back again in the * next writeback cycle. This is harmless. */ if (!folio_test_dirty(folio)) { folio_lock(folio); folio_mark_dirty(folio); folio_unlock(folio); } gup_put_folio(folio, nr, FOLL_PIN); } } EXPORT_SYMBOL(unpin_user_pages_dirty_lock); /** * unpin_user_page_range_dirty_lock() - release and optionally dirty * gup-pinned page range * * @page: the starting page of a range maybe marked dirty, and definitely released. * @npages: number of consecutive pages to release. * @make_dirty: whether to mark the pages dirty * * "gup-pinned page range" refers to a range of pages that has had one of the * pin_user_pages() variants called on that page. * * For the page ranges defined by [page .. page+npages], make that range (or * its head pages, if a compound page) dirty, if @make_dirty is true, and if the * page range was previously listed as clean. * * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is * required, then the caller should a) verify that this is really correct, * because _lock() is usually required, and b) hand code it: * set_page_dirty_lock(), unpin_user_page(). 
* */ void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, bool make_dirty) { unsigned long i; struct folio *folio; unsigned int nr; for (i = 0; i < npages; i += nr) { folio = gup_folio_range_next(page, npages, i, &nr); if (make_dirty && !folio_test_dirty(folio)) { folio_lock(folio); folio_mark_dirty(folio); folio_unlock(folio); } gup_put_folio(folio, nr, FOLL_PIN); } } EXPORT_SYMBOL(unpin_user_page_range_dirty_lock); static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages) { unsigned long i; struct folio *folio; unsigned int nr; /* * Don't perform any sanity checks because we might have raced with * fork() and some anonymous pages might now actually be shared -- * which is why we're unpinning after all. */ for (i = 0; i < npages; i += nr) { folio = gup_folio_next(pages, npages, i, &nr); gup_put_folio(folio, nr, FOLL_PIN); } } /** * unpin_user_pages() - release an array of gup-pinned pages. * @pages: array of pages to be marked dirty and released. * @npages: number of pages in the @pages array. * * For each page in the @pages array, release the page using unpin_user_page(). * * Please see the unpin_user_page() documentation for details. */ void unpin_user_pages(struct page **pages, unsigned long npages) { unsigned long i; struct folio *folio; unsigned int nr; /* * If this WARN_ON() fires, then the system *might* be leaking pages (by * leaving them pinned), but probably not. More likely, gup/pup returned * a hard -ERRNO error to the caller, who erroneously passed it here. */ if (WARN_ON(IS_ERR_VALUE(npages))) return; sanity_check_pinned_pages(pages, npages); for (i = 0; i < npages; i += nr) { folio = gup_folio_next(pages, npages, i, &nr); gup_put_folio(folio, nr, FOLL_PIN); } } EXPORT_SYMBOL(unpin_user_pages); /** * unpin_user_folio() - release pages of a folio * @folio: pointer to folio to be released * @npages: number of pages of same folio * * Release npages of the folio */ void unpin_user_folio(struct folio *folio, unsigned long npages) { gup_put_folio(folio, npages, FOLL_PIN); } EXPORT_SYMBOL(unpin_user_folio); /** * unpin_folios() - release an array of gup-pinned folios. * @folios: array of folios to be marked dirty and released. * @nfolios: number of folios in the @folios array. * * For each folio in the @folios array, release the folio using gup_put_folio. * * Please see the unpin_folio() documentation for details. */ void unpin_folios(struct folio **folios, unsigned long nfolios) { unsigned long i = 0, j; /* * If this WARN_ON() fires, then the system *might* be leaking folios * (by leaving them pinned), but probably not. More likely, gup/pup * returned a hard -ERRNO error to the caller, who erroneously passed * it here. */ if (WARN_ON(IS_ERR_VALUE(nfolios))) return; while (i < nfolios) { for (j = i + 1; j < nfolios; j++) if (folios[i] != folios[j]) break; if (folios[i]) gup_put_folio(folios[i], j - i, FOLL_PIN); i = j; } } EXPORT_SYMBOL_GPL(unpin_folios); /* * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's * lifecycle. Avoid setting the bit unless necessary, or it might cause write * cache bouncing on large SMP machines for concurrent pinned gups. 
*/ static inline void mm_set_has_pinned_flag(unsigned long *mm_flags) { if (!test_bit(MMF_HAS_PINNED, mm_flags)) set_bit(MMF_HAS_PINNED, mm_flags); } #ifdef CONFIG_MMU #ifdef CONFIG_HAVE_GUP_FAST static int record_subpages(struct page *page, unsigned long sz, unsigned long addr, unsigned long end, struct page **pages) { struct page *start_page; int nr; start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT); for (nr = 0; addr != end; nr++, addr += PAGE_SIZE) pages[nr] = nth_page(start_page, nr); return nr; } /** * try_grab_folio_fast() - Attempt to get or pin a folio in fast path. * @page: pointer to page to be grabbed * @refs: the value to (effectively) add to the folio's refcount * @flags: gup flags: these are the FOLL_* flag values. * * "grab" names in this file mean, "look at flags to decide whether to use * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount. * * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the * same time. (That's true throughout the get_user_pages*() and * pin_user_pages*() APIs.) Cases: * * FOLL_GET: folio's refcount will be incremented by @refs. * * FOLL_PIN on large folios: folio's refcount will be incremented by * @refs, and its pincount will be incremented by @refs. * * FOLL_PIN on single-page folios: folio's refcount will be incremented by * @refs * GUP_PIN_COUNTING_BIAS. * * Return: The folio containing @page (with refcount appropriately * incremented) for success, or NULL upon failure. If neither FOLL_GET * nor FOLL_PIN was set, that's considered failure, and furthermore, * a likely bug in the caller, so a warning is also emitted. * * It uses add ref unless zero to elevate the folio refcount and must be called * in fast path only. */ static struct folio *try_grab_folio_fast(struct page *page, int refs, unsigned int flags) { struct folio *folio; /* Raise warn if it is not called in fast GUP */ VM_WARN_ON_ONCE(!irqs_disabled()); if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0)) return NULL; if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) return NULL; if (flags & FOLL_GET) return try_get_folio(page, refs); /* FOLL_PIN is set */ /* * Don't take a pin on the zero page - it's not going anywhere * and it is used in a *lot* of places. */ if (is_zero_page(page)) return page_folio(page); folio = try_get_folio(page, refs); if (!folio) return NULL; /* * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a * right zone, so fail and let the caller fall back to the slow * path. */ if (unlikely((flags & FOLL_LONGTERM) && !folio_is_longterm_pinnable(folio))) { if (!put_devmap_managed_folio_refs(folio, refs)) folio_put_refs(folio, refs); return NULL; } /* * When pinning a large folio, use an exact count to track it. * * However, be sure to *also* increment the normal folio * refcount field at least once, so that the folio really * is pinned. That's why the refcount from the earlier * try_get_folio() is left intact. */ if (folio_test_large(folio)) atomic_add(refs, &folio->_pincount); else folio_ref_add(folio, refs * (GUP_PIN_COUNTING_BIAS - 1)); /* * Adjust the pincount before re-checking the PTE for changes. * This is essentially a smp_mb() and is paired with a memory * barrier in folio_try_share_anon_rmap_*(). 
*/ smp_mb__after_atomic(); node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); return folio; } #endif /* CONFIG_HAVE_GUP_FAST */ static struct page *no_page_table(struct vm_area_struct *vma, unsigned int flags, unsigned long address) { if (!(flags & FOLL_DUMP)) return NULL; /* * When core dumping, we don't want to allocate unnecessary pages or * page tables. Return error instead of NULL to skip handle_mm_fault, * then get_dump_page() will return NULL to leave a hole in the dump. * But we can only make this optimization where a hole would surely * be zero-filled if handle_mm_fault() actually did handle it. */ if (is_vm_hugetlb_page(vma)) { struct hstate *h = hstate_vma(vma); if (!hugetlbfs_pagecache_present(h, vma, address)) return ERR_PTR(-EFAULT); } else if ((vma_is_anonymous(vma) || !vma->vm_ops->fault)) { return ERR_PTR(-EFAULT); } return NULL; } #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES static struct page *follow_huge_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pudp, int flags, struct follow_page_context *ctx) { struct mm_struct *mm = vma->vm_mm; struct page *page; pud_t pud = *pudp; unsigned long pfn = pud_pfn(pud); int ret; assert_spin_locked(pud_lockptr(mm, pudp)); if ((flags & FOLL_WRITE) && !pud_write(pud)) return NULL; if (!pud_present(pud)) return NULL; pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) && pud_devmap(pud)) { /* * device mapped pages can only be returned if the caller * will manage the page reference count. * * At least one of FOLL_GET | FOLL_PIN must be set, so * assert that here: */ if (!(flags & (FOLL_GET | FOLL_PIN))) return ERR_PTR(-EEXIST); if (flags & FOLL_TOUCH) touch_pud(vma, addr, pudp, flags & FOLL_WRITE); ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap); if (!ctx->pgmap) return ERR_PTR(-EFAULT); } page = pfn_to_page(pfn); if (!pud_devmap(pud) && !pud_write(pud) && gup_must_unshare(vma, flags, page)) return ERR_PTR(-EMLINK); ret = try_grab_folio(page_folio(page), 1, flags); if (ret) page = ERR_PTR(ret); else ctx->page_mask = HPAGE_PUD_NR - 1; return page; } /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */ static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page, struct vm_area_struct *vma, unsigned int flags) { /* If the pmd is writable, we can write to the page. */ if (pmd_write(pmd)) return true; /* Maybe FOLL_FORCE is set to override it? */ if (!(flags & FOLL_FORCE)) return false; /* But FOLL_FORCE has no effect on shared mappings */ if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) return false; /* ... or read-only private ones */ if (!(vma->vm_flags & VM_MAYWRITE)) return false; /* ... or already writable ones that just need to take a write fault */ if (vma->vm_flags & VM_WRITE) return false; /* * See can_change_pte_writable(): we broke COW and could map the page * writable if we have an exclusive anonymous page ... */ if (!page || !PageAnon(page) || !PageAnonExclusive(page)) return false; /* ... and a write-fault isn't required for other reasons. 
*/ if (pmd_needs_soft_dirty_wp(vma, pmd)) return false; return !userfaultfd_huge_pmd_wp(vma, pmd); } static struct page *follow_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags, struct follow_page_context *ctx) { struct mm_struct *mm = vma->vm_mm; pmd_t pmdval = *pmd; struct page *page; int ret; assert_spin_locked(pmd_lockptr(mm, pmd)); page = pmd_page(pmdval); if ((flags & FOLL_WRITE) && !can_follow_write_pmd(pmdval, page, vma, flags)) return NULL; /* Avoid dumping huge zero page */ if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval)) return ERR_PTR(-EFAULT); if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags)) return NULL; if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page)) return ERR_PTR(-EMLINK); VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && !PageAnonExclusive(page), page); ret = try_grab_folio(page_folio(page), 1, flags); if (ret) return ERR_PTR(ret); #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH)) touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; ctx->page_mask = HPAGE_PMD_NR - 1; return page; } #else /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */ static struct page *follow_huge_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pudp, int flags, struct follow_page_context *ctx) { return NULL; } static struct page *follow_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags, struct follow_page_context *ctx) { return NULL; } #endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, unsigned int flags) { if (flags & FOLL_TOUCH) { pte_t orig_entry = ptep_get(pte); pte_t entry = orig_entry; if (flags & FOLL_WRITE) entry = pte_mkdirty(entry); entry = pte_mkyoung(entry); if (!pte_same(orig_entry, entry)) { set_pte_at(vma->vm_mm, address, pte, entry); update_mmu_cache(vma, address, pte); } } /* Proper page table entry exists, but no corresponding struct page */ return -EEXIST; } /* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */ static inline bool can_follow_write_pte(pte_t pte, struct page *page, struct vm_area_struct *vma, unsigned int flags) { /* If the pte is writable, we can write to the page. */ if (pte_write(pte)) return true; /* Maybe FOLL_FORCE is set to override it? */ if (!(flags & FOLL_FORCE)) return false; /* But FOLL_FORCE has no effect on shared mappings */ if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) return false; /* ... or read-only private ones */ if (!(vma->vm_flags & VM_MAYWRITE)) return false; /* ... or already writable ones that just need to take a write fault */ if (vma->vm_flags & VM_WRITE) return false; /* * See can_change_pte_writable(): we broke COW and could map the page * writable if we have an exclusive anonymous page ... */ if (!page || !PageAnon(page) || !PageAnonExclusive(page)) return false; /* ... and a write-fault isn't required for other reasons. */ if (pte_needs_soft_dirty_wp(vma, pte)) return false; return !userfaultfd_pte_wp(vma, pte); } static struct page *follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags, struct dev_pagemap **pgmap) { struct mm_struct *mm = vma->vm_mm; struct folio *folio; struct page *page; spinlock_t *ptl; pte_t *ptep, pte; int ret; /* FOLL_GET and FOLL_PIN are mutually exclusive. 
*/ if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == (FOLL_PIN | FOLL_GET))) return ERR_PTR(-EINVAL); ptep = pte_offset_map_lock(mm, pmd, address, &ptl); if (!ptep) return no_page_table(vma, flags, address); pte = ptep_get(ptep); if (!pte_present(pte)) goto no_page; if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags)) goto no_page; page = vm_normal_page(vma, address, pte); /* * We only care about anon pages in can_follow_write_pte() and don't * have to worry about pte_devmap() because they are never anon. */ if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, page, vma, flags)) { page = NULL; goto out; } if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) { /* * Only return device mapping pages in the FOLL_GET or FOLL_PIN * case since they are only valid while holding the pgmap * reference. */ *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap); if (*pgmap) page = pte_page(pte); else goto no_page; } else if (unlikely(!page)) { if (flags & FOLL_DUMP) { /* Avoid special (like zero) pages in core dumps */ page = ERR_PTR(-EFAULT); goto out; } if (is_zero_pfn(pte_pfn(pte))) { page = pte_page(pte); } else { ret = follow_pfn_pte(vma, address, ptep, flags); page = ERR_PTR(ret); goto out; } } folio = page_folio(page); if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) { page = ERR_PTR(-EMLINK); goto out; } VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && !PageAnonExclusive(page), page); /* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */ ret = try_grab_folio(folio, 1, flags); if (unlikely(ret)) { page = ERR_PTR(ret); goto out; } /* * We need to make the page accessible if and only if we are going * to access its content (the FOLL_PIN case). Please see * Documentation/core-api/pin_user_pages.rst for details. */ if (flags & FOLL_PIN) { ret = arch_make_folio_accessible(folio); if (ret) { unpin_user_page(page); page = ERR_PTR(ret); goto out; } } if (flags & FOLL_TOUCH) { if ((flags & FOLL_WRITE) && !pte_dirty(pte) && !PageDirty(page)) set_page_dirty(page); /* * pte_mkyoung() would be more correct here, but atomic care * is needed to avoid losing the dirty bit: it is easier to use * mark_page_accessed(). 
*/ mark_page_accessed(page); } out: pte_unmap_unlock(ptep, ptl); return page; no_page: pte_unmap_unlock(ptep, ptl); if (!pte_none(pte)) return NULL; return no_page_table(vma, flags, address); } static struct page *follow_pmd_mask(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, unsigned int flags, struct follow_page_context *ctx) { pmd_t *pmd, pmdval; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pmd = pmd_offset(pudp, address); pmdval = pmdp_get_lockless(pmd); if (pmd_none(pmdval)) return no_page_table(vma, flags, address); if (!pmd_present(pmdval)) return no_page_table(vma, flags, address); if (pmd_devmap(pmdval)) { ptl = pmd_lock(mm, pmd); page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; return no_page_table(vma, flags, address); } if (likely(!pmd_leaf(pmdval))) return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags)) return no_page_table(vma, flags, address); ptl = pmd_lock(mm, pmd); pmdval = *pmd; if (unlikely(!pmd_present(pmdval))) { spin_unlock(ptl); return no_page_table(vma, flags, address); } if (unlikely(!pmd_leaf(pmdval))) { spin_unlock(ptl); return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) { spin_unlock(ptl); split_huge_pmd(vma, pmd, address); /* If pmd was left empty, stuff a page table in there quickly */ return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) : follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } page = follow_huge_pmd(vma, address, pmd, flags, ctx); spin_unlock(ptl); return page; } static struct page *follow_pud_mask(struct vm_area_struct *vma, unsigned long address, p4d_t *p4dp, unsigned int flags, struct follow_page_context *ctx) { pud_t *pudp, pud; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pudp = pud_offset(p4dp, address); pud = READ_ONCE(*pudp); if (!pud_present(pud)) return no_page_table(vma, flags, address); if (pud_leaf(pud)) { ptl = pud_lock(mm, pudp); page = follow_huge_pud(vma, address, pudp, flags, ctx); spin_unlock(ptl); if (page) return page; return no_page_table(vma, flags, address); } if (unlikely(pud_bad(pud))) return no_page_table(vma, flags, address); return follow_pmd_mask(vma, address, pudp, flags, ctx); } static struct page *follow_p4d_mask(struct vm_area_struct *vma, unsigned long address, pgd_t *pgdp, unsigned int flags, struct follow_page_context *ctx) { p4d_t *p4dp, p4d; p4dp = p4d_offset(pgdp, address); p4d = READ_ONCE(*p4dp); BUILD_BUG_ON(p4d_leaf(p4d)); if (!p4d_present(p4d) || p4d_bad(p4d)) return no_page_table(vma, flags, address); return follow_pud_mask(vma, address, p4dp, flags, ctx); } /** * follow_page_mask - look up a page descriptor from a user-virtual address * @vma: vm_area_struct mapping @address * @address: virtual address to look up * @flags: flags modifying lookup behaviour * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a * pointer to output page_mask * * @flags can have FOLL_ flags set, defined in <linux/mm.h> * * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches * the device's dev_pagemap metadata to avoid repeating expensive lookups. * * When getting an anonymous page and the caller has to trigger unsharing * of a shared anonymous page first, -EMLINK is returned. The caller should * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only * relevant with FOLL_PIN and !FOLL_WRITE. 
* * On output, the @ctx->page_mask is set according to the size of the page. * * Return: the mapped (struct page *), %NULL if no mapping exists, or * an error pointer if there is a mapping to something not represented * by a page descriptor (see also vm_normal_page()). */ static struct page *follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct follow_page_context *ctx) { pgd_t *pgd; struct mm_struct *mm = vma->vm_mm; struct page *page; vma_pgtable_walk_begin(vma); ctx->page_mask = 0; pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) page = no_page_table(vma, flags, address); else page = follow_p4d_mask(vma, address, pgd, flags, ctx); vma_pgtable_walk_end(vma); return page; } static int get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; pte_t entry; int ret = -EFAULT; /* user gate pages are read-only */ if (gup_flags & FOLL_WRITE) return -EFAULT; if (address > TASK_SIZE) pgd = pgd_offset_k(address); else pgd = pgd_offset_gate(mm, address); if (pgd_none(*pgd)) return -EFAULT; p4d = p4d_offset(pgd, address); if (p4d_none(*p4d)) return -EFAULT; pud = pud_offset(p4d, address); if (pud_none(*pud)) return -EFAULT; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return -EFAULT; pte = pte_offset_map(pmd, address); if (!pte) return -EFAULT; entry = ptep_get(pte); if (pte_none(entry)) goto unmap; *vma = get_gate_vma(mm); if (!page) goto out; *page = vm_normal_page(*vma, address, entry); if (!*page) { if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry))) goto unmap; *page = pte_page(entry); } ret = try_grab_folio(page_folio(*page), 1, gup_flags); if (unlikely(ret)) goto unmap; out: ret = 0; unmap: pte_unmap(pte); return ret; } /* * mmap_lock must be held on entry. If @flags has FOLL_UNLOCKABLE but not * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set * to 0 and -EBUSY returned. */ static int faultin_page(struct vm_area_struct *vma, unsigned long address, unsigned int flags, bool unshare, int *locked) { unsigned int fault_flags = 0; vm_fault_t ret; if (flags & FOLL_NOFAULT) return -EFAULT; if (flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (flags & FOLL_REMOTE) fault_flags |= FAULT_FLAG_REMOTE; if (flags & FOLL_UNLOCKABLE) { fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; /* * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE. * That's because some callers may not be prepared to * handle early exits caused by non-fatal signals. */ if (flags & FOLL_INTERRUPTIBLE) fault_flags |= FAULT_FLAG_INTERRUPTIBLE; } if (flags & FOLL_NOWAIT) fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; if (flags & FOLL_TRIED) { /* * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED * can co-exist */ fault_flags |= FAULT_FLAG_TRIED; } if (unshare) { fault_flags |= FAULT_FLAG_UNSHARE; /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */ VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE); } ret = handle_mm_fault(vma, address, fault_flags, NULL); if (ret & VM_FAULT_COMPLETED) { /* * With FAULT_FLAG_RETRY_NOWAIT we'll never release the * mmap lock in the page fault handler. Sanity check this. 
*/ WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT); *locked = 0; /* * We should do the same as VM_FAULT_RETRY, but let's not * return -EBUSY since that's not reflecting the reality of * what has happened - we've just fully completed a page * fault, with the mmap lock released. Use -EAGAIN to show * that we want to take the mmap lock _again_. */ return -EAGAIN; } if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, flags); if (err) return err; BUG(); } if (ret & VM_FAULT_RETRY) { if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) *locked = 0; return -EBUSY; } return 0; } /* * Writing to file-backed mappings which require folio dirty tracking using GUP * is a fundamentally broken operation, as kernel write access to GUP mappings * do not adhere to the semantics expected by a file system. * * Consider the following scenario:- * * 1. A folio is written to via GUP which write-faults the memory, notifying * the file system and dirtying the folio. * 2. Later, writeback is triggered, resulting in the folio being cleaned and * the PTE being marked read-only. * 3. The GUP caller writes to the folio, as it is mapped read/write via the * direct mapping. * 4. The GUP caller, now done with the page, unpins it and sets it dirty * (though it does not have to). * * This results in both data being written to a folio without writenotify, and * the folio being dirtied unexpectedly (if the caller decides to do so). */ static bool writable_file_mapping_allowed(struct vm_area_struct *vma, unsigned long gup_flags) { /* * If we aren't pinning then no problematic write can occur. A long term * pin is the most egregious case so this is the case we disallow. */ if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) != (FOLL_PIN | FOLL_LONGTERM)) return true; /* * If the VMA does not require dirty tracking then no problematic write * can occur either. */ return !vma_needs_dirty_tracking(vma); } static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) { vm_flags_t vm_flags = vma->vm_flags; int write = (gup_flags & FOLL_WRITE); int foreign = (gup_flags & FOLL_REMOTE); bool vma_anon = vma_is_anonymous(vma); if (vm_flags & (VM_IO | VM_PFNMAP)) return -EFAULT; if ((gup_flags & FOLL_ANON) && !vma_anon) return -EFAULT; if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) return -EOPNOTSUPP; if (vma_is_secretmem(vma)) return -EFAULT; if (write) { if (!vma_anon && !writable_file_mapping_allowed(vma, gup_flags)) return -EFAULT; if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */ if (is_vm_hugetlb_page(vma)) return -EFAULT; /* * We used to let the write,force case do COW in a * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could * set a breakpoint in a read-only mapping of an * executable, without corrupting the file (yet only * when that file had been opened for writing!). * Anon pages in shared mappings are surprising: now * just reject it. */ if (!is_cow_mapping(vm_flags)) return -EFAULT; } } else if (!(vm_flags & VM_READ)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* * Is there actually any vma we can reach here which does not * have VM_MAYREAD set? */ if (!(vm_flags & VM_MAYREAD)) return -EFAULT; } /* * gups are always data accesses, not instruction * fetches, so execute=false here */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return -EFAULT; return 0; } /* * This is "vma_lookup()", but with a warning if we would have * historically expanded the stack in the GUP code. 
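* The warning fires only for the interesting case: an address a short distance
* (up to 64KiB) below a VM_GROWSDOWN vma, i.e. an access that older kernels
* would have satisfied by growing the stack. Other unmapped addresses simply
* return NULL, exactly as vma_lookup() would.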
*/ static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm, unsigned long addr) { #ifdef CONFIG_STACK_GROWSUP return vma_lookup(mm, addr); #else static volatile unsigned long next_warn; struct vm_area_struct *vma; unsigned long now, next; vma = find_vma(mm, addr); if (!vma || (addr >= vma->vm_start)) return vma; /* Only warn for half-way relevant accesses */ if (!(vma->vm_flags & VM_GROWSDOWN)) return NULL; if (vma->vm_start - addr > 65536) return NULL; /* Let's not warn more than once an hour.. */ now = jiffies; next = next_warn; if (next && time_before(now, next)) return NULL; next_warn = now + 60*60*HZ; /* Let people know things may have changed. */ pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n", current->comm, task_pid_nr(current), vma->vm_start, vma->vm_end, addr); dump_stack(); return NULL; #endif } /** * __get_user_pages() - pin user pages in memory * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @locked: whether we're still with the mmap_lock held * * Returns either number of pages pinned (which may be less than the * number requested), or an error. Details about the return value: * * -- If nr_pages is 0, returns 0. * -- If nr_pages is >0, but no pages were pinned, returns -errno. * -- If nr_pages is >0, and some pages were pinned, returns the number of * pages pinned. Again, this may be less than nr_pages. * -- 0 return value is possible when the fault would need to be retried. * * The caller is responsible for releasing returned @pages, via put_page(). * * Must be called with mmap_lock held. It may be released. See below. * * __get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * __get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re-faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If * the page is written to, set_page_dirty (or set_page_dirty_lock, as * appropriate) must be called after the page is finished with, and * before put_page is called. * * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may * be released. If this happens *@locked will be set to 0 on return. * * A caller using such a combination of @gup_flags must therefore hold the * mmap_lock for reading only, and recognize when it's been released. Otherwise, * it must be held for either reading or writing and will not be released. * * In most cases, get_user_pages or get_user_pages_fast should be used * instead of __get_user_pages. __get_user_pages should be used only if * you need some special @gup_flags. 
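* Within this file, callers normally go through __get_user_pages_locked(),
* which adds the mmap_lock acquire/release and VM_FAULT_RETRY handling
* described further below.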
*/ static long __get_user_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked) { long ret = 0, i = 0; struct vm_area_struct *vma = NULL; struct follow_page_context ctx = { NULL }; if (!nr_pages) return 0; start = untagged_addr_remote(mm, start); VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); do { struct page *page; unsigned int page_increm; /* first iteration or cross vma bound */ if (!vma || start >= vma->vm_end) { /* * MADV_POPULATE_(READ|WRITE) wants to handle VMA * lookups+error reporting differently. */ if (gup_flags & FOLL_MADV_POPULATE) { vma = vma_lookup(mm, start); if (!vma) { ret = -ENOMEM; goto out; } if (check_vma_flags(vma, gup_flags)) { ret = -EINVAL; goto out; } goto retry; } vma = gup_vma_lookup(mm, start); if (!vma && in_gate_area(mm, start)) { ret = get_gate_page(mm, start & PAGE_MASK, gup_flags, &vma, pages ? &page : NULL); if (ret) goto out; ctx.page_mask = 0; goto next_page; } if (!vma) { ret = -EFAULT; goto out; } ret = check_vma_flags(vma, gup_flags); if (ret) goto out; } retry: /* * If we have a pending SIGKILL, don't keep faulting pages and * potentially allocating memory. */ if (fatal_signal_pending(current)) { ret = -EINTR; goto out; } cond_resched(); page = follow_page_mask(vma, start, gup_flags, &ctx); if (!page || PTR_ERR(page) == -EMLINK) { ret = faultin_page(vma, start, gup_flags, PTR_ERR(page) == -EMLINK, locked); switch (ret) { case 0: goto retry; case -EBUSY: case -EAGAIN: ret = 0; fallthrough; case -EFAULT: case -ENOMEM: case -EHWPOISON: goto out; } BUG(); } else if (PTR_ERR(page) == -EEXIST) { /* * Proper page table entry exists, but no corresponding * struct page. If the caller expects **pages to be * filled in, bail out now, because that can't be done * for this page. */ if (pages) { ret = PTR_ERR(page); goto out; } } else if (IS_ERR(page)) { ret = PTR_ERR(page); goto out; } next_page: page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); if (page_increm > nr_pages) page_increm = nr_pages; if (pages) { struct page *subpage; unsigned int j; /* * This must be a large folio (and doesn't need to * be the whole folio; it can be part of it), do * the refcount work for all the subpages too. * * NOTE: here the page may not be the head page * e.g. when start addr is not thp-size aligned. * try_grab_folio() should have taken care of tail * pages. */ if (page_increm > 1) { struct folio *folio = page_folio(page); /* * Since we already hold refcount on the * large folio, this should never fail. */ if (try_grab_folio(folio, page_increm - 1, gup_flags)) { /* * Release the 1st page ref if the * folio is problematic, fail hard. */ gup_put_folio(folio, 1, gup_flags); ret = -EFAULT; goto out; } } for (j = 0; j < page_increm; j++) { subpage = nth_page(page, j); pages[i + j] = subpage; flush_anon_page(vma, subpage, start + j * PAGE_SIZE); flush_dcache_page(subpage); } } i += page_increm; start += page_increm * PAGE_SIZE; nr_pages -= page_increm; } while (nr_pages); out: if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); return i ? i : ret; } static bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags) { bool write = !!(fault_flags & FAULT_FLAG_WRITE); bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; if (!(vm_flags & vma->vm_flags)) return false; /* * The architecture might have a hardware protection * mechanism other than read/write that can deny access. 
* * gup always represents data access, not instruction * fetches, so execute=false here: */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return false; return true; } /** * fixup_user_fault() - manually resolve a user page fault * @mm: mm_struct of target mm * @address: user address * @fault_flags:flags to pass down to handle_mm_fault() * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller * does not allow retry. If NULL, the caller must guarantee * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY. * * This is meant to be called in the specific scenario where for locking reasons * we try to access user memory in atomic context (within a pagefault_disable() * section), this returns -EFAULT, and we want to resolve the user fault before * trying again. * * Typically this is meant to be used by the futex code. * * The main difference with get_user_pages() is that this function will * unconditionally call handle_mm_fault() which will in turn perform all the * necessary SW fixup of the dirty and young bits in the PTE, while * get_user_pages() only guarantees to update these in the struct page. * * This is important for some architectures where those bits also gate the * access permission to the page because they are maintained in software. On * such architectures, gup() will not be enough to make a subsequent access * succeed. * * This function will not return with an unlocked mmap_lock. So it has not the * same semantics wrt the @mm->mmap_lock as does filemap_fault(). */ int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { struct vm_area_struct *vma; vm_fault_t ret; address = untagged_addr_remote(mm, address); if (unlocked) fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; retry: vma = gup_vma_lookup(mm, address); if (!vma) return -EFAULT; if (!vma_permits_fault(vma, fault_flags)) return -EFAULT; if ((fault_flags & FAULT_FLAG_KILLABLE) && fatal_signal_pending(current)) return -EINTR; ret = handle_mm_fault(vma, address, fault_flags, NULL); if (ret & VM_FAULT_COMPLETED) { /* * NOTE: it's a pity that we need to retake the lock here * to pair with the unlock() in the callers. Ideally we * could tell the callers so they do not need to unlock. */ mmap_read_lock(mm); *unlocked = true; return 0; } if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, 0); if (err) return err; BUG(); } if (ret & VM_FAULT_RETRY) { mmap_read_lock(mm); *unlocked = true; fault_flags |= FAULT_FLAG_TRIED; goto retry; } return 0; } EXPORT_SYMBOL_GPL(fixup_user_fault); /* * GUP always responds to fatal signals. When FOLL_INTERRUPTIBLE is * specified, it'll also respond to generic signals. The caller of GUP * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption. */ static bool gup_signal_pending(unsigned int flags) { if (fatal_signal_pending(current)) return true; if (!(flags & FOLL_INTERRUPTIBLE)) return false; return signal_pending(current); } /* * Locking: (*locked == 1) means that the mmap_lock has already been acquired by * the caller. This function may drop the mmap_lock. If it does so, then it will * set (*locked = 0). * * (*locked == 0) means that the caller expects this function to acquire and * drop the mmap_lock. Therefore, the value of *locked will still be zero when * the function returns, even though it may have changed temporarily during * function execution. 
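* (get_user_pages_unlocked() is an example of the second convention: it starts
* with a local locked == 0, adds FOLL_UNLOCKABLE, and lets this function take
* and drop the mmap_lock by itself.)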
* * Please note that this function, unlike __get_user_pages(), will not return 0 * for nr_pages > 0, unless FOLL_NOWAIT is used. */ static __always_inline long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, int *locked, unsigned int flags) { long ret, pages_done; bool must_unlock = false; if (!nr_pages) return 0; /* * The internal caller expects GUP to manage the lock internally and the * lock must be released when this returns. */ if (!*locked) { if (mmap_read_lock_killable(mm)) return -EAGAIN; must_unlock = true; *locked = 1; } else mmap_assert_locked(mm); if (flags & FOLL_PIN) mm_set_has_pinned_flag(&mm->flags); /* * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior * is to set FOLL_GET if the caller wants pages[] filled in (but has * carelessly failed to specify FOLL_GET), so keep doing that, but only * for FOLL_GET, not for the newer FOLL_PIN. * * FOLL_PIN always expects pages to be non-null, but no need to assert * that here, as any failures will be obvious enough. */ if (pages && !(flags & FOLL_PIN)) flags |= FOLL_GET; pages_done = 0; for (;;) { ret = __get_user_pages(mm, start, nr_pages, flags, pages, locked); if (!(flags & FOLL_UNLOCKABLE)) { /* VM_FAULT_RETRY couldn't trigger, bypass */ pages_done = ret; break; } /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */ if (!*locked) { BUG_ON(ret < 0); BUG_ON(ret >= nr_pages); } if (ret > 0) { nr_pages -= ret; pages_done += ret; if (!nr_pages) break; } if (*locked) { /* * VM_FAULT_RETRY didn't trigger or it was a * FOLL_NOWAIT. */ if (!pages_done) pages_done = ret; break; } /* * VM_FAULT_RETRY triggered, so seek to the faulting offset. * For the prefault case (!pages) we only update counts. */ if (likely(pages)) pages += ret; start += ret << PAGE_SHIFT; /* The lock was temporarily dropped, so we must unlock later */ must_unlock = true; retry: /* * Repeat on the address that fired VM_FAULT_RETRY * with both FAULT_FLAG_ALLOW_RETRY and * FAULT_FLAG_TRIED. Note that GUP can be interrupted * by fatal signals of even common signals, depending on * the caller's request. So we need to check it before we * start trying again otherwise it can loop forever. */ if (gup_signal_pending(flags)) { if (!pages_done) pages_done = -EINTR; break; } ret = mmap_read_lock_killable(mm); if (ret) { BUG_ON(ret > 0); if (!pages_done) pages_done = ret; break; } *locked = 1; ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED, pages, locked); if (!*locked) { /* Continue to retry until we succeeded */ BUG_ON(ret != 0); goto retry; } if (ret != 1) { BUG_ON(ret > 1); if (!pages_done) pages_done = ret; break; } nr_pages--; pages_done++; if (!nr_pages) break; if (likely(pages)) pages++; start += PAGE_SIZE; } if (must_unlock && *locked) { /* * We either temporarily dropped the lock, or the caller * requested that we both acquire and drop the lock. Either way, * we must now unlock, and notify the caller of that state. */ mmap_read_unlock(mm); *locked = 0; } /* * Failing to pin anything implies something has gone wrong (except when * FOLL_NOWAIT is specified). */ if (WARN_ON_ONCE(pages_done == 0 && !(flags & FOLL_NOWAIT))) return -EFAULT; return pages_done; } /** * populate_vma_page_range() - populate a range of pages in the vma. * @vma: target vma * @start: start address * @end: end address * @locked: whether the mmap_lock is still held * * This takes care of mlocking the pages too if VM_LOCKED is set. 
* * Return either number of pages pinned in the vma, or a negative error * code on error. * * vma->vm_mm->mmap_lock must be held. * * If @locked is NULL, it may be held for read or write and will * be unperturbed. * * If @locked is non-NULL, it must held for read only and may be * released. If it's released, *@locked will be set to 0. */ long populate_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *locked) { struct mm_struct *mm = vma->vm_mm; unsigned long nr_pages = (end - start) / PAGE_SIZE; int local_locked = 1; int gup_flags; long ret; VM_BUG_ON(!PAGE_ALIGNED(start)); VM_BUG_ON(!PAGE_ALIGNED(end)); VM_BUG_ON_VMA(start < vma->vm_start, vma); VM_BUG_ON_VMA(end > vma->vm_end, vma); mmap_assert_locked(mm); /* * Rightly or wrongly, the VM_LOCKONFAULT case has never used * faultin_page() to break COW, so it has no work to do here. */ if (vma->vm_flags & VM_LOCKONFAULT) return nr_pages; /* ... similarly, we've never faulted in PROT_NONE pages */ if (!vma_is_accessible(vma)) return -EFAULT; gup_flags = FOLL_TOUCH; /* * We want to touch writable mappings with a write fault in order * to break COW, except for shared mappings because these don't COW * and we would not want to dirty them for nothing. * * Otherwise, do a read fault, and use FOLL_FORCE in case it's not * readable (ie write-only or executable). */ if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) gup_flags |= FOLL_WRITE; else gup_flags |= FOLL_FORCE; if (locked) gup_flags |= FOLL_UNLOCKABLE; /* * We made sure addr is within a VMA, so the following will * not result in a stack expansion that recurses back here. */ ret = __get_user_pages(mm, start, nr_pages, gup_flags, NULL, locked ? locked : &local_locked); lru_add_drain(); return ret; } /* * faultin_page_range() - populate (prefault) page tables inside the * given range readable/writable * * This takes care of mlocking the pages, too, if VM_LOCKED is set. * * @mm: the mm to populate page tables in * @start: start address * @end: end address * @write: whether to prefault readable or writable * @locked: whether the mmap_lock is still held * * Returns either number of processed pages in the MM, or a negative error * code on error (see __get_user_pages()). Note that this function reports * errors related to VMAs, such as incompatible mappings, as expected by * MADV_POPULATE_(READ|WRITE). * * The range must be page-aligned. * * mm->mmap_lock must be held. If it's released, *@locked will be set to 0. */ long faultin_page_range(struct mm_struct *mm, unsigned long start, unsigned long end, bool write, int *locked) { unsigned long nr_pages = (end - start) / PAGE_SIZE; int gup_flags; long ret; VM_BUG_ON(!PAGE_ALIGNED(start)); VM_BUG_ON(!PAGE_ALIGNED(end)); mmap_assert_locked(mm); /* * FOLL_TOUCH: Mark page accessed and thereby young; will also mark * the page dirty with FOLL_WRITE -- which doesn't make a * difference with !FOLL_FORCE, because the page is writable * in the page table. * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit * a poisoned page. * !FOLL_FORCE: Require proper access permissions. */ gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE | FOLL_MADV_POPULATE; if (write) gup_flags |= FOLL_WRITE; ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked, gup_flags); lru_add_drain(); return ret; } /* * __mm_populate - populate and/or mlock pages within a range of address space. * * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap * flags. 
VMAs must be already marked with the desired vm_flags, and * mmap_lock must not be held. */ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) { struct mm_struct *mm = current->mm; unsigned long end, nstart, nend; struct vm_area_struct *vma = NULL; int locked = 0; long ret = 0; end = start + len; for (nstart = start; nstart < end; nstart = nend) { /* * We want to fault in pages for [nstart; end) address range. * Find first corresponding VMA. */ if (!locked) { locked = 1; mmap_read_lock(mm); vma = find_vma_intersection(mm, nstart, end); } else if (nstart >= vma->vm_end) vma = find_vma_intersection(mm, vma->vm_end, end); if (!vma) break; /* * Set [nstart; nend) to intersection of desired address * range with the first VMA. Also, skip undesirable VMA types. */ nend = min(end, vma->vm_end); if (vma->vm_flags & (VM_IO | VM_PFNMAP)) continue; if (nstart < vma->vm_start) nstart = vma->vm_start; /* * Now fault in a range of pages. populate_vma_page_range() * double checks the vma flags, so that it won't mlock pages * if the vma was already munlocked. */ ret = populate_vma_page_range(vma, nstart, nend, &locked); if (ret < 0) { if (ignore_errors) { ret = 0; continue; /* continue at next VMA */ } break; } nend = nstart + ret * PAGE_SIZE; ret = 0; } if (locked) mmap_read_unlock(mm); return ret; /* 0 or negative error code */ } #else /* CONFIG_MMU */ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, int *locked, unsigned int foll_flags) { struct vm_area_struct *vma; bool must_unlock = false; unsigned long vm_flags; long i; if (!nr_pages) return 0; /* * The internal caller expects GUP to manage the lock internally and the * lock must be released when this returns. */ if (!*locked) { if (mmap_read_lock_killable(mm)) return -EAGAIN; must_unlock = true; *locked = 1; } /* calculate required read or write permissions. * If FOLL_FORCE is set, we only require the "MAY" flags. */ vm_flags = (foll_flags & FOLL_WRITE) ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); vm_flags &= (foll_flags & FOLL_FORCE) ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); for (i = 0; i < nr_pages; i++) { vma = find_vma(mm, start); if (!vma) break; /* protect what we can, including chardevs */ if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || !(vm_flags & vma->vm_flags)) break; if (pages) { pages[i] = virt_to_page((void *)start); if (pages[i]) get_page(pages[i]); } start = (start + PAGE_SIZE) & PAGE_MASK; } if (must_unlock && *locked) { mmap_read_unlock(mm); *locked = 0; } return i ? : -EFAULT; } #endif /* !CONFIG_MMU */ /** * fault_in_writeable - fault in userspace address range for writing * @uaddr: start of address range * @size: size of address range * * Returns the number of bytes not faulted in (like copy_to_user() and * copy_from_user()). 
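*
* A minimal illustrative pattern (not taken from any specific caller; ubuf and
* count are placeholders): when a copy attempted with page faults disabled
* came up short, fault the destination buffer in and retry the copy:
*
*	if (fault_in_writeable(ubuf, count))
*		return -EFAULT;
*	... retry the copy_to_user()-based fast path ...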
*/ size_t fault_in_writeable(char __user *uaddr, size_t size) { char __user *start = uaddr, *end; if (unlikely(size == 0)) return 0; if (!user_write_access_begin(uaddr, size)) return size; if (!PAGE_ALIGNED(uaddr)) { unsafe_put_user(0, uaddr, out); uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr); } end = (char __user *)PAGE_ALIGN((unsigned long)start + size); if (unlikely(end < start)) end = NULL; while (uaddr != end) { unsafe_put_user(0, uaddr, out); uaddr += PAGE_SIZE; } out: user_write_access_end(); if (size > uaddr - start) return size - (uaddr - start); return 0; } EXPORT_SYMBOL(fault_in_writeable); /** * fault_in_subpage_writeable - fault in an address range for writing * @uaddr: start of address range * @size: size of address range * * Fault in a user address range for writing while checking for permissions at * sub-page granularity (e.g. arm64 MTE). This function should be used when * the caller cannot guarantee forward progress of a copy_to_user() loop. * * Returns the number of bytes not faulted in (like copy_to_user() and * copy_from_user()). */ size_t fault_in_subpage_writeable(char __user *uaddr, size_t size) { size_t faulted_in; /* * Attempt faulting in at page granularity first for page table * permission checking. The arch-specific probe_subpage_writeable() * functions may not check for this. */ faulted_in = size - fault_in_writeable(uaddr, size); if (faulted_in) faulted_in -= probe_subpage_writeable(uaddr, faulted_in); return size - faulted_in; } EXPORT_SYMBOL(fault_in_subpage_writeable); /* * fault_in_safe_writeable - fault in an address range for writing * @uaddr: start of address range * @size: length of address range * * Faults in an address range for writing. This is primarily useful when we * already know that some or all of the pages in the address range aren't in * memory. * * Unlike fault_in_writeable(), this function is non-destructive. * * Note that we don't pin or otherwise hold the pages referenced that we fault * in. There's no guarantee that they'll stay in memory for any duration of * time. * * Returns the number of bytes not faulted in, like copy_to_user() and * copy_from_user(). */ size_t fault_in_safe_writeable(const char __user *uaddr, size_t size) { unsigned long start = (unsigned long)uaddr, end; struct mm_struct *mm = current->mm; bool unlocked = false; if (unlikely(size == 0)) return 0; end = PAGE_ALIGN(start + size); if (end < start) end = 0; mmap_read_lock(mm); do { if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked)) break; start = (start + PAGE_SIZE) & PAGE_MASK; } while (start != end); mmap_read_unlock(mm); if (size > (unsigned long)uaddr - start) return size - ((unsigned long)uaddr - start); return 0; } EXPORT_SYMBOL(fault_in_safe_writeable); /** * fault_in_readable - fault in userspace address range for reading * @uaddr: start of user address range * @size: size of user address range * * Returns the number of bytes not faulted in (like copy_to_user() and * copy_from_user()). 
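* The usual pattern mirrors fault_in_writeable() above: fault the source
* buffer in before retrying a copy that runs with page faults disabled, and
* give up only if nothing could be faulted in.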
*/ size_t fault_in_readable(const char __user *uaddr, size_t size) { const char __user *start = uaddr, *end; volatile char c; if (unlikely(size == 0)) return 0; if (!user_read_access_begin(uaddr, size)) return size; if (!PAGE_ALIGNED(uaddr)) { unsafe_get_user(c, uaddr, out); uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr); } end = (const char __user *)PAGE_ALIGN((unsigned long)start + size); if (unlikely(end < start)) end = NULL; while (uaddr != end) { unsafe_get_user(c, uaddr, out); uaddr += PAGE_SIZE; } out: user_read_access_end(); (void)c; if (size > uaddr - start) return size - (uaddr - start); return 0; } EXPORT_SYMBOL(fault_in_readable); /** * get_dump_page() - pin user page in memory while writing it to core dump * @addr: user address * * Returns struct page pointer of user page pinned for dump, * to be freed afterwards by put_page(). * * Returns NULL on any kind of failure - a hole must then be inserted into * the corefile, to preserve alignment with its headers; and also returns * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - * allowing a hole to be left in the corefile to save disk space. * * Called without mmap_lock (takes and releases the mmap_lock by itself). */ #ifdef CONFIG_ELF_CORE struct page *get_dump_page(unsigned long addr) { struct page *page; int locked = 0; int ret; ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked, FOLL_FORCE | FOLL_DUMP | FOLL_GET); return (ret == 1) ? page : NULL; } #endif /* CONFIG_ELF_CORE */ #ifdef CONFIG_MIGRATION /* * Returns the number of collected folios. Return value is always >= 0. */ static unsigned long collect_longterm_unpinnable_folios( struct list_head *movable_folio_list, unsigned long nr_folios, struct folio **folios) { unsigned long i, collected = 0; struct folio *prev_folio = NULL; bool drain_allow = true; for (i = 0; i < nr_folios; i++) { struct folio *folio = folios[i]; if (folio == prev_folio) continue; prev_folio = folio; if (folio_is_longterm_pinnable(folio)) continue; collected++; if (folio_is_device_coherent(folio)) continue; if (folio_test_hugetlb(folio)) { isolate_hugetlb(folio, movable_folio_list); continue; } if (!folio_test_lru(folio) && drain_allow) { lru_add_drain_all(); drain_allow = false; } if (!folio_isolate_lru(folio)) continue; list_add_tail(&folio->lru, movable_folio_list); node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio), folio_nr_pages(folio)); } return collected; } /* * Unpins all folios and migrates device coherent folios and movable_folio_list. * Returns -EAGAIN if all folios were successfully migrated or -errno for * failure (or partial success). */ static int migrate_longterm_unpinnable_folios( struct list_head *movable_folio_list, unsigned long nr_folios, struct folio **folios) { int ret; unsigned long i; for (i = 0; i < nr_folios; i++) { struct folio *folio = folios[i]; if (folio_is_device_coherent(folio)) { /* * Migration will fail if the folio is pinned, so * convert the pin on the source folio to a normal * reference. */ folios[i] = NULL; folio_get(folio); gup_put_folio(folio, 1, FOLL_PIN); if (migrate_device_coherent_folio(folio)) { ret = -EBUSY; goto err; } continue; } /* * We can't migrate folios with unexpected references, so drop * the reference obtained by __get_user_pages_locked(). * Migrating folios have been added to movable_folio_list after * calling folio_isolate_lru() which takes a reference so the * folio won't be freed if it's migrating. 
*/ unpin_folio(folios[i]); folios[i] = NULL; } if (!list_empty(movable_folio_list)) { struct migration_target_control mtc = { .nid = NUMA_NO_NODE, .gfp_mask = GFP_USER | __GFP_NOWARN, .reason = MR_LONGTERM_PIN, }; if (migrate_pages(movable_folio_list, alloc_migration_target, NULL, (unsigned long)&mtc, MIGRATE_SYNC, MR_LONGTERM_PIN, NULL)) { ret = -ENOMEM; goto err; } } putback_movable_pages(movable_folio_list); return -EAGAIN; err: unpin_folios(folios, nr_folios); putback_movable_pages(movable_folio_list); return ret; } /* * Check whether all folios are *allowed* to be pinned indefinitely (longterm). * Rather confusingly, all folios in the range are required to be pinned via * FOLL_PIN, before calling this routine. * * If any folios in the range are not allowed to be pinned, then this routine * will migrate those folios away, unpin all the folios in the range and return * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then * call this routine again. * * If an error other than -EAGAIN occurs, this indicates a migration failure. * The caller should give up, and propagate the error back up the call stack. * * If everything is OK and all folios in the range are allowed to be pinned, * then this routine leaves all folios pinned and returns zero for success. */ static long check_and_migrate_movable_folios(unsigned long nr_folios, struct folio **folios) { unsigned long collected; LIST_HEAD(movable_folio_list); collected = collect_longterm_unpinnable_folios(&movable_folio_list, nr_folios, folios); if (!collected) return 0; return migrate_longterm_unpinnable_folios(&movable_folio_list, nr_folios, folios); } /* * This routine just converts all the pages in the @pages array to folios and * calls check_and_migrate_movable_folios() to do the heavy lifting. * * Please see the check_and_migrate_movable_folios() documentation for details. */ static long check_and_migrate_movable_pages(unsigned long nr_pages, struct page **pages) { struct folio **folios; long i, ret; folios = kmalloc_array(nr_pages, sizeof(*folios), GFP_KERNEL); if (!folios) return -ENOMEM; for (i = 0; i < nr_pages; i++) folios[i] = page_folio(pages[i]); ret = check_and_migrate_movable_folios(nr_pages, folios); kfree(folios); return ret; } #else static long check_and_migrate_movable_pages(unsigned long nr_pages, struct page **pages) { return 0; } static long check_and_migrate_movable_folios(unsigned long nr_folios, struct folio **folios) { return 0; } #endif /* CONFIG_MIGRATION */ /* * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which * allows us to process the FOLL_LONGTERM flag. */ static long __gup_longterm_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, int *locked, unsigned int gup_flags) { unsigned int flags; long rc, nr_pinned_pages; if (!(gup_flags & FOLL_LONGTERM)) return __get_user_pages_locked(mm, start, nr_pages, pages, locked, gup_flags); flags = memalloc_pin_save(); do { nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages, pages, locked, gup_flags); if (nr_pinned_pages <= 0) { rc = nr_pinned_pages; break; } /* FOLL_LONGTERM implies FOLL_PIN */ rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); } while (rc == -EAGAIN); memalloc_pin_restore(flags); return rc ? rc : nr_pinned_pages; } /* * Check that the given flags are valid for the exported gup/pup interface, and * update them with the required flags that the caller must have set. 
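* For example, pin_user_pages() passes to_set == FOLL_PIN here; a caller that
* additionally specified FOLL_GET (the two are mutually exclusive), or that
* asked for FOLL_LONGTERM without pinning, is rejected with a WARN_ON_ONCE().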
*/ static bool is_valid_gup_args(struct page **pages, int *locked, unsigned int *gup_flags_p, unsigned int to_set) { unsigned int gup_flags = *gup_flags_p; /* * These flags not allowed to be specified externally to the gup * interfaces: * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only * - FOLL_REMOTE is internal only, set in (get|pin)_user_pages_remote() * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL */ if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS)) return false; gup_flags |= to_set; if (locked) { /* At the external interface locked must be set */ if (WARN_ON_ONCE(*locked != 1)) return false; gup_flags |= FOLL_UNLOCKABLE; } /* FOLL_GET and FOLL_PIN are mutually exclusive. */ if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) == (FOLL_PIN | FOLL_GET))) return false; /* LONGTERM can only be specified when pinning */ if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM))) return false; /* Pages input must be given if using GET/PIN */ if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages)) return false; /* We want to allow the pgmap to be hot-unplugged at all times */ if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) && (gup_flags & FOLL_PCI_P2PDMA))) return false; *gup_flags_p = gup_flags; return true; } #ifdef CONFIG_MMU /** * get_user_pages_remote() - pin user pages in memory * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @locked: pointer to lock flag indicating whether lock is held and * subsequently whether VM_FAULT_RETRY functionality can be * utilised. Lock must initially be held. * * Returns either number of pages pinned (which may be less than the * number requested), or an error. Details about the return value: * * -- If nr_pages is 0, returns 0. * -- If nr_pages is >0, but no pages were pinned, returns -errno. * -- If nr_pages is >0, and some pages were pinned, returns the number of * pages pinned. Again, this may be less than nr_pages. * * The caller is responsible for releasing returned @pages, via put_page(). * * Must be called with mmap_lock held for read or write. * * get_user_pages_remote walks a process's page tables and takes a reference * to each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * get_user_pages_remote returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re-faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must * be called after the page is finished with, and before put_page is called. 
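*
* A minimal illustrative sketch (not from an in-tree caller; mm, addr and page
* are placeholders, error handling trimmed):
*
*	int locked = 1;
*	long got;
*
*	mmap_read_lock(mm);
*	got = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, &page, &locked);
*	if (locked)
*		mmap_read_unlock(mm);
*	if (got == 1) {
*		... access the page, e.g. via kmap_local_page() ...
*		set_page_dirty_lock(page);
*		put_page(page);
*	}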
* * get_user_pages_remote is typically used for fewer-copy IO operations, * to get a handle on the memory by some means other than accesses * via the user virtual addresses. The pages may be submitted for * DMA to devices or accessed via their kernel linear mapping (via the * kmap APIs). Care should be taken to use the correct cache flushing APIs. * * See also get_user_pages_fast, for performance critical applications. * * get_user_pages_remote should be phased out in favor of * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing * should use get_user_pages_remote because it cannot pass * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. */ long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked) { int local_locked = 1; if (!is_valid_gup_args(pages, locked, &gup_flags, FOLL_TOUCH | FOLL_REMOTE)) return -EINVAL; return __get_user_pages_locked(mm, start, nr_pages, pages, locked ? locked : &local_locked, gup_flags); } EXPORT_SYMBOL(get_user_pages_remote); #else /* CONFIG_MMU */ long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked) { return 0; } #endif /* !CONFIG_MMU */ /** * get_user_pages() - pin user pages in memory * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * * This is the same as get_user_pages_remote(), just with a less-flexible * calling convention where we assume that the mm being operated on belongs to * the current task, and doesn't allow passing of a locked parameter. We also * obviously don't pass FOLL_REMOTE in here. */ long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages) { int locked = 1; if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) return -EINVAL; return __get_user_pages_locked(current->mm, start, nr_pages, pages, &locked, gup_flags); } EXPORT_SYMBOL(get_user_pages); /* * get_user_pages_unlocked() is suitable to replace the form: * * mmap_read_lock(mm); * get_user_pages(mm, ..., pages, NULL); * mmap_read_unlock(mm); * * with: * * get_user_pages_unlocked(mm, ..., pages); * * It is functionally equivalent to get_user_pages_fast so * get_user_pages_fast should be used instead if specific gup_flags * (e.g. FOLL_FORCE) are not required. */ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags) { int locked = 0; if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH | FOLL_UNLOCKABLE)) return -EINVAL; return __get_user_pages_locked(current->mm, start, nr_pages, pages, &locked, gup_flags); } EXPORT_SYMBOL(get_user_pages_unlocked); /* * GUP-fast * * get_user_pages_fast attempts to pin user pages by walking the page * tables directly and avoids taking locks. Thus the walker needs to be * protected from page table pages being freed from under it, and should * block any THP splits. * * One way to achieve this is to have the walker disable interrupts, and * rely on IPIs from the TLB flushing code blocking before the page table * pages are freed. This is unsuitable for architectures that do not need * to broadcast an IPI when invalidating TLBs. 
* * Another way to achieve this is to batch up page table containing pages * belonging to more than one mm_user, then rcu_sched a callback to free those * pages. Disabling interrupts will allow the gup_fast() walker to both block * the rcu_sched callback, and an IPI that we broadcast for splitting THPs * (which is a relatively rare event). The code below adopts this strategy. * * Before activating this code, please be aware that the following assumptions * are currently made: * * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to * free pages containing page tables or TLB flushing requires IPI broadcast. * * *) ptes can be read atomically by the architecture. * * *) access_ok is sufficient to validate userspace address ranges. * * The last two assumptions can be relaxed by the addition of helper functions. * * This code is based heavily on the PowerPC implementation by Nick Piggin. */ #ifdef CONFIG_HAVE_GUP_FAST /* * Used in the GUP-fast path to determine whether GUP is permitted to work on * a specific folio. * * This call assumes the caller has pinned the folio, that the lowest page table * level still points to this folio, and that interrupts have been disabled. * * GUP-fast must reject all secretmem folios. * * Writing to pinned file-backed dirty tracked folios is inherently problematic * (see comment describing the writable_file_mapping_allowed() function). We * therefore try to avoid the most egregious case of a long-term mapping doing * so. * * This function cannot be as thorough as that one as the VMA is not available * in the fast path, so instead we whitelist known good cases and if in doubt, * fall back to the slow path. */ static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags) { bool reject_file_backed = false; struct address_space *mapping; bool check_secretmem = false; unsigned long mapping_flags; /* * If we aren't pinning then no problematic write can occur. A long term * pin is the most egregious case so this is the one we disallow. */ if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) == (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) reject_file_backed = true; /* We hold a folio reference, so we can safely access folio fields. */ /* secretmem folios are always order-0 folios. */ if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio)) check_secretmem = true; if (!reject_file_backed && !check_secretmem) return true; if (WARN_ON_ONCE(folio_test_slab(folio))) return false; /* hugetlb neither requires dirty-tracking nor can be secretmem. */ if (folio_test_hugetlb(folio)) return true; /* * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods * cannot proceed, which means no actions performed under RCU can * proceed either. * * inodes and thus their mappings are freed under RCU, which means the * mapping cannot be freed beneath us and thus we can safely dereference * it. */ lockdep_assert_irqs_disabled(); /* * However, there may be operations which _alter_ the mapping, so ensure * we read it once and only once. */ mapping = READ_ONCE(folio->mapping); /* * The mapping may have been truncated, in any case we cannot determine * if this mapping is safe - fall back to slow path to determine how to * proceed. */ if (!mapping) return false; /* Anonymous folios pose no problem. */ mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS; if (mapping_flags) return mapping_flags & PAGE_MAPPING_ANON; /* * At this point, we know the mapping is non-null and points to an * address_space object. 
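* The remaining checks can therefore be made against the mapping alone: reject
* secretmem mappings, and for the long-term pin-for-write case allow only
* shmem, the one file system treated as safe for that here.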
*/ if (check_secretmem && secretmem_mapping(mapping)) return false; /* The only remaining allowed file system is shmem. */ return !reject_file_backed || shmem_mapping(mapping); } static void __maybe_unused gup_fast_undo_dev_pagemap(int *nr, int nr_start, unsigned int flags, struct page **pages) { while ((*nr) - nr_start) { struct folio *folio = page_folio(pages[--(*nr)]); folio_clear_referenced(folio); gup_put_folio(folio, 1, flags); } } #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL /* * GUP-fast relies on pte change detection to avoid concurrent pgtable * operations. * * To pin the page, GUP-fast needs to do below in order: * (1) pin the page (by prefetching pte), then (2) check pte not changed. * * For the rest of pgtable operations where pgtable updates can be racy * with GUP-fast, we need to do (1) clear pte, then (2) check whether page * is pinned. * * Above will work for all pte-level operations, including THP split. * * For THP collapse, it's a bit more complicated because GUP-fast may be * walking a pgtable page that is being freed (pte is still valid but pmd * can be cleared already). To avoid race in such condition, we need to * also check pmd here to make sure pmd doesn't change (corresponds to * pmdp_collapse_flush() in the THP collapse code path). */ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { struct dev_pagemap *pgmap = NULL; int nr_start = *nr, ret = 0; pte_t *ptep, *ptem; ptem = ptep = pte_offset_map(&pmd, addr); if (!ptep) return 0; do { pte_t pte = ptep_get_lockless(ptep); struct page *page; struct folio *folio; /* * Always fallback to ordinary GUP on PROT_NONE-mapped pages: * pte_access_permitted() better should reject these pages * either way: otherwise, GUP-fast might succeed in * cases where ordinary GUP would fail due to VMA access * permissions. */ if (pte_protnone(pte)) goto pte_unmap; if (!pte_access_permitted(pte, flags & FOLL_WRITE)) goto pte_unmap; if (pte_devmap(pte)) { if (unlikely(flags & FOLL_LONGTERM)) goto pte_unmap; pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); if (unlikely(!pgmap)) { gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); goto pte_unmap; } } else if (pte_special(pte)) goto pte_unmap; VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); folio = try_grab_folio_fast(page, 1, flags); if (!folio) goto pte_unmap; if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { gup_put_folio(folio, 1, flags); goto pte_unmap; } if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, 1, flags); goto pte_unmap; } if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) { gup_put_folio(folio, 1, flags); goto pte_unmap; } /* * We need to make the page accessible if and only if we are * going to access its content (the FOLL_PIN case). Please * see Documentation/core-api/pin_user_pages.rst for * details. */ if (flags & FOLL_PIN) { ret = arch_make_folio_accessible(folio); if (ret) { gup_put_folio(folio, 1, flags); goto pte_unmap; } } folio_set_referenced(folio); pages[*nr] = page; (*nr)++; } while (ptep++, addr += PAGE_SIZE, addr != end); ret = 1; pte_unmap: if (pgmap) put_dev_pagemap(pgmap); pte_unmap(ptem); return ret; } #else /* * If we can't determine whether or not a pte is special, then fail immediately * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not * to be special. 
* * For a futex to be placed on a THP tail page, get_futex_key requires a * get_user_pages_fast_only implementation that can pin pages. Thus it's still * useful to have gup_fast_pmd_leaf even if we can't operate on ptes. */ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { return 0; } #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { int nr_start = *nr; struct dev_pagemap *pgmap = NULL; do { struct folio *folio; struct page *page = pfn_to_page(pfn); pgmap = get_dev_pagemap(pfn, pgmap); if (unlikely(!pgmap)) { gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); break; } if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) { gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); break; } folio = try_grab_folio_fast(page, 1, flags); if (!folio) { gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); break; } folio_set_referenced(folio); pages[*nr] = page; (*nr)++; pfn++; } while (addr += PAGE_SIZE, addr != end); put_dev_pagemap(pgmap); return addr == end; } static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr)) return 0; if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); return 0; } return 1; } static int gup_fast_devmap_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr)) return 0; if (unlikely(pud_val(orig) != pud_val(*pudp))) { gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); return 0; } return 1; } #else static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { BUILD_BUG(); return 0; } static int gup_fast_devmap_pud_leaf(pud_t pud, pud_t *pudp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { BUILD_BUG(); return 0; } #endif static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { struct page *page; struct folio *folio; int refs; if (!pmd_access_permitted(orig, flags & FOLL_WRITE)) return 0; if (pmd_special(orig)) return 0; if (pmd_devmap(orig)) { if (unlikely(flags & FOLL_LONGTERM)) return 0; return gup_fast_devmap_pmd_leaf(orig, pmdp, addr, end, flags, pages, nr); } page = pmd_page(orig); refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr); folio = try_grab_folio_fast(page, refs, flags); if (!folio) return 0; if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { gup_put_folio(folio, refs, flags); return 0; } if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { gup_put_folio(folio, refs, flags); return 0; } *nr += refs; folio_set_referenced(folio); return 1; } static int 
gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { struct page *page; struct folio *folio; int refs; if (!pud_access_permitted(orig, flags & FOLL_WRITE)) return 0; if (pud_special(orig)) return 0; if (pud_devmap(orig)) { if (unlikely(flags & FOLL_LONGTERM)) return 0; return gup_fast_devmap_pud_leaf(orig, pudp, addr, end, flags, pages, nr); } page = pud_page(orig); refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr); folio = try_grab_folio_fast(page, refs, flags); if (!folio) return 0; if (unlikely(pud_val(orig) != pud_val(*pudp))) { gup_put_folio(folio, refs, flags); return 0; } if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { gup_put_folio(folio, refs, flags); return 0; } *nr += refs; folio_set_referenced(folio); return 1; } static int gup_fast_pgd_leaf(pgd_t orig, pgd_t *pgdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { int refs; struct page *page; struct folio *folio; if (!pgd_access_permitted(orig, flags & FOLL_WRITE)) return 0; BUILD_BUG_ON(pgd_devmap(orig)); page = pgd_page(orig); refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr); folio = try_grab_folio_fast(page, refs, flags); if (!folio) return 0; if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { gup_put_folio(folio, refs, flags); return 0; } if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { gup_put_folio(folio, refs, flags); return 0; } if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } *nr += refs; folio_set_referenced(folio); return 1; } static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long next; pmd_t *pmdp; pmdp = pmd_offset_lockless(pudp, pud, addr); do { pmd_t pmd = pmdp_get_lockless(pmdp); next = pmd_addr_end(addr, end); if (!pmd_present(pmd)) return 0; if (unlikely(pmd_leaf(pmd))) { /* See gup_fast_pte_range() */ if (pmd_protnone(pmd)) return 0; if (!gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags, pages, nr)) return 0; } else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags, pages, nr)) return 0; } while (pmdp++, addr = next, addr != end); return 1; } static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long next; pud_t *pudp; pudp = pud_offset_lockless(p4dp, p4d, addr); do { pud_t pud = READ_ONCE(*pudp); next = pud_addr_end(addr, end); if (unlikely(!pud_present(pud))) return 0; if (unlikely(pud_leaf(pud))) { if (!gup_fast_pud_leaf(pud, pudp, addr, next, flags, pages, nr)) return 0; } else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); return 1; } static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long next; p4d_t *p4dp; p4dp = p4d_offset_lockless(pgdp, pgd, addr); do { p4d_t p4d = READ_ONCE(*p4dp); next = p4d_addr_end(addr, end); if (!p4d_present(p4d)) return 0; BUILD_BUG_ON(p4d_leaf(p4d)); if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) return 0; } while (p4dp++, addr = next, addr != end); return 1; } static void gup_fast_pgd_range(unsigned long addr, unsigned long end, unsigned int flags, struct 
page **pages, int *nr) { unsigned long next; pgd_t *pgdp; pgdp = pgd_offset(current->mm, addr); do { pgd_t pgd = READ_ONCE(*pgdp); next = pgd_addr_end(addr, end); if (pgd_none(pgd)) return; if (unlikely(pgd_leaf(pgd))) { if (!gup_fast_pgd_leaf(pgd, pgdp, addr, next, flags, pages, nr)) return; } else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) return; } while (pgdp++, addr = next, addr != end); } #else static inline void gup_fast_pgd_range(unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { } #endif /* CONFIG_HAVE_GUP_FAST */ #ifndef gup_fast_permitted /* * Check if it's allowed to use get_user_pages_fast_only() for the range, or * we need to fall back to the slow version: */ static bool gup_fast_permitted(unsigned long start, unsigned long end) { return true; } #endif static unsigned long gup_fast(unsigned long start, unsigned long end, unsigned int gup_flags, struct page **pages) { unsigned long flags; int nr_pinned = 0; unsigned seq; if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) || !gup_fast_permitted(start, end)) return 0; if (gup_flags & FOLL_PIN) { seq = raw_read_seqcount(&current->mm->write_protect_seq); if (seq & 1) return 0; } /* * Disable interrupts. The nested form is used, in order to allow full, * general purpose use of this routine. * * With interrupts disabled, we block page table pages from being freed * from under us. See struct mmu_table_batch comments in * include/asm-generic/tlb.h for more details. * * We do not adopt an rcu_read_lock() here as we also want to block IPIs * that come from THPs splitting. */ local_irq_save(flags); gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned); local_irq_restore(flags); /* * When pinning pages for DMA there could be a concurrent write protect * from fork() via copy_page_range(), in this case always fail GUP-fast.
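* The write_protect_seq sample taken above catches that race: if the sequence
* count changed while we walked the page tables, every pin taken here is
* dropped and 0 is returned, so the caller falls back to the slow path.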
*/ if (gup_flags & FOLL_PIN) { if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) { gup_fast_unpin_user_pages(pages, nr_pinned); return 0; } else { sanity_check_pinned_pages(pages, nr_pinned); } } return nr_pinned; } static int gup_fast_fallback(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages) { unsigned long len, end; unsigned long nr_pinned; int locked = 0; int ret; if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM | FOLL_FORCE | FOLL_PIN | FOLL_GET | FOLL_FAST_ONLY | FOLL_NOFAULT | FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT))) return -EINVAL; if (gup_flags & FOLL_PIN) mm_set_has_pinned_flag(&current->mm->flags); if (!(gup_flags & FOLL_FAST_ONLY)) might_lock_read(&current->mm->mmap_lock); start = untagged_addr(start) & PAGE_MASK; len = nr_pages << PAGE_SHIFT; if (check_add_overflow(start, len, &end)) return -EOVERFLOW; if (end > TASK_SIZE_MAX) return -EFAULT; if (unlikely(!access_ok((void __user *)start, len))) return -EFAULT; nr_pinned = gup_fast(start, end, gup_flags, pages); if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY) return nr_pinned; /* Slow path: try to get the remaining pages with get_user_pages */ start += nr_pinned << PAGE_SHIFT; pages += nr_pinned; ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned, pages, &locked, gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE); if (ret < 0) { /* * The caller has to unpin the pages we already pinned so * returning -errno is not an option */ if (nr_pinned) return nr_pinned; return ret; } return ret + nr_pinned; } /** * get_user_pages_fast_only() - pin user pages in memory * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * the regular GUP. * * If the architecture does not support this function, simply return with no * pages pinned. * * Careful, careful! COW breaking can go either way, so a non-write * access can get ambiguous page results. If you call this function without * 'write' set, you'd better be sure that you're ok with that ambiguity. */ int get_user_pages_fast_only(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages) { /* * Internally (within mm/gup.c), gup fast variants must set FOLL_GET, * because gup fast is always a "pin with a +1 page refcount" request. * * FOLL_FAST_ONLY is required in order to match the API description of * this routine: no fall back to regular ("slow") GUP. */ if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET | FOLL_FAST_ONLY)) return -EINVAL; return gup_fast_fallback(start, nr_pages, gup_flags, pages); } EXPORT_SYMBOL_GPL(get_user_pages_fast_only); /** * get_user_pages_fast() - pin user pages in memory * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Attempt to pin user pages in memory without taking mm->mmap_lock. * If not successful, it will fall back to taking the lock and * calling get_user_pages(). * * Returns number of pages pinned. This may be fewer than the number requested. * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns * -errno.
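*
* A minimal illustrative sketch (not from a specific caller; user_addr is a
* placeholder, error handling trimmed). Pages obtained here are released with
* put_page():
*
*	struct page *pages[16];
*	int i, got;
*
*	got = get_user_pages_fast(user_addr, 16, FOLL_WRITE, pages);
*	for (i = 0; i < got; i++) {
*		... use pages[i] ...
*		put_page(pages[i]);
*	}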
*/ int get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages) { /* * The caller may or may not have explicitly set FOLL_GET; either way is * OK. However, internally (within mm/gup.c), gup fast variants must set * FOLL_GET, because gup fast is always a "pin with a +1 page refcount" * request. */ if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) return -EINVAL; return gup_fast_fallback(start, nr_pages, gup_flags, pages); } EXPORT_SYMBOL_GPL(get_user_pages_fast); /** * pin_user_pages_fast() - pin user pages in memory without taking locks * * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See * get_user_pages_fast() for documentation on the function arguments, because * the arguments here are identical. * * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for further details. * * Note that if a zero_page is amongst the returned pages, it will not have * pins in it and unpin_user_page() will not remove pins from it. */ int pin_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages) { if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) return -EINVAL; return gup_fast_fallback(start, nr_pages, gup_flags, pages); } EXPORT_SYMBOL_GPL(pin_user_pages_fast); /** * pin_user_pages_remote() - pin pages of a remote process * * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * @locked: pointer to lock flag indicating whether lock is held and * subsequently whether VM_FAULT_RETRY functionality can be * utilised. Lock must initially be held. * * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See * get_user_pages_remote() for documentation on the function arguments, because * the arguments here are identical. * * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for details. * * Note that if a zero_page is amongst the returned pages, it will not have * pins in it and unpin_user_page*() will not remove pins from it. */ long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked) { int local_locked = 1; if (!is_valid_gup_args(pages, locked, &gup_flags, FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE)) return 0; return __gup_longterm_locked(mm, start, nr_pages, pages, locked ? locked : &local_locked, gup_flags); } EXPORT_SYMBOL(pin_user_pages_remote); /** * pin_user_pages() - pin user pages in memory for use by other devices * * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and * FOLL_PIN is set. * * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for details. 
* * Note that if a zero_page is amongst the returned pages, it will not have * pins in it and unpin_user_page*() will not remove pins from it. */ long pin_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages) { int locked = 1; if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) return 0; return __gup_longterm_locked(current->mm, start, nr_pages, pages, &locked, gup_flags); } EXPORT_SYMBOL(pin_user_pages); /* * pin_user_pages_unlocked() is the FOLL_PIN variant of * get_user_pages_unlocked(). Behavior is the same, except that this one sets * FOLL_PIN and rejects FOLL_GET. * * Note that if a zero_page is amongst the returned pages, it will not have * pins in it and unpin_user_page*() will not remove pins from it. */ long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags) { int locked = 0; if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE)) return 0; return __gup_longterm_locked(current->mm, start, nr_pages, pages, &locked, gup_flags); } EXPORT_SYMBOL(pin_user_pages_unlocked); /** * memfd_pin_folios() - pin folios associated with a memfd * @memfd: the memfd whose folios are to be pinned * @start: the first memfd offset * @end: the last memfd offset (inclusive) * @folios: array that receives pointers to the folios pinned * @max_folios: maximum number of entries in @folios * @offset: the offset into the first folio * * Attempt to pin folios associated with a memfd in the contiguous range * [start, end]. Given that a memfd is either backed by shmem or hugetlb, * the folios can either be found in the page cache or need to be allocated * if necessary. Once the folios are located, they are all pinned via * FOLL_PIN and @offset is populated with the offset into the first folio. * And, eventually, these pinned folios must be released either using * unpin_folios() or unpin_folio(). * * It must be noted that the folios may be pinned for an indefinite amount * of time. And, in most cases, the duration of time they may stay pinned * would be controlled by the userspace. This behavior is effectively the * same as using FOLL_LONGTERM with other GUP APIs. * * Returns number of folios pinned, which could be less than @max_folios * as it depends on the folio sizes that cover the range [start, end]. * If no folios were pinned, it returns -errno. */ long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end, struct folio **folios, unsigned int max_folios, pgoff_t *offset) { unsigned int flags, nr_folios, nr_found; unsigned int i, pgshift = PAGE_SHIFT; pgoff_t start_idx, end_idx, next_idx; struct folio *folio = NULL; struct folio_batch fbatch; struct hstate *h; long ret = -EINVAL; if (start < 0 || start > end || !max_folios) return -EINVAL; if (!memfd) return -EINVAL; if (!shmem_file(memfd) && !is_file_hugepages(memfd)) return -EINVAL; if (end >= i_size_read(file_inode(memfd))) return -EINVAL; if (is_file_hugepages(memfd)) { h = hstate_file(memfd); pgshift = huge_page_shift(h); } flags = memalloc_pin_save(); do { nr_folios = 0; start_idx = start >> pgshift; end_idx = end >> pgshift; if (is_file_hugepages(memfd)) { start_idx <<= huge_page_order(h); end_idx <<= huge_page_order(h); } folio_batch_init(&fbatch); while (start_idx <= end_idx && nr_folios < max_folios) { /* * In most cases, we should be able to find the folios * in the page cache. If we cannot find them for some * reason, we try to allocate them and add them to the * page cache.
*/ nr_found = filemap_get_folios_contig(memfd->f_mapping, &start_idx, end_idx, &fbatch); if (folio) { folio_put(folio); folio = NULL; } next_idx = 0; for (i = 0; i < nr_found; i++) { /* * As there can be multiple entries for a * given folio in the batch returned by * filemap_get_folios_contig(), the below * check is to ensure that we pin and return a * unique set of folios between start and end. */ if (next_idx && next_idx != folio_index(fbatch.folios[i])) continue; folio = page_folio(&fbatch.folios[i]->page); if (try_grab_folio(folio, 1, FOLL_PIN)) { folio_batch_release(&fbatch); ret = -EINVAL; goto err; } if (nr_folios == 0) *offset = offset_in_folio(folio, start); folios[nr_folios] = folio; next_idx = folio_next_index(folio); if (++nr_folios == max_folios) break; } folio = NULL; folio_batch_release(&fbatch); if (!nr_found) { folio = memfd_alloc_folio(memfd, start_idx); if (IS_ERR(folio)) { ret = PTR_ERR(folio); if (ret != -EEXIST) goto err; folio = NULL; } } } ret = check_and_migrate_movable_folios(nr_folios, folios); } while (ret == -EAGAIN); memalloc_pin_restore(flags); return ret ? ret : nr_folios; err: memalloc_pin_restore(flags); unpin_folios(folios, nr_folios); return ret; } EXPORT_SYMBOL_GPL(memfd_pin_folios); |
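/*
 * Illustrative sketch only -- not part of mm/gup.c. Based on the kernel-doc
 * above, it shows how a caller might pin a user buffer with
 * pin_user_pages_fast() and release it again with unpin_user_pages(). The
 * function name example_pin_user_buffer() and its arguments are hypothetical;
 * it assumes <linux/mm.h> for the GUP API and FOLL_* flags.
 */
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	int pinned;

	/* FOLL_WRITE because the buffer will be written to (e.g. by DMA). */
	pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
	if (pinned < 0)
		return pinned;		/* -errno, nothing was pinned */

	if (pinned != nr_pages) {
		/* Partial pin: drop what we got and report the shortfall. */
		unpin_user_pages(pages, pinned);
		return -EFAULT;
	}

	/* ... use @pages here (e.g. set up a DMA mapping) ... */

	unpin_user_pages(pages, pinned);
	return 0;
}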
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 | /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM thermal #if !defined(_TRACE_THERMAL_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_THERMAL_H #include <linux/devfreq.h> #include <linux/thermal.h> #include <linux/tracepoint.h> #include "thermal_core.h" TRACE_DEFINE_ENUM(THERMAL_TRIP_CRITICAL); TRACE_DEFINE_ENUM(THERMAL_TRIP_HOT); TRACE_DEFINE_ENUM(THERMAL_TRIP_PASSIVE); TRACE_DEFINE_ENUM(THERMAL_TRIP_ACTIVE); #define show_tzt_type(type) \ __print_symbolic(type, \ { THERMAL_TRIP_CRITICAL, "CRITICAL"}, \ { THERMAL_TRIP_HOT, "HOT"}, \ { THERMAL_TRIP_PASSIVE, "PASSIVE"}, \ { THERMAL_TRIP_ACTIVE, "ACTIVE"}) TRACE_EVENT(thermal_temperature, TP_PROTO(struct thermal_zone_device *tz), TP_ARGS(tz), TP_STRUCT__entry( __string(thermal_zone, tz->type) __field(int, id) __field(int, temp_prev) __field(int, temp) ), TP_fast_assign( __assign_str(thermal_zone); __entry->id = tz->id; __entry->temp_prev = tz->last_temperature; __entry->temp = tz->temperature; ), TP_printk("thermal_zone=%s id=%d temp_prev=%d temp=%d", __get_str(thermal_zone), __entry->id, __entry->temp_prev, __entry->temp) ); TRACE_EVENT(cdev_update, TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target), TP_ARGS(cdev, target), TP_STRUCT__entry( __string(type, cdev->type) __field(unsigned long, target) ), TP_fast_assign( __assign_str(type); __entry->target = target; ), TP_printk("type=%s target=%lu", __get_str(type), __entry->target) ); TRACE_EVENT(thermal_zone_trip, TP_PROTO(struct thermal_zone_device *tz, int trip, enum thermal_trip_type trip_type), TP_ARGS(tz, trip, trip_type), TP_STRUCT__entry( __string(thermal_zone, tz->type) __field(int, id) __field(int, trip) __field(enum thermal_trip_type, trip_type) ), TP_fast_assign( __assign_str(thermal_zone); __entry->id = tz->id; __entry->trip = trip; __entry->trip_type = trip_type; ), TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%s", __get_str(thermal_zone), __entry->id, __entry->trip, show_tzt_type(__entry->trip_type)) ); #ifdef CONFIG_CPU_THERMAL TRACE_EVENT(thermal_power_cpu_get_power_simple, TP_PROTO(int cpu, u32 power), TP_ARGS(cpu, power), TP_STRUCT__entry( __field(int, cpu) __field(u32, power) ), TP_fast_assign( __entry->cpu = cpu; __entry->power = power; ), TP_printk("cpu=%d power=%u", __entry->cpu, __entry->power) ); TRACE_EVENT(thermal_power_cpu_limit, TP_PROTO(const struct cpumask *cpus, unsigned int freq, unsigned long cdev_state, u32 power), TP_ARGS(cpus, freq, cdev_state, power), TP_STRUCT__entry( __bitmask(cpumask, num_possible_cpus()) __field(unsigned int, freq ) __field(unsigned long, cdev_state) __field(u32, power ) ), TP_fast_assign( __assign_bitmask(cpumask, cpumask_bits(cpus), num_possible_cpus()); __entry->freq = freq; __entry->cdev_state = cdev_state; __entry->power = power; ), TP_printk("cpus=%s freq=%u 
cdev_state=%lu power=%u", __get_bitmask(cpumask), __entry->freq, __entry->cdev_state, __entry->power) ); #endif /* CONFIG_CPU_THERMAL */ #ifdef CONFIG_DEVFREQ_THERMAL TRACE_EVENT(thermal_power_devfreq_get_power, TP_PROTO(struct thermal_cooling_device *cdev, struct devfreq_dev_status *status, unsigned long freq, u32 power), TP_ARGS(cdev, status, freq, power), TP_STRUCT__entry( __string(type, cdev->type ) __field(unsigned long, freq ) __field(u32, busy_time) __field(u32, total_time) __field(u32, power) ), TP_fast_assign( __assign_str(type); __entry->freq = freq; __entry->busy_time = status->busy_time; __entry->total_time = status->total_time; __entry->power = power; ), TP_printk("type=%s freq=%lu load=%u power=%u", __get_str(type), __entry->freq, __entry->total_time == 0 ? 0 : (100 * __entry->busy_time) / __entry->total_time, __entry->power) ); TRACE_EVENT(thermal_power_devfreq_limit, TP_PROTO(struct thermal_cooling_device *cdev, unsigned long freq, unsigned long cdev_state, u32 power), TP_ARGS(cdev, freq, cdev_state, power), TP_STRUCT__entry( __string(type, cdev->type) __field(unsigned int, freq ) __field(unsigned long, cdev_state) __field(u32, power ) ), TP_fast_assign( __assign_str(type); __entry->freq = freq; __entry->cdev_state = cdev_state; __entry->power = power; ), TP_printk("type=%s freq=%u cdev_state=%lu power=%u", __get_str(type), __entry->freq, __entry->cdev_state, __entry->power) ); #endif /* CONFIG_DEVFREQ_THERMAL */ #endif /* _TRACE_THERMAL_H */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE thermal_trace /* This part must be outside protection */ #include <trace/define_trace.h> |
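/*
 * Illustrative sketch only -- not part of thermal_trace.h. It shows how the
 * tracepoints declared above are typically emitted: exactly one translation
 * unit in the thermal core defines CREATE_TRACE_POINTS before including this
 * header, and then calls the generated trace_*() helpers. The functions
 * example_update_temperature() and example_cdev_set_state() are hypothetical
 * stand-ins for the corresponding thermal core code paths.
 */
#define CREATE_TRACE_POINTS
#include "thermal_trace.h"

static void example_update_temperature(struct thermal_zone_device *tz, int temp)
{
	tz->last_temperature = tz->temperature;
	tz->temperature = temp;

	/* Records thermal_zone, id, temp_prev and temp as defined above. */
	trace_thermal_temperature(tz);
}

static void example_cdev_set_state(struct thermal_cooling_device *cdev,
				   unsigned long target)
{
	/* Records the cooling device type and the requested target state. */
	trace_cdev_update(cdev, target);
}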
1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 | // SPDX-License-Identifier: GPL-2.0-only /* * RTL8XXXU mac80211 USB driver - 8723b specific subdriver * * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com> * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This driver was written as a replacement for the vendor provided * rtl8723au driver. As the Realtek 8xxx chips are very similar in * their programming interface, I have started adding support for * additional 8xxx chips like the 8192cu, 8188cus, etc. */ #include "regs.h" #include "rtl8xxxu.h" static const struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = { {0x02f, 0x30}, {0x035, 0x00}, {0x039, 0x08}, {0x04e, 0xe0}, {0x064, 0x00}, {0x067, 0x20}, {0x428, 0x0a}, {0x429, 0x10}, {0x430, 0x00}, {0x431, 0x00}, {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05}, {0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05}, {0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01}, {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00}, {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xf0}, {0x44a, 0x0f}, {0x44b, 0x3e}, {0x44c, 0x10}, {0x44d, 0x00}, {0x44e, 0x00}, {0x44f, 0x00}, {0x450, 0x00}, {0x451, 0xf0}, {0x452, 0x0f}, {0x453, 0x00}, {0x456, 0x5e}, {0x460, 0x66}, {0x461, 0x66}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2}, {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3}, {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4}, {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4}, {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a}, {0x516, 0x0a}, {0x525, 0x4f}, {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50}, {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a}, {0x620, 0xff}, {0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff}, {0x624, 0xff}, {0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff}, {0x638, 0x50}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e}, {0x63f, 0x0e}, {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00}, {0x652, 0xc8}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43}, {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, {0x70a, 0x65}, {0x70b, 0x87}, {0x765, 0x18}, {0x76e, 0x04}, {0xffff, 0xff}, }; static const struct rtl8xxxu_reg32val rtl8723b_phy_1t_init_table[] = { {0x800, 0x80040000}, {0x804, 0x00000003}, {0x808, 0x0000fc00}, {0x80c, 0x0000000a}, {0x810, 0x10001331}, {0x814, 0x020c3d10}, {0x818, 0x02200385}, {0x81c, 0x00000000}, {0x820, 0x01000100}, {0x824, 0x00190204}, {0x828, 0x00000000}, {0x82c, 0x00000000}, {0x830, 0x00000000}, {0x834, 0x00000000}, {0x838, 0x00000000}, {0x83c, 0x00000000}, {0x840, 0x00010000}, {0x844, 0x00000000}, {0x848, 0x00000000}, {0x84c, 0x00000000}, {0x850, 0x00000000}, {0x854, 0x00000000}, {0x858, 0x569a11a9}, {0x85c, 0x01000014}, {0x860, 0x66f60110}, {0x864, 0x061f0649}, {0x868, 0x00000000}, {0x86c, 0x27272700}, {0x870, 0x07000760}, {0x874, 
0x25004000}, {0x878, 0x00000808}, {0x87c, 0x00000000}, {0x880, 0xb0000c1c}, {0x884, 0x00000001}, {0x888, 0x00000000}, {0x88c, 0xccc000c0}, {0x890, 0x00000800}, {0x894, 0xfffffffe}, {0x898, 0x40302010}, {0x89c, 0x00706050}, {0x900, 0x00000000}, {0x904, 0x00000023}, {0x908, 0x00000000}, {0x90c, 0x81121111}, {0x910, 0x00000002}, {0x914, 0x00000201}, {0xa00, 0x00d047c8}, {0xa04, 0x80ff800c}, {0xa08, 0x8c838300}, {0xa0c, 0x2e7f120f}, {0xa10, 0x9500bb78}, {0xa14, 0x1114d028}, {0xa18, 0x00881117}, {0xa1c, 0x89140f00}, {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317}, {0xa28, 0x00000204}, {0xa2c, 0x00d30000}, {0xa70, 0x101fbf00}, {0xa74, 0x00000007}, {0xa78, 0x00000900}, {0xa7c, 0x225b0606}, {0xa80, 0x21806490}, {0xb2c, 0x00000000}, {0xc00, 0x48071d40}, {0xc04, 0x03a05611}, {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c}, {0xc10, 0x08800000}, {0xc14, 0x40000100}, {0xc18, 0x08800000}, {0xc1c, 0x40000100}, {0xc20, 0x00000000}, {0xc24, 0x00000000}, {0xc28, 0x00000000}, {0xc2c, 0x00000000}, {0xc30, 0x69e9ac44}, {0xc34, 0x469652af}, {0xc38, 0x49795994}, {0xc3c, 0x0a97971c}, {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7}, {0xc48, 0xec020107}, {0xc4c, 0x007f037f}, {0xc50, 0x69553420}, {0xc54, 0x43bc0094}, {0xc58, 0x00013149}, {0xc5c, 0x00250492}, {0xc60, 0x00000000}, {0xc64, 0x7112848b}, {0xc68, 0x47c00bff}, {0xc6c, 0x00000036}, {0xc70, 0x2c7f000d}, {0xc74, 0x020610db}, {0xc78, 0x0000001f}, {0xc7c, 0x00b91612}, {0xc80, 0x390000e4}, {0xc84, 0x20f60000}, {0xc88, 0x40000100}, {0xc8c, 0x20200000}, {0xc90, 0x00020e1a}, {0xc94, 0x00000000}, {0xc98, 0x00020e1a}, {0xc9c, 0x00007f7f}, {0xca0, 0x00000000}, {0xca4, 0x000300a0}, {0xca8, 0x00000000}, {0xcac, 0x00000000}, {0xcb0, 0x00000000}, {0xcb4, 0x00000000}, {0xcb8, 0x00000000}, {0xcbc, 0x28000000}, {0xcc0, 0x00000000}, {0xcc4, 0x00000000}, {0xcc8, 0x00000000}, {0xccc, 0x00000000}, {0xcd0, 0x00000000}, {0xcd4, 0x00000000}, {0xcd8, 0x64b22427}, {0xcdc, 0x00766932}, {0xce0, 0x00222222}, {0xce4, 0x00000000}, {0xce8, 0x37644302}, {0xcec, 0x2f97d40c}, {0xd00, 0x00000740}, {0xd04, 0x40020401}, {0xd08, 0x0000907f}, {0xd0c, 0x20010201}, {0xd10, 0xa0633333}, {0xd14, 0x3333bc53}, {0xd18, 0x7a8f5b6f}, {0xd2c, 0xcc979975}, {0xd30, 0x00000000}, {0xd34, 0x80608000}, {0xd38, 0x00000000}, {0xd3c, 0x00127353}, {0xd40, 0x00000000}, {0xd44, 0x00000000}, {0xd48, 0x00000000}, {0xd4c, 0x00000000}, {0xd50, 0x6437140a}, {0xd54, 0x00000000}, {0xd58, 0x00000282}, {0xd5c, 0x30032064}, {0xd60, 0x4653de68}, {0xd64, 0x04518a3c}, {0xd68, 0x00002101}, {0xd6c, 0x2a201c16}, {0xd70, 0x1812362e}, {0xd74, 0x322c2220}, {0xd78, 0x000e3c24}, {0xe00, 0x2d2d2d2d}, {0xe04, 0x2d2d2d2d}, {0xe08, 0x0390272d}, {0xe10, 0x2d2d2d2d}, {0xe14, 0x2d2d2d2d}, {0xe18, 0x2d2d2d2d}, {0xe1c, 0x2d2d2d2d}, {0xe28, 0x00000000}, {0xe30, 0x1000dc1f}, {0xe34, 0x10008c1f}, {0xe38, 0x02140102}, {0xe3c, 0x681604c2}, {0xe40, 0x01007c00}, {0xe44, 0x01004800}, {0xe48, 0xfb000000}, {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f}, {0xe54, 0x10008c1f}, {0xe58, 0x02140102}, {0xe5c, 0x28160d05}, {0xe60, 0x00000008}, {0xe68, 0x001b2556}, {0xe6c, 0x00c00096}, {0xe70, 0x00c00096}, {0xe74, 0x01000056}, {0xe78, 0x01000014}, {0xe7c, 0x01000056}, {0xe80, 0x01000014}, {0xe84, 0x00c00096}, {0xe88, 0x01000056}, {0xe8c, 0x00c00096}, {0xed0, 0x00c00096}, {0xed4, 0x00c00096}, {0xed8, 0x00c00096}, {0xedc, 0x000000d6}, {0xee0, 0x000000d6}, {0xeec, 0x01c00016}, {0xf14, 0x00000003}, {0xf4c, 0x00000000}, {0xf00, 0x00000300}, {0x820, 0x01000100}, {0x800, 0x83040000}, {0xffff, 0xffffffff}, }; static const struct rtl8xxxu_reg32val rtl8xxx_agc_8723bu_table[] = { {0xc78, 0xfd000001}, 
{0xc78, 0xfc010001}, {0xc78, 0xfb020001}, {0xc78, 0xfa030001}, {0xc78, 0xf9040001}, {0xc78, 0xf8050001}, {0xc78, 0xf7060001}, {0xc78, 0xf6070001}, {0xc78, 0xf5080001}, {0xc78, 0xf4090001}, {0xc78, 0xf30a0001}, {0xc78, 0xf20b0001}, {0xc78, 0xf10c0001}, {0xc78, 0xf00d0001}, {0xc78, 0xef0e0001}, {0xc78, 0xee0f0001}, {0xc78, 0xed100001}, {0xc78, 0xec110001}, {0xc78, 0xeb120001}, {0xc78, 0xea130001}, {0xc78, 0xe9140001}, {0xc78, 0xe8150001}, {0xc78, 0xe7160001}, {0xc78, 0xe6170001}, {0xc78, 0xe5180001}, {0xc78, 0xe4190001}, {0xc78, 0xe31a0001}, {0xc78, 0xa51b0001}, {0xc78, 0xa41c0001}, {0xc78, 0xa31d0001}, {0xc78, 0x671e0001}, {0xc78, 0x661f0001}, {0xc78, 0x65200001}, {0xc78, 0x64210001}, {0xc78, 0x63220001}, {0xc78, 0x4a230001}, {0xc78, 0x49240001}, {0xc78, 0x48250001}, {0xc78, 0x47260001}, {0xc78, 0x46270001}, {0xc78, 0x45280001}, {0xc78, 0x44290001}, {0xc78, 0x432a0001}, {0xc78, 0x422b0001}, {0xc78, 0x292c0001}, {0xc78, 0x282d0001}, {0xc78, 0x272e0001}, {0xc78, 0x262f0001}, {0xc78, 0x0a300001}, {0xc78, 0x09310001}, {0xc78, 0x08320001}, {0xc78, 0x07330001}, {0xc78, 0x06340001}, {0xc78, 0x05350001}, {0xc78, 0x04360001}, {0xc78, 0x03370001}, {0xc78, 0x02380001}, {0xc78, 0x01390001}, {0xc78, 0x013a0001}, {0xc78, 0x013b0001}, {0xc78, 0x013c0001}, {0xc78, 0x013d0001}, {0xc78, 0x013e0001}, {0xc78, 0x013f0001}, {0xc78, 0xfc400001}, {0xc78, 0xfb410001}, {0xc78, 0xfa420001}, {0xc78, 0xf9430001}, {0xc78, 0xf8440001}, {0xc78, 0xf7450001}, {0xc78, 0xf6460001}, {0xc78, 0xf5470001}, {0xc78, 0xf4480001}, {0xc78, 0xf3490001}, {0xc78, 0xf24a0001}, {0xc78, 0xf14b0001}, {0xc78, 0xf04c0001}, {0xc78, 0xef4d0001}, {0xc78, 0xee4e0001}, {0xc78, 0xed4f0001}, {0xc78, 0xec500001}, {0xc78, 0xeb510001}, {0xc78, 0xea520001}, {0xc78, 0xe9530001}, {0xc78, 0xe8540001}, {0xc78, 0xe7550001}, {0xc78, 0xe6560001}, {0xc78, 0xe5570001}, {0xc78, 0xe4580001}, {0xc78, 0xe3590001}, {0xc78, 0xa65a0001}, {0xc78, 0xa55b0001}, {0xc78, 0xa45c0001}, {0xc78, 0xa35d0001}, {0xc78, 0x675e0001}, {0xc78, 0x665f0001}, {0xc78, 0x65600001}, {0xc78, 0x64610001}, {0xc78, 0x63620001}, {0xc78, 0x62630001}, {0xc78, 0x61640001}, {0xc78, 0x48650001}, {0xc78, 0x47660001}, {0xc78, 0x46670001}, {0xc78, 0x45680001}, {0xc78, 0x44690001}, {0xc78, 0x436a0001}, {0xc78, 0x426b0001}, {0xc78, 0x286c0001}, {0xc78, 0x276d0001}, {0xc78, 0x266e0001}, {0xc78, 0x256f0001}, {0xc78, 0x24700001}, {0xc78, 0x09710001}, {0xc78, 0x08720001}, {0xc78, 0x07730001}, {0xc78, 0x06740001}, {0xc78, 0x05750001}, {0xc78, 0x04760001}, {0xc78, 0x03770001}, {0xc78, 0x02780001}, {0xc78, 0x01790001}, {0xc78, 0x017a0001}, {0xc78, 0x017b0001}, {0xc78, 0x017c0001}, {0xc78, 0x017d0001}, {0xc78, 0x017e0001}, {0xc78, 0x017f0001}, {0xc50, 0x69553422}, {0xc50, 0x69553420}, {0x824, 0x00390204}, {0xffff, 0xffffffff} }; static const struct rtl8xxxu_rfregval rtl8723bu_radioa_1t_init_table[] = { {0x00, 0x00010000}, {0xb0, 0x000dffe0}, {0xfe, 0x00000000}, {0xfe, 0x00000000}, {0xfe, 0x00000000}, {0xb1, 0x00000018}, {0xfe, 0x00000000}, {0xfe, 0x00000000}, {0xfe, 0x00000000}, {0xb2, 0x00084c00}, {0xb5, 0x0000d2cc}, {0xb6, 0x000925aa}, {0xb7, 0x00000010}, {0xb8, 0x0000907f}, {0x5c, 0x00000002}, {0x7c, 0x00000002}, {0x7e, 0x00000005}, {0x8b, 0x0006fc00}, {0xb0, 0x000ff9f0}, {0x1c, 0x000739d2}, {0x1e, 0x00000000}, {0xdf, 0x00000780}, {0x50, 0x00067435}, /* * The 8723bu vendor driver indicates that bit 8 should be set in * 0x51 for package types TFBGA90, TFBGA80, and TFBGA79. However * they never actually check the package type - and just default * to not setting it. 
*/ {0x51, 0x0006b04e}, {0x52, 0x000007d2}, {0x53, 0x00000000}, {0x54, 0x00050400}, {0x55, 0x0004026e}, {0xdd, 0x0000004c}, {0x70, 0x00067435}, /* * 0x71 has same package type condition as for register 0x51 */ {0x71, 0x0006b04e}, {0x72, 0x000007d2}, {0x73, 0x00000000}, {0x74, 0x00050400}, {0x75, 0x0004026e}, {0xef, 0x00000100}, {0x34, 0x0000add7}, {0x35, 0x00005c00}, {0x34, 0x00009dd4}, {0x35, 0x00005000}, {0x34, 0x00008dd1}, {0x35, 0x00004400}, {0x34, 0x00007dce}, {0x35, 0x00003800}, {0x34, 0x00006cd1}, {0x35, 0x00004400}, {0x34, 0x00005cce}, {0x35, 0x00003800}, {0x34, 0x000048ce}, {0x35, 0x00004400}, {0x34, 0x000034ce}, {0x35, 0x00003800}, {0x34, 0x00002451}, {0x35, 0x00004400}, {0x34, 0x0000144e}, {0x35, 0x00003800}, {0x34, 0x00000051}, {0x35, 0x00004400}, {0xef, 0x00000000}, {0xef, 0x00000100}, {0xed, 0x00000010}, {0x44, 0x0000add7}, {0x44, 0x00009dd4}, {0x44, 0x00008dd1}, {0x44, 0x00007dce}, {0x44, 0x00006cc1}, {0x44, 0x00005cce}, {0x44, 0x000044d1}, {0x44, 0x000034ce}, {0x44, 0x00002451}, {0x44, 0x0000144e}, {0x44, 0x00000051}, {0xef, 0x00000000}, {0xed, 0x00000000}, {0x7f, 0x00020080}, {0xef, 0x00002000}, {0x3b, 0x000380ef}, {0x3b, 0x000302fe}, {0x3b, 0x00028ce6}, {0x3b, 0x000200bc}, {0x3b, 0x000188a5}, {0x3b, 0x00010fbc}, {0x3b, 0x00008f71}, {0x3b, 0x00000900}, {0xef, 0x00000000}, {0xed, 0x00000001}, {0x40, 0x000380ef}, {0x40, 0x000302fe}, {0x40, 0x00028ce6}, {0x40, 0x000200bc}, {0x40, 0x000188a5}, {0x40, 0x00010fbc}, {0x40, 0x00008f71}, {0x40, 0x00000900}, {0xed, 0x00000000}, {0x82, 0x00080000}, {0x83, 0x00008000}, {0x84, 0x00048d80}, {0x85, 0x00068000}, {0xa2, 0x00080000}, {0xa3, 0x00008000}, {0xa4, 0x00048d80}, {0xa5, 0x00068000}, {0xed, 0x00000002}, {0xef, 0x00000002}, {0x56, 0x00000032}, {0x76, 0x00000032}, {0x01, 0x00000780}, {0xff, 0xffffffff} }; static int rtl8723bu_identify_chip(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; u32 val32, sys_cfg, vendor; int ret = 0; sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG); priv->chip_cut = u32_get_bits(sys_cfg, SYS_CFG_CHIP_VERSION_MASK); if (sys_cfg & SYS_CFG_TRP_VAUX_EN) { dev_info(dev, "Unsupported test chip\n"); ret = -ENOTSUPP; goto out; } strscpy(priv->chip_name, "8723BU", sizeof(priv->chip_name)); priv->rtl_chip = RTL8723B; priv->rf_paths = 1; priv->rx_paths = 1; priv->tx_paths = 1; val32 = rtl8xxxu_read32(priv, REG_MULTI_FUNC_CTRL); if (val32 & MULTI_WIFI_FUNC_EN) priv->has_wifi = 1; if (val32 & MULTI_BT_FUNC_EN) priv->has_bluetooth = 1; if (val32 & MULTI_GPS_FUNC_EN) priv->has_gps = 1; priv->is_multi_func = 1; vendor = sys_cfg & SYS_CFG_VENDOR_EXT_MASK; rtl8xxxu_identify_vendor_2bits(priv, vendor); val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS); priv->rom_rev = u32_get_bits(val32, GPIO_RF_RL_ID); rtl8xxxu_config_endpoints_sie(priv); /* * Fallback for devices that do not provide REG_NORMAL_SIE_EP_TX */ if (!priv->ep_tx_count) ret = rtl8xxxu_config_endpoints_no_sie(priv); out: return ret; } static void rtl8723bu_write_btreg(struct rtl8xxxu_priv *priv, u8 reg, u8 data) { struct h2c_cmd h2c; int reqnum = 0; memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER; h2c.bt_mp_oper.operreq = 0 | (reqnum << 4); h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE; h2c.bt_mp_oper.data = data; rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper)); reqnum++; memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER; h2c.bt_mp_oper.operreq = 0 | (reqnum << 4); h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE; h2c.bt_mp_oper.addr = reg; rtl8xxxu_gen2_h2c_cmd(priv, 
&h2c, sizeof(h2c.bt_mp_oper)); } static void rtl8723bu_reset_8051(struct rtl8xxxu_priv *priv) { u8 val8; u16 sys_func; val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL); val8 &= ~BIT(1); rtl8xxxu_write8(priv, REG_RSV_CTRL, val8); val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); val8 &= ~BIT(0); rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC); sys_func &= ~SYS_FUNC_CPU_ENABLE; rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL); val8 &= ~BIT(1); rtl8xxxu_write8(priv, REG_RSV_CTRL, val8); val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); val8 |= BIT(0); rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); sys_func |= SYS_FUNC_CPU_ENABLE; rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); } static void rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) { u32 val32, ofdm, mcs; u8 cck, ofdmbase, mcsbase; int group, tx_idx; tx_idx = 0; group = rtl8xxxu_gen2_channel_to_group(channel); cck = priv->cck_tx_power_index_B[group]; val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32); val32 &= 0xffff00ff; val32 |= (cck << 8); rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32); val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11); val32 &= 0xff; val32 |= ((cck << 8) | (cck << 16) | (cck << 24)); rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32); ofdmbase = priv->ht40_1s_tx_power_index_B[group]; ofdmbase += priv->ofdm_tx_power_diff[tx_idx].b; ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24; rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm); rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm); mcsbase = priv->ht40_1s_tx_power_index_B[group]; if (ht40) mcsbase += priv->ht40_tx_power_diff[tx_idx++].b; else mcsbase += priv->ht20_tx_power_diff[tx_idx++].b; mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24; rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs); rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs); } static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv) { struct rtl8723bu_efuse *efuse = &priv->efuse_wifi.efuse8723bu; int i; if (efuse->rtl_id != cpu_to_le16(0x8129)) return -EINVAL; ether_addr_copy(priv->mac_addr, efuse->mac_addr); memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base, sizeof(efuse->tx_power_index_A.cck_base)); memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base, sizeof(efuse->tx_power_index_B.cck_base)); memcpy(priv->ht40_1s_tx_power_index_A, efuse->tx_power_index_A.ht40_base, sizeof(efuse->tx_power_index_A.ht40_base)); memcpy(priv->ht40_1s_tx_power_index_B, efuse->tx_power_index_B.ht40_base, sizeof(efuse->tx_power_index_B.ht40_base)); priv->ofdm_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.a; priv->ofdm_tx_power_diff[0].b = efuse->tx_power_index_B.ht20_ofdm_1s_diff.a; priv->ht20_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.b; priv->ht20_tx_power_diff[0].b = efuse->tx_power_index_B.ht20_ofdm_1s_diff.b; priv->ht40_tx_power_diff[0].a = 0; priv->ht40_tx_power_diff[0].b = 0; for (i = 1; i < RTL8723B_TX_COUNT; i++) { priv->ofdm_tx_power_diff[i].a = efuse->tx_power_index_A.pwr_diff[i - 1].ofdm; priv->ofdm_tx_power_diff[i].b = efuse->tx_power_index_B.pwr_diff[i - 1].ofdm; priv->ht20_tx_power_diff[i].a = efuse->tx_power_index_A.pwr_diff[i - 1].ht20; priv->ht20_tx_power_diff[i].b = efuse->tx_power_index_B.pwr_diff[i - 1].ht20; priv->ht40_tx_power_diff[i].a = efuse->tx_power_index_A.pwr_diff[i - 1].ht40; priv->ht40_tx_power_diff[i].b = efuse->tx_power_index_B.pwr_diff[i - 
1].ht40; } priv->default_crystal_cap = priv->efuse_wifi.efuse8723bu.xtal_k & 0x3f; return 0; } static int rtl8723bu_load_firmware(struct rtl8xxxu_priv *priv) { const char *fw_name; int ret; if (priv->enable_bluetooth) fw_name = "rtlwifi/rtl8723bu_bt.bin"; else fw_name = "rtlwifi/rtl8723bu_nic.bin"; ret = rtl8xxxu_load_firmware(priv, fw_name); return ret; } static void rtl8723bu_init_phy_bb(struct rtl8xxxu_priv *priv) { u8 val8; u16 val16; val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF; rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); /* 6. 0x1f[7:0] = 0x07 */ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; rtl8xxxu_write8(priv, REG_RF_CTRL, val8); /* Why? */ rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3); rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80); rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table); rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table); } static int rtl8723bu_init_phy_rf(struct rtl8xxxu_priv *priv) { int ret; ret = rtl8xxxu_init_phy_rf(priv, rtl8723bu_radioa_1t_init_table, RF_A); /* * PHY LCK */ rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01); msleep(200); rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0); return ret; } void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv) { u32 val32; val32 = rtl8xxxu_read32(priv, REG_PAD_CTRL1); val32 &= ~(BIT(20) | BIT(24)); rtl8xxxu_write32(priv, REG_PAD_CTRL1, val32); val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG); val32 &= ~BIT(4); rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32); val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG); val32 |= BIT(3); rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32); val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); val32 |= BIT(24); rtl8xxxu_write32(priv, REG_LEDCFG0, val32); val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); val32 &= ~BIT(23); rtl8xxxu_write32(priv, REG_LEDCFG0, val32); val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER); val32 |= (BIT(0) | BIT(1)); rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32); val32 = rtl8xxxu_read32(priv, REG_RFE_CTRL_ANTA_SRC); val32 &= 0xffffff00; val32 |= 0x77; rtl8xxxu_write32(priv, REG_RFE_CTRL_ANTA_SRC, val32); val32 = rtl8xxxu_read32(priv, REG_PWR_DATA); val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN; rtl8xxxu_write32(priv, REG_PWR_DATA, val32); } static int rtl8723bu_iqk_path_a(struct rtl8xxxu_priv *priv) { u32 reg_eac, reg_e94, reg_e9c, path_sel, val32; int result = 0; path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * Enable path A PA in TX IQK mode */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x20000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0003f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xc7f87); /* * Tx IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x821403ea); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000); rtl8xxxu_write32(priv, 
REG_TX_IQK_PI_B, 0x82110000); rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911); /* * Enter IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; val32 |= 0x80800000; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * The vendor driver indicates the USB module is always using * S0S1 path 1 for the 8723bu. This may be different for 8192eu */ if (priv->rf_paths > 1) rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000); else rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280); /* * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu. * No trace of this in the 8192eu or 8188eu vendor drivers. */ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); mdelay(1); /* Restore Ant Path */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel); #ifdef RTL8723BU_BT /* GNT_BT = 1 */ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800); #endif /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); val32 = (reg_e9c >> 16) & 0x3ff; if (val32 & 0x200) val32 = 0x400 - val32; if (!(reg_eac & BIT(28)) && ((reg_e94 & 0x03ff0000) != 0x01420000) && ((reg_e9c & 0x03ff0000) != 0x00420000) && ((reg_e94 & 0x03ff0000) < 0x01100000) && ((reg_e94 & 0x03ff0000) > 0x00f00000) && val32 < 0xf) result |= 0x01; else /* If TX not OK, ignore RX */ goto out; out: return result; } static int rtl8723bu_rx_iqk_path_a(struct rtl8xxxu_priv *priv) { u32 reg_ea4, reg_eac, reg_e94, reg_e9c, path_sel, val32; int result = 0; path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * Enable path A PA in TX IQK mode */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7); /* * Tx IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160ff0); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000); rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000); rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911); /* * Enter IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; val32 |= 0x80800000; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * The vendor driver indicates the USB module is always using * S0S1 path 1 for the 8723bu. 
This may be different for 8192eu */ if (priv->rf_paths > 1) rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000); else rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280); /* * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu. * No trace of this in the 8192eu or 8188eu vendor drivers. */ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); mdelay(1); /* Restore Ant Path */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel); #ifdef RTL8723BU_BT /* GNT_BT = 1 */ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800); #endif /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); val32 = (reg_e9c >> 16) & 0x3ff; if (val32 & 0x200) val32 = 0x400 - val32; if (!(reg_eac & BIT(28)) && ((reg_e94 & 0x03ff0000) != 0x01420000) && ((reg_e9c & 0x03ff0000) != 0x00420000) && ((reg_e94 & 0x03ff0000) < 0x01100000) && ((reg_e94 & 0x03ff0000) > 0x00f00000) && val32 < 0xf) result |= 0x01; else /* If TX not OK, ignore RX */ goto out; val32 = 0x80007c00 | (reg_e94 &0x3ff0000) | ((reg_e9c & 0x3ff0000) >> 16); rtl8xxxu_write32(priv, REG_TX_IQK, val32); /* * Modify RX IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7d77); /* * PA, PAD setting */ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, 0xf80); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_55, 0x4021f); /* * RX IQK setting */ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82110000); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x2816001f); rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000); rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a8d1); /* * Enter IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; val32 |= 0x80800000; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); if (priv->rf_paths > 1) rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000); else rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280); /* * Disable BT */ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); mdelay(1); /* Restore Ant Path */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel); #ifdef RTL8723BU_BT /* GNT_BT = 1 */ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800); #endif /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, 
REG_FPGA0_IQK, val32); /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, 0x780); val32 = (reg_eac >> 16) & 0x3ff; if (val32 & 0x200) val32 = 0x400 - val32; if (!(reg_eac & BIT(27)) && ((reg_ea4 & 0x03ff0000) != 0x01320000) && ((reg_eac & 0x03ff0000) != 0x00360000) && ((reg_ea4 & 0x03ff0000) < 0x01100000) && ((reg_ea4 & 0x03ff0000) > 0x00f00000) && val32 < 0xf) result |= 0x02; else /* If TX not OK, ignore RX */ goto out; out: return result; } static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, int result[][8], int t) { struct device *dev = &priv->udev->dev; u32 i, val32; int path_a_ok /*, path_b_ok */; int retry = 2; static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = { REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH, REG_RX_WAIT_CCA, REG_TX_CCK_RFON, REG_TX_CCK_BBON, REG_TX_OFDM_RFON, REG_TX_OFDM_BBON, REG_TX_TO_RX, REG_TX_TO_TX, REG_RX_CCK, REG_RX_OFDM, REG_RX_WAIT_RIFS, REG_RX_TO_RX, REG_STANDBY, REG_SLEEP, REG_PMPD_ANAEN }; static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = { REG_TXPAUSE, REG_BEACON_CTRL, REG_BEACON_CTRL_1, REG_GPIO_MUXCFG }; static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = { REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR, REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B, REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE, REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE }; u8 xa_agc = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1) & 0xff; u8 xb_agc = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1) & 0xff; /* * Note: IQ calibration must be performed after loading * PHY_REG.txt , and radio_a, radio_b.txt */ if (t == 0) { /* Save ADDA parameters, turn Path A ADDA on */ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup, RTL8XXXU_ADDA_REGS); rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup); rtl8xxxu_save_regs(priv, iqk_bb_regs, priv->bb_backup, RTL8XXXU_BB_REGS); } rtl8xxxu_path_adda_on(priv, adda_regs, true); /* MAC settings */ rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup); val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING); val32 |= 0x0f000000; rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32); rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600); rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000); /* * RX IQ calibration setting for 8723B D cut large current issue * when leaving IPS */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED); val32 |= 0x20; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_43, 0x60fbd); for (i = 0; i < retry; i++) { path_a_ok = rtl8723bu_iqk_path_a(priv); if (path_a_ok == 0x01) { val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); result[t][0] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); result[t][1] = (val32 >> 16) & 0x3ff; 
break; } } if (!path_a_ok) dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__); for (i = 0; i < retry; i++) { path_a_ok = rtl8723bu_rx_iqk_path_a(priv); if (path_a_ok == 0x03) { val32 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2); result[t][2] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); result[t][3] = (val32 >> 16) & 0x3ff; break; } } if (!path_a_ok) dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__); if (priv->tx_paths > 1) { #if 1 dev_warn(dev, "%s: Path B not supported\n", __func__); #else /* * Path A into standby */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000); val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; val32 |= 0x80800000; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* Turn Path B ADDA on */ rtl8xxxu_path_adda_on(priv, adda_regs, false); for (i = 0; i < retry; i++) { path_b_ok = rtl8xxxu_iqk_path_b(priv); if (path_b_ok == 0x03) { val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); result[t][4] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); result[t][5] = (val32 >> 16) & 0x3ff; break; } } if (!path_b_ok) dev_dbg(dev, "%s: Path B IQK failed!\n", __func__); for (i = 0; i < retry; i++) { path_b_ok = rtl8723bu_rx_iqk_path_b(priv); if (path_a_ok == 0x03) { val32 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2); result[t][6] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2); result[t][7] = (val32 >> 16) & 0x3ff; break; } } if (!path_b_ok) dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__); #endif } /* Back to BB mode, load original value */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); if (t) { /* Reload ADDA power saving parameters */ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup, RTL8XXXU_ADDA_REGS); /* Reload MAC parameters */ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup); /* Reload BB parameters */ rtl8xxxu_restore_regs(priv, iqk_bb_regs, priv->bb_backup, RTL8XXXU_BB_REGS); /* Restore RX initial gain */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); val32 &= 0xffffff00; rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50); rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc); if (priv->tx_paths > 1) { val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1); val32 &= 0xffffff00; rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1, val32 | 0x50); rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1, val32 | xb_agc); } /* Load 0xe30 IQC default value */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00); } } static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; int result[4][8]; /* last is final result */ int i, candidate; bool path_a_ok, path_b_ok; u32 reg_e94, reg_e9c, reg_ea4, reg_eac; u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc; u32 val32, bt_control; s32 reg_tmp = 0; bool simu; rtl8xxxu_gen2_prepare_calibrate(priv, 1); memset(result, 0, sizeof(result)); candidate = -1; path_a_ok = false; path_b_ok = false; bt_control = rtl8xxxu_read32(priv, REG_BT_CONTROL_8723BU); for (i = 0; i < 3; i++) { rtl8723bu_phy_iqcalibrate(priv, result, i); if (i == 1) { simu = rtl8xxxu_gen2_simularity_compare(priv, result, 0, 1); if (simu) { candidate = 0; break; } } if (i == 2) { simu = 
rtl8xxxu_gen2_simularity_compare(priv, result, 0, 2); if (simu) { candidate = 0; break; } simu = rtl8xxxu_gen2_simularity_compare(priv, result, 1, 2); if (simu) { candidate = 1; } else { for (i = 0; i < 8; i++) reg_tmp += result[3][i]; if (reg_tmp) candidate = 3; else candidate = -1; } } } for (i = 0; i < 4; i++) { reg_e94 = result[i][0]; reg_e9c = result[i][1]; reg_ea4 = result[i][2]; reg_eac = result[i][3]; reg_eb4 = result[i][4]; reg_ebc = result[i][5]; reg_ec4 = result[i][6]; reg_ecc = result[i][7]; } if (candidate >= 0) { reg_e94 = result[candidate][0]; priv->rege94 = reg_e94; reg_e9c = result[candidate][1]; priv->rege9c = reg_e9c; reg_ea4 = result[candidate][2]; reg_eac = result[candidate][3]; reg_eb4 = result[candidate][4]; priv->regeb4 = reg_eb4; reg_ebc = result[candidate][5]; priv->regebc = reg_ebc; reg_ec4 = result[candidate][6]; reg_ecc = result[candidate][7]; dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); dev_dbg(dev, "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n", __func__, reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); path_a_ok = true; path_b_ok = true; } else { reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100; reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0; } if (reg_e94 && candidate >= 0) rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result, candidate, (reg_ea4 == 0)); if (priv->tx_paths > 1 && reg_eb4) rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result, candidate, (reg_ec4 == 0)); rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg, priv->bb_recovery_backup, RTL8XXXU_BB_REGS); rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, bt_control); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x18000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xe6177); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED); val32 |= 0x20; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32); rtl8xxxu_write_rfreg(priv, RF_A, 0x43, 0x300bd); if (priv->rf_paths > 1) dev_dbg(dev, "%s: 8723BU 2T not supported\n", __func__); rtl8xxxu_gen2_prepare_calibrate(priv, 0); } static int rtl8723bu_active_to_emu(struct rtl8xxxu_priv *priv) { u8 val8; u16 val16; u32 val32; int count, ret = 0; /* Turn off RF */ rtl8xxxu_write8(priv, REG_RF_CTRL, 0); /* Enable rising edge triggering interrupt */ val16 = rtl8xxxu_read16(priv, REG_GPIO_INTM); val16 &= ~GPIO_INTM_EDGE_TRIG_IRQ; rtl8xxxu_write16(priv, REG_GPIO_INTM, val16); /* Release WLON reset 0x04[16]= 1*/ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); val32 |= APS_FSMCO_WLON_RESET; rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); /* 0x0005[1] = 1 turn off MAC by HW state machine*/ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); val8 |= BIT(1); rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); if ((val8 & BIT(1)) == 0) break; udelay(10); } if (!count) { dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n", __func__); ret = -EBUSY; goto exit; } /* Enable BT control XTAL setting */ val8 = rtl8xxxu_read8(priv, REG_AFE_MISC); val8 &= ~AFE_MISC_WL_XTAL_CTRL; rtl8xxxu_write8(priv, REG_AFE_MISC, val8); /* 0x0000[5] = 1 analog Ips to digital, 1:isolation */ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL); val8 |= SYS_ISO_ANALOG_IPS; rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8); 
/* 0x0020[0] = 0 disable LDOA12 MACRO block*/ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL); val8 &= ~LDOA15_ENABLE; rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8); exit: return ret; } static int rtl8723b_emu_to_active(struct rtl8xxxu_priv *priv) { u8 val8; u32 val32; int count, ret = 0; /* 0x20[0] = 1 enable LDOA12 MACRO block for all interface */ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL); val8 |= LDOA15_ENABLE; rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8); /* 0x67[0] = 0 to disable BT_GPS_SEL pins*/ val8 = rtl8xxxu_read8(priv, 0x0067); val8 &= ~BIT(4); rtl8xxxu_write8(priv, 0x0067, val8); mdelay(1); /* 0x00[5] = 0 release analog Ips to digital, 1:isolation */ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL); val8 &= ~SYS_ISO_ANALOG_IPS; rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8); /* Disable SW LPS 0x04[10]= 0 */ val32 = rtl8xxxu_read8(priv, REG_APS_FSMCO); val32 &= ~APS_FSMCO_SW_LPS; rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); /* Wait until 0x04[17] = 1 power ready */ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); if (val32 & BIT(17)) break; udelay(10); } if (!count) { ret = -EBUSY; goto exit; } /* We should be able to optimize the following three entries into one */ /* Release WLON reset 0x04[16]= 1*/ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); val32 |= APS_FSMCO_WLON_RESET; rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); /* Disable HWPDN 0x04[15]= 0*/ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); val32 &= ~APS_FSMCO_HW_POWERDOWN; rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); /* Disable WL suspend*/ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); val32 &= ~(APS_FSMCO_HW_SUSPEND | APS_FSMCO_PCIE); rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); /* Set, then poll until 0 */ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); val32 |= APS_FSMCO_MAC_ENABLE; rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) { ret = 0; break; } udelay(10); } if (!count) { ret = -EBUSY; goto exit; } /* Enable WL control XTAL setting */ val8 = rtl8xxxu_read8(priv, REG_AFE_MISC); val8 |= AFE_MISC_WL_XTAL_CTRL; rtl8xxxu_write8(priv, REG_AFE_MISC, val8); /* Enable falling edge triggering interrupt */ val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 1); val8 |= BIT(1); rtl8xxxu_write8(priv, REG_GPIO_INTM + 1, val8); /* Enable GPIO9 interrupt mode */ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2 + 1); val8 |= BIT(1); rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2 + 1, val8); /* Enable GPIO9 input mode */ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2); val8 &= ~BIT(1); rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2, val8); /* Enable HSISR GPIO[C:0] interrupt */ val8 = rtl8xxxu_read8(priv, REG_HSIMR); val8 |= BIT(0); rtl8xxxu_write8(priv, REG_HSIMR, val8); /* Enable HSISR GPIO9 interrupt */ val8 = rtl8xxxu_read8(priv, REG_HSIMR + 2); val8 |= BIT(1); rtl8xxxu_write8(priv, REG_HSIMR + 2, val8); val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL); val8 |= MULTI_WIFI_HW_ROF_EN; rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL, val8); /* For GPIO9 internal pull high setting BIT(14) */ val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL + 1); val8 |= BIT(6); rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL + 1, val8); exit: return ret; } static int rtl8723bu_power_on(struct rtl8xxxu_priv *priv) { u8 val8; u16 val16; u32 val32; int ret; rtl8xxxu_disabled_to_emu(priv); ret = rtl8723b_emu_to_active(priv); if (ret) goto exit; /* * Enable MAC DMA/WMAC/SCHEDULE/SEC block * Set CR bit10 to 
enable 32k calibration. */ val16 = rtl8xxxu_read16(priv, REG_CR); val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE | CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE | CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE); rtl8xxxu_write16(priv, REG_CR, val16); /* * BT coexist power on settings. This is identical for 1 and 2 * antenna parts. */ rtl8xxxu_write8(priv, REG_PAD_CTRL1 + 3, 0x20); val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); val16 |= SYS_FUNC_BBRSTB | SYS_FUNC_BB_GLB_RSTN; rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); rtl8xxxu_write8(priv, REG_BT_CONTROL_8723BU + 1, 0x18); rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04); rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); /* Antenna inverse */ rtl8xxxu_write8(priv, 0xfe08, 0x01); val16 = rtl8xxxu_read16(priv, REG_PWR_DATA); val16 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN; rtl8xxxu_write16(priv, REG_PWR_DATA, val16); val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); val32 |= LEDCFG0_DPDT_SELECT; rtl8xxxu_write32(priv, REG_LEDCFG0, val32); val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1); val8 &= ~PAD_CTRL1_SW_DPDT_SEL_DATA; rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8); exit: return ret; } static void rtl8723bu_power_off(struct rtl8xxxu_priv *priv) { u8 val8; u16 val16; rtl8xxxu_flush_fifo(priv); /* * Disable TX report timer */ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL); val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE; rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8); rtl8xxxu_write8(priv, REG_CR, 0x0000); rtl8xxxu_active_to_lps(priv); /* Reset Firmware if running in RAM */ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL) rtl8xxxu_firmware_self_reset(priv); /* Reset MCU */ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); val16 &= ~SYS_FUNC_CPU_ENABLE; rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); /* Reset MCU ready status */ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00); rtl8723bu_active_to_emu(priv); val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); val8 |= BIT(3); /* APS_FSMCO_HW_SUSPEND */ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); /* 0x48[16] = 1 to enable GPIO9 as EXT wakeup */ val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2); val8 |= BIT(0); rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8); } static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv) { struct h2c_cmd h2c; u32 val32; u8 val8; val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA); val32 |= (BIT(22) | BIT(23)); rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32); /* * No indication anywhere as to what 0x0790 does. The 2 antenna * vendor code preserves bits 6-7 here. 
*/ rtl8xxxu_write8(priv, 0x0790, 0x05); /* * 0x0778 seems to be related to enabling the number of antennas * In the vendor driver halbtc8723b2ant_InitHwConfig() sets it * to 0x03, while halbtc8723b1ant_InitHwConfig() sets it to 0x01 */ rtl8xxxu_write8(priv, 0x0778, 0x01); val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG); val8 |= BIT(5); rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_IQADJ_G1, 0x780); rtl8723bu_write_btreg(priv, 0x3c, 0x15); /* BT TRx Mask on */ /* * Set BT grant to low */ memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.bt_grant.cmd = H2C_8723B_BT_GRANT; h2c.bt_grant.data = 0; rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_grant)); /* * WLAN action by PTA */ rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x0c); /* * BT select S0/S1 controlled by WiFi */ val8 = rtl8xxxu_read8(priv, 0x0067); val8 |= BIT(5); rtl8xxxu_write8(priv, 0x0067, val8); val32 = rtl8xxxu_read32(priv, REG_PWR_DATA); val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN; rtl8xxxu_write32(priv, REG_PWR_DATA, val32); /* * Bits 6/7 are marked in/out ... but for what? */ rtl8xxxu_write8(priv, 0x0974, 0xff); val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER); val32 |= (BIT(0) | BIT(1)); rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32); rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77); val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); val32 &= ~BIT(24); val32 |= BIT(23); rtl8xxxu_write32(priv, REG_LEDCFG0, val32); /* * Fix external switch Main->S1, Aux->S0 */ val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1); val8 &= ~BIT(0); rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8); memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.ant_sel_rsv.cmd = H2C_8723B_ANT_SEL_RSV; h2c.ant_sel_rsv.ant_inverse = 1; h2c.ant_sel_rsv.int_switch_type = 0; rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.ant_sel_rsv)); /* * Different settings per different antenna position. 
* Antenna Position: | Normal Inverse * -------------------------------------------------- * Antenna switch to BT: | 0x280, 0x00 * Antenna switch to WiFi: | 0x0, 0x280 * Antenna switch to PTA: | 0x200, 0x80 */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x80); /* * Software control, antenna at WiFi side */ rtl8723bu_set_ps_tdma(priv, 0x08, 0x00, 0x00, 0x00, 0x00); rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555); rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555); rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff); rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03); memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.bt_info.cmd = H2C_8723B_BT_INFO; h2c.bt_info.data = BIT(0); rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_info)); memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.ignore_wlan.cmd = H2C_8723B_BT_IGNORE_WLANACT; h2c.ignore_wlan.data = 0; rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan)); } static void rtl8723bu_init_aggregation(struct rtl8xxxu_priv *priv) { u32 agg_rx; u8 agg_ctrl; /* * For now simply disable RX aggregation */ agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL); agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN; agg_rx = rtl8xxxu_read32(priv, REG_RXDMA_AGG_PG_TH); agg_rx &= ~RXDMA_USB_AGG_ENABLE; agg_rx &= ~0xff0f; rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl); rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, agg_rx); } static void rtl8723bu_init_statistics(struct rtl8xxxu_priv *priv) { u32 val32; /* Time duration for NHM unit: 4us, 0x2710=40ms */ rtl8xxxu_write16(priv, REG_NHM_TIMER_8723B + 2, 0x2710); rtl8xxxu_write16(priv, REG_NHM_TH9_TH10_8723B + 2, 0xffff); rtl8xxxu_write32(priv, REG_NHM_TH3_TO_TH0_8723B, 0xffffff52); rtl8xxxu_write32(priv, REG_NHM_TH7_TO_TH4_8723B, 0xffffffff); /* TH8 */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 |= 0xff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* Enable CCK */ val32 = rtl8xxxu_read32(priv, REG_NHM_TH9_TH10_8723B); val32 |= BIT(8) | BIT(9) | BIT(10); rtl8xxxu_write32(priv, REG_NHM_TH9_TH10_8723B, val32); /* Max power amongst all RX antennas */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_FA_RSTC); val32 |= BIT(7); rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32); } static s8 rtl8723b_cck_rssi(struct rtl8xxxu_priv *priv, struct rtl8723au_phy_stats *phy_stats) { u8 cck_agc_rpt = phy_stats->cck_agc_rpt_ofdm_cfosho_a; s8 rx_pwr_all = 0x00; u8 vga_idx, lna_idx; lna_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_LNA_IDX_MASK); vga_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_VGA_IDX_MASK); switch (lna_idx) { case 6: rx_pwr_all = -34 - (2 * vga_idx); break; case 4: rx_pwr_all = -14 - (2 * vga_idx); break; case 1: rx_pwr_all = 6 - (2 * vga_idx); break; case 0: rx_pwr_all = 16 - (2 * vga_idx); break; default: break; } return rx_pwr_all; } static int rtl8723bu_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct rtl8xxxu_priv *priv = container_of(led_cdev, struct rtl8xxxu_priv, led_cdev); u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG2); ledcfg &= LEDCFG2_DPDT_SELECT; if (brightness == LED_OFF) ledcfg |= LEDCFG2_SW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE; else if (brightness == LED_ON) ledcfg |= LEDCFG2_SW_LED_CONTROL; else if (brightness == RTL8XXXU_HW_LED_CONTROL) ledcfg |= LEDCFG2_HW_LED_CONTROL | LEDCFG2_HW_LED_ENABLE; rtl8xxxu_write8(priv, REG_LEDCFG2, ledcfg); return 0; } struct rtl8xxxu_fileops rtl8723bu_fops = { .identify_chip = rtl8723bu_identify_chip, .parse_efuse = rtl8723bu_parse_efuse, .load_firmware = rtl8723bu_load_firmware, .power_on = rtl8723bu_power_on, .power_off = 
rtl8723bu_power_off, .read_efuse = rtl8xxxu_read_efuse, .reset_8051 = rtl8723bu_reset_8051, .llt_init = rtl8xxxu_auto_llt_table, .init_phy_bb = rtl8723bu_init_phy_bb, .init_phy_rf = rtl8723bu_init_phy_rf, .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .phy_lc_calibrate = rtl8723a_phy_lc_calibrate, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, .config_channel = rtl8xxxu_gen2_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc24, .parse_phystats = rtl8723au_rx_parse_phystats, .init_aggregation = rtl8723bu_init_aggregation, .init_statistics = rtl8723bu_init_statistics, .init_burst = rtl8xxxu_init_burst, .enable_rf = rtl8723b_enable_rf, .disable_rf = rtl8xxxu_gen2_disable_rf, .usb_quirks = rtl8xxxu_gen2_usb_quirks, .set_tx_power = rtl8723b_set_tx_power, .update_rate_mask = rtl8xxxu_gen2_update_rate_mask, .report_connect = rtl8xxxu_gen2_report_connect, .report_rssi = rtl8xxxu_gen2_report_rssi, .fill_txdesc = rtl8xxxu_fill_txdesc_v2, .set_crystal_cap = rtl8723a_set_crystal_cap, .cck_rssi = rtl8723b_cck_rssi, .led_classdev_brightness_set = rtl8723bu_led_brightness_set, .writeN_block_size = 1024, .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40), .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24), .has_s0s1 = 1, .has_tx_report = 1, .gen2_thermal_meter = 1, .needs_full_init = 1, .init_reg_hmtfr = 1, .ampdu_max_time = 0x5e, .ustime_tsf_edca = 0x50, .max_aggr_num = 0x0c14, .supports_ap = 1, .max_macid_num = 128, .max_sec_cam_num = 64, .adda_1t_init = 0x01c00014, .adda_1t_path_on = 0x01c00014, .adda_2t_path_on_a = 0x01c00014, .adda_2t_path_on_b = 0x01c00014, .trxff_boundary = 0x3f7f, .pbp_rx = PBP_PAGE_SIZE_256, .pbp_tx = PBP_PAGE_SIZE_256, .mactable = rtl8723b_mac_init_table, .total_page_num = TX_TOTAL_PAGE_NUM_8723B, .page_num_hi = TX_PAGE_NUM_HI_PQ_8723B, .page_num_lo = TX_PAGE_NUM_LO_PQ_8723B, .page_num_norm = TX_PAGE_NUM_NORM_PQ_8723B, }; |
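/*
 * Illustrative, standalone sketch (not part of the driver): the bounded
 * register-poll pattern used above by rtl8723bu_active_to_emu() and
 * rtl8723b_emu_to_active() - kick off a hardware transition, then poll a
 * status bit a limited number of times and give up with -EBUSY.  The
 * register access is simulated (demo_read8() is a made-up stand-in) so the
 * sketch compiles and runs with any C compiler; DEMO_MAX_REG_POLL is only a
 * demo budget standing in for RTL8XXXU_MAX_REG_POLL.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_REG_POLL 1000	/* demo stand-in for RTL8XXXU_MAX_REG_POLL */

/* Simulated register: BIT(1) "clears itself" after a handful of reads. */
static int demo_polls_until_done = 5;

static uint8_t demo_read8(void)
{
	return (demo_polls_until_done-- > 0) ? 0x02 : 0x00;
}

static int demo_poll_bit_clear(uint8_t mask)
{
	int count;

	for (count = DEMO_MAX_REG_POLL; count; count--) {
		if ((demo_read8() & mask) == 0)
			return 0;	/* hardware finished the transition */
		/* the driver does udelay(10) here; omitted in the simulation */
	}

	return -EBUSY;			/* polling budget exhausted, as in the driver */
}

int main(void)
{
	printf("poll result: %d\n", demo_poll_bit_clear(0x02));
	return 0;
}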
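/*
 * Illustrative, standalone sketch (not part of the driver): how
 * rtl8723b_cck_rssi() above maps the CCK AGC report byte to a signal-power
 * estimate.  The bit layout assumed here (VGA index in bits 0..4, LNA index
 * in bits 5..7) corresponds to the CCK_AGC_RPT_VGA_IDX_MASK /
 * CCK_AGC_RPT_LNA_IDX_MASK fields used by the driver, but the exact mask
 * definitions are an assumption of this sketch.
 */
#include <stdint.h>
#include <stdio.h>

static int demo_cck_rssi(uint8_t cck_agc_rpt)
{
	uint8_t vga_idx = cck_agc_rpt & 0x1f;		/* bits 0..4 */
	uint8_t lna_idx = (cck_agc_rpt >> 5) & 0x07;	/* bits 5..7 */

	/* Each LNA setting picks a base level; each VGA step subtracts 2. */
	switch (lna_idx) {
	case 6:
		return -34 - 2 * vga_idx;
	case 4:
		return -14 - 2 * vga_idx;
	case 1:
		return 6 - 2 * vga_idx;
	case 0:
		return 16 - 2 * vga_idx;
	default:
		return 0;	/* unknown LNA setting, same fallback as the driver */
	}
}

int main(void)
{
	/* LNA index 4, VGA index 10 -> -14 - 20 = -34 */
	printf("estimated rx power: %d\n", demo_cck_rssi((4 << 5) | 10));
	return 0;
}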
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Jeilin JL2005B/C/D library
 *
 * Copyright (C) 2011 Theodore Kilgore <kilgota@auburn.edu>
 */

#define MODULE_NAME "jl2005bcd"

#include <linux/workqueue.h>
#include <linux/slab.h>
#include "gspca.h"

MODULE_AUTHOR("Theodore Kilgore <kilgota@auburn.edu>");
MODULE_DESCRIPTION("JL2005B/C/D USB Camera Driver");
MODULE_LICENSE("GPL");

/* Default timeouts, in ms */
#define JL2005C_CMD_TIMEOUT 500
#define JL2005C_DATA_TIMEOUT 1000

/* Maximum transfer size to use. */
#define JL2005C_MAX_TRANSFER 0x200
#define FRAME_HEADER_LEN 16

/* specific webcam descriptor */
struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */
	unsigned char firmware_id[6];
	const struct v4l2_pix_format *cap_mode;
	/* Driver stuff */
	struct work_struct work_struct;
	u8 frame_brightness;
	int block_size;	/* block size of camera */
	int vga;	/* 1 if vga cam, 0 if cif cam */
};

/*
 * Camera has two resolution settings. What they are depends on model.
*/ static const struct v4l2_pix_format cif_mode[] = { {176, 144, V4L2_PIX_FMT_JL2005BCD, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, {352, 288, V4L2_PIX_FMT_JL2005BCD, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; static const struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_JL2005BCD, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, {640, 480, V4L2_PIX_FMT_JL2005BCD, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; /* * cam uses endpoint 0x03 to send commands, 0x84 for read commands, * and 0x82 for bulk data transfer. */ /* All commands are two bytes only */ static int jl2005c_write2(struct gspca_dev *gspca_dev, unsigned char *command) { int retval; memcpy(gspca_dev->usb_buf, command, 2); retval = usb_bulk_msg(gspca_dev->dev, usb_sndbulkpipe(gspca_dev->dev, 3), gspca_dev->usb_buf, 2, NULL, 500); if (retval < 0) pr_err("command write [%02x] error %d\n", gspca_dev->usb_buf[0], retval); return retval; } /* Response to a command is one byte in usb_buf[0], only if requested. */ static int jl2005c_read1(struct gspca_dev *gspca_dev) { int retval; retval = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x84), gspca_dev->usb_buf, 1, NULL, 500); if (retval < 0) pr_err("read command [0x%02x] error %d\n", gspca_dev->usb_buf[0], retval); return retval; } /* Response appears in gspca_dev->usb_buf[0] */ static int jl2005c_read_reg(struct gspca_dev *gspca_dev, unsigned char reg) { int retval; static u8 instruction[2] = {0x95, 0x00}; /* put register to read in byte 1 */ instruction[1] = reg; /* Send the read request */ retval = jl2005c_write2(gspca_dev, instruction); if (retval < 0) return retval; retval = jl2005c_read1(gspca_dev); return retval; } static int jl2005c_start_new_frame(struct gspca_dev *gspca_dev) { int i; int retval; int frame_brightness = 0; static u8 instruction[2] = {0x7f, 0x01}; retval = jl2005c_write2(gspca_dev, instruction); if (retval < 0) return retval; i = 0; while (i < 20 && !frame_brightness) { /* If we tried 20 times, give up. 
*/ retval = jl2005c_read_reg(gspca_dev, 0x7e); if (retval < 0) return retval; frame_brightness = gspca_dev->usb_buf[0]; retval = jl2005c_read_reg(gspca_dev, 0x7d); if (retval < 0) return retval; i++; } gspca_dbg(gspca_dev, D_FRAM, "frame_brightness is 0x%02x\n", gspca_dev->usb_buf[0]); return retval; } static int jl2005c_write_reg(struct gspca_dev *gspca_dev, unsigned char reg, unsigned char value) { int retval; u8 instruction[2]; instruction[0] = reg; instruction[1] = value; retval = jl2005c_write2(gspca_dev, instruction); if (retval < 0) return retval; return retval; } static int jl2005c_get_firmware_id(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; int i = 0; int retval; static const unsigned char regs_to_read[] = { 0x57, 0x02, 0x03, 0x5d, 0x5e, 0x5f }; gspca_dbg(gspca_dev, D_PROBE, "Running jl2005c_get_firmware_id\n"); /* Read the first ID byte once for warmup */ retval = jl2005c_read_reg(gspca_dev, regs_to_read[0]); gspca_dbg(gspca_dev, D_PROBE, "response is %02x\n", gspca_dev->usb_buf[0]); if (retval < 0) return retval; /* Now actually get the ID string */ for (i = 0; i < 6; i++) { retval = jl2005c_read_reg(gspca_dev, regs_to_read[i]); if (retval < 0) return retval; sd->firmware_id[i] = gspca_dev->usb_buf[0]; } gspca_dbg(gspca_dev, D_PROBE, "firmware ID is %02x%02x%02x%02x%02x%02x\n", sd->firmware_id[0], sd->firmware_id[1], sd->firmware_id[2], sd->firmware_id[3], sd->firmware_id[4], sd->firmware_id[5]); return 0; } static int jl2005c_stream_start_vga_lg (struct gspca_dev *gspca_dev) { int i; int retval = -1; static u8 instruction[][2] = { {0x05, 0x00}, {0x7c, 0x00}, {0x7d, 0x18}, {0x02, 0x00}, {0x01, 0x00}, {0x04, 0x52}, }; for (i = 0; i < ARRAY_SIZE(instruction); i++) { msleep(60); retval = jl2005c_write2(gspca_dev, instruction[i]); if (retval < 0) return retval; } msleep(60); return retval; } static int jl2005c_stream_start_vga_small(struct gspca_dev *gspca_dev) { int i; int retval = -1; static u8 instruction[][2] = { {0x06, 0x00}, {0x7c, 0x00}, {0x7d, 0x1a}, {0x02, 0x00}, {0x01, 0x00}, {0x04, 0x52}, }; for (i = 0; i < ARRAY_SIZE(instruction); i++) { msleep(60); retval = jl2005c_write2(gspca_dev, instruction[i]); if (retval < 0) return retval; } msleep(60); return retval; } static int jl2005c_stream_start_cif_lg(struct gspca_dev *gspca_dev) { int i; int retval = -1; static u8 instruction[][2] = { {0x05, 0x00}, {0x7c, 0x00}, {0x7d, 0x30}, {0x02, 0x00}, {0x01, 0x00}, {0x04, 0x42}, }; for (i = 0; i < ARRAY_SIZE(instruction); i++) { msleep(60); retval = jl2005c_write2(gspca_dev, instruction[i]); if (retval < 0) return retval; } msleep(60); return retval; } static int jl2005c_stream_start_cif_small(struct gspca_dev *gspca_dev) { int i; int retval = -1; static u8 instruction[][2] = { {0x06, 0x00}, {0x7c, 0x00}, {0x7d, 0x32}, {0x02, 0x00}, {0x01, 0x00}, {0x04, 0x42}, }; for (i = 0; i < ARRAY_SIZE(instruction); i++) { msleep(60); retval = jl2005c_write2(gspca_dev, instruction[i]); if (retval < 0) return retval; } msleep(60); return retval; } static int jl2005c_stop(struct gspca_dev *gspca_dev) { return jl2005c_write_reg(gspca_dev, 0x07, 0x00); } /* * This function is called as a workqueue function and runs whenever the camera * is streaming data. Because it is a workqueue function it is allowed to sleep * so we can use synchronous USB calls. To avoid possible collisions with other * threads attempting to use gspca_dev->usb_buf we take the usb_lock when * performing USB operations using it. 
In practice we don't really need this * as the camera doesn't provide any controls. */ static void jl2005c_dostream(struct work_struct *work) { struct sd *dev = container_of(work, struct sd, work_struct); struct gspca_dev *gspca_dev = &dev->gspca_dev; int bytes_left = 0; /* bytes remaining in current frame. */ int data_len; /* size to use for the next read. */ int header_read = 0; unsigned char header_sig[2] = {0x4a, 0x4c}; int act_len; int packet_type; int ret; u8 *buffer; buffer = kmalloc(JL2005C_MAX_TRANSFER, GFP_KERNEL); if (!buffer) { pr_err("Couldn't allocate USB buffer\n"); goto quit_stream; } while (gspca_dev->present && gspca_dev->streaming) { #ifdef CONFIG_PM if (gspca_dev->frozen) break; #endif /* Check if this is a new frame. If so, start the frame first */ if (!header_read) { mutex_lock(&gspca_dev->usb_lock); ret = jl2005c_start_new_frame(gspca_dev); mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) goto quit_stream; ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x82), buffer, JL2005C_MAX_TRANSFER, &act_len, JL2005C_DATA_TIMEOUT); gspca_dbg(gspca_dev, D_PACK, "Got %d bytes out of %d for header\n", act_len, JL2005C_MAX_TRANSFER); if (ret < 0 || act_len < JL2005C_MAX_TRANSFER) goto quit_stream; /* Check whether we actually got the first blodk */ if (memcmp(header_sig, buffer, 2) != 0) { pr_err("First block is not the first block\n"); goto quit_stream; } /* total size to fetch is byte 7, times blocksize * of which we already got act_len */ bytes_left = buffer[0x07] * dev->block_size - act_len; gspca_dbg(gspca_dev, D_PACK, "bytes_left = 0x%x\n", bytes_left); /* We keep the header. It has other information, too.*/ packet_type = FIRST_PACKET; gspca_frame_add(gspca_dev, packet_type, buffer, act_len); header_read = 1; } while (bytes_left > 0 && gspca_dev->present) { data_len = bytes_left > JL2005C_MAX_TRANSFER ? JL2005C_MAX_TRANSFER : bytes_left; ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x82), buffer, data_len, &act_len, JL2005C_DATA_TIMEOUT); if (ret < 0 || act_len < data_len) goto quit_stream; gspca_dbg(gspca_dev, D_PACK, "Got %d bytes out of %d for frame\n", data_len, bytes_left); bytes_left -= data_len; if (bytes_left == 0) { packet_type = LAST_PACKET; header_read = 0; } else packet_type = INTER_PACKET; gspca_frame_add(gspca_dev, packet_type, buffer, data_len); } } quit_stream: if (gspca_dev->present) { mutex_lock(&gspca_dev->usb_lock); jl2005c_stop(gspca_dev); mutex_unlock(&gspca_dev->usb_lock); } kfree(buffer); } /* This function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam; struct sd *sd = (struct sd *) gspca_dev; cam = &gspca_dev->cam; /* We don't use the buffer gspca allocates so make it small. */ cam->bulk_size = 64; cam->bulk = 1; /* For the rest, the camera needs to be detected */ jl2005c_get_firmware_id(gspca_dev); /* Here are some known firmware IDs * First some JL2005B cameras * {0x41, 0x07, 0x04, 0x2c, 0xe8, 0xf2} Sakar KidzCam * {0x45, 0x02, 0x08, 0xb9, 0x00, 0xd2} No-name JL2005B * JL2005C cameras * {0x01, 0x0c, 0x16, 0x10, 0xf8, 0xc8} Argus DC-1512 * {0x12, 0x04, 0x03, 0xc0, 0x00, 0xd8} ICarly * {0x86, 0x08, 0x05, 0x02, 0x00, 0xd4} Jazz * * Based upon this scanty evidence, we can detect a CIF camera by * testing byte 0 for 0x4x. 
*/ if ((sd->firmware_id[0] & 0xf0) == 0x40) { cam->cam_mode = cif_mode; cam->nmodes = ARRAY_SIZE(cif_mode); sd->block_size = 0x80; } else { cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); sd->block_size = 0x200; } INIT_WORK(&sd->work_struct, jl2005c_dostream); return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return 0; } static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; sd->cap_mode = gspca_dev->cam.cam_mode; switch (gspca_dev->pixfmt.width) { case 640: gspca_dbg(gspca_dev, D_STREAM, "Start streaming at vga resolution\n"); jl2005c_stream_start_vga_lg(gspca_dev); break; case 320: gspca_dbg(gspca_dev, D_STREAM, "Start streaming at qvga resolution\n"); jl2005c_stream_start_vga_small(gspca_dev); break; case 352: gspca_dbg(gspca_dev, D_STREAM, "Start streaming at cif resolution\n"); jl2005c_stream_start_cif_lg(gspca_dev); break; case 176: gspca_dbg(gspca_dev, D_STREAM, "Start streaming at qcif resolution\n"); jl2005c_stream_start_cif_small(gspca_dev); break; default: pr_err("Unknown resolution specified\n"); return -1; } schedule_work(&sd->work_struct); return 0; } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* wait for the work queue to terminate */ mutex_unlock(&gspca_dev->usb_lock); /* This waits for sq905c_dostream to finish */ flush_work(&dev->work_struct); mutex_lock(&gspca_dev->usb_lock); } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stop0 = sd_stop0, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0979, 0x0227)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); |
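/*
 * Illustrative, standalone sketch (not part of the driver): the frame-size
 * bookkeeping done by jl2005c_dostream() above.  Byte 7 of the 16-byte frame
 * header (FRAME_HEADER_LEN) gives the frame length in camera blocks (0x80
 * bytes on CIF models, 0x200 on VGA models, as chosen in sd_config()); the
 * first 0x200-byte bulk read already contains the header, so its length is
 * subtracted before the chunked read loop.  All names here are local to the
 * sketch.
 */
#include <stdio.h>

#define DEMO_MAX_TRANSFER 0x200		/* same role as JL2005C_MAX_TRANSFER */

static void demo_plan_frame_reads(unsigned char header_byte7, int block_size)
{
	int bytes_left = header_byte7 * block_size - DEMO_MAX_TRANSFER;
	int reads = 0;

	while (bytes_left > 0) {
		int chunk = bytes_left > DEMO_MAX_TRANSFER ?
			    DEMO_MAX_TRANSFER : bytes_left;
		bytes_left -= chunk;
		reads++;
	}
	printf("%d follow-up bulk reads after the header transfer\n", reads);
}

int main(void)
{
	demo_plan_frame_reads(0x60, 0x200);	/* a VGA-sized frame */
	demo_plan_frame_reads(0x40, 0x80);	/* a CIF-sized frame */
	return 0;
}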
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright 2017 Intel Deutschland GmbH
 * Copyright (C) 2018 - 2024 Intel Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: Wireless regulatory infrastructure
 *
 * The usual implementation is for a driver to read a device EEPROM to
 * determine which regulatory domain it should be operating under, then
 * looking up the allowable channels in a driver-local table and finally
 * registering those channels in the wiphy structure.
 *
 * Another set of compliance enforcement is for drivers to use their
 * own compliance limits which can be stored on the EEPROM. The host
 * driver or firmware may ensure these are used.
 *
 * In addition to all this we provide an extra layer of regulatory
 * conformance. For drivers which do not have any regulatory
 * information CRDA provides the complete regulatory solution.
 * For others it provides a community effort on further restrictions
 * to enhance compliance.
 *
 * Note: When number of rules --> infinity we will not be able to
 * index on alpha2 any more, instead we'll probably have to
 * rely on some SHA1 checksum of the regdomain for example.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/nl80211.h>
#include <linux/platform_device.h>
#include <linux/verification.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/units.h>
#include <net/cfg80211.h>
#include "core.h"
#include "reg.h"
#include "rdev-ops.h"
#include "nl80211.h"

/*
 * Grace period we give before making sure all current interfaces reside on
 * channels allowed by the current regulatory domain.
 */
#define REG_ENFORCE_GRACE_MS 60000

/**
 * enum reg_request_treatment - regulatory request treatment
 *
 * @REG_REQ_OK: continue processing the regulatory request
 * @REG_REQ_IGNORE: ignore the regulatory request
 * @REG_REQ_INTERSECT: the regulatory domain resulting from this request should
 *	be intersected with the current one.
 * @REG_REQ_ALREADY_SET: the regulatory request will not change the current
 *	regulatory settings, and no further processing is required.
*/ enum reg_request_treatment { REG_REQ_OK, REG_REQ_IGNORE, REG_REQ_INTERSECT, REG_REQ_ALREADY_SET, }; static struct regulatory_request core_request_world = { .initiator = NL80211_REGDOM_SET_BY_CORE, .alpha2[0] = '0', .alpha2[1] = '0', .intersect = false, .processed = true, .country_ie_env = ENVIRON_ANY, }; /* * Receipt of information from last regulatory request, * protected by RTNL (and can be accessed with RCU protection) */ static struct regulatory_request __rcu *last_request = (void __force __rcu *)&core_request_world; /* To trigger userspace events and load firmware */ static struct platform_device *reg_pdev; /* * Central wireless core regulatory domains, we only need two, * the current one and a world regulatory domain in case we have no * information to give us an alpha2. * (protected by RTNL, can be read under RCU) */ const struct ieee80211_regdomain __rcu *cfg80211_regdomain; /* * Number of devices that registered to the core * that support cellular base station regulatory hints * (protected by RTNL) */ static int reg_num_devs_support_basehint; /* * State variable indicating if the platform on which the devices * are attached is operating in an indoor environment. The state variable * is relevant for all registered devices. */ static bool reg_is_indoor; static DEFINE_SPINLOCK(reg_indoor_lock); /* Used to track the userspace process controlling the indoor setting */ static u32 reg_is_indoor_portid; static void restore_regulatory_settings(bool reset_user, bool cached); static void print_regdomain(const struct ieee80211_regdomain *rd); static void reg_process_hint(struct regulatory_request *reg_request); static const struct ieee80211_regdomain *get_cfg80211_regdom(void) { return rcu_dereference_rtnl(cfg80211_regdomain); } /* * Returns the regulatory domain associated with the wiphy. * * Requires any of RTNL, wiphy mutex or RCU protection. 
*/ const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy) { return rcu_dereference_check(wiphy->regd, lockdep_is_held(&wiphy->mtx) || lockdep_rtnl_is_held()); } EXPORT_SYMBOL(get_wiphy_regdom); static const char *reg_dfs_region_str(enum nl80211_dfs_regions dfs_region) { switch (dfs_region) { case NL80211_DFS_UNSET: return "unset"; case NL80211_DFS_FCC: return "FCC"; case NL80211_DFS_ETSI: return "ETSI"; case NL80211_DFS_JP: return "JP"; } return "Unknown"; } enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy) { const struct ieee80211_regdomain *regd = NULL; const struct ieee80211_regdomain *wiphy_regd = NULL; enum nl80211_dfs_regions dfs_region; rcu_read_lock(); regd = get_cfg80211_regdom(); dfs_region = regd->dfs_region; if (!wiphy) goto out; wiphy_regd = get_wiphy_regdom(wiphy); if (!wiphy_regd) goto out; if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) { dfs_region = wiphy_regd->dfs_region; goto out; } if (wiphy_regd->dfs_region == regd->dfs_region) goto out; pr_debug("%s: device specific dfs_region (%s) disagrees with cfg80211's central dfs_region (%s)\n", dev_name(&wiphy->dev), reg_dfs_region_str(wiphy_regd->dfs_region), reg_dfs_region_str(regd->dfs_region)); out: rcu_read_unlock(); return dfs_region; } static void rcu_free_regdom(const struct ieee80211_regdomain *r) { if (!r) return; kfree_rcu((struct ieee80211_regdomain *)r, rcu_head); } static struct regulatory_request *get_last_request(void) { return rcu_dereference_rtnl(last_request); } /* Used to queue up regulatory hints */ static LIST_HEAD(reg_requests_list); static DEFINE_SPINLOCK(reg_requests_lock); /* Used to queue up beacon hints for review */ static LIST_HEAD(reg_pending_beacons); static DEFINE_SPINLOCK(reg_pending_beacons_lock); /* Used to keep track of processed beacon hints */ static LIST_HEAD(reg_beacon_list); struct reg_beacon { struct list_head list; struct ieee80211_channel chan; }; static void reg_check_chans_work(struct work_struct *work); static DECLARE_DELAYED_WORK(reg_check_chans, reg_check_chans_work); static void reg_todo(struct work_struct *work); static DECLARE_WORK(reg_work, reg_todo); /* We keep a static world regulatory domain in case of the absence of CRDA */ static const struct ieee80211_regdomain world_regdom = { .n_reg_rules = 8, .alpha2 = "00", .reg_rules = { /* IEEE 802.11b/g, channels 1..11 */ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), /* IEEE 802.11b/g, channels 12..13. 
*/ REG_RULE(2467-10, 2472+10, 20, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_AUTO_BW), /* IEEE 802.11 channel 14 - Only JP enables * this and for 802.11b only */ REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_NO_OFDM), /* IEEE 802.11a, channel 36..48 */ REG_RULE(5180-10, 5240+10, 80, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_AUTO_BW), /* IEEE 802.11a, channel 52..64 - DFS required */ REG_RULE(5260-10, 5320+10, 80, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_AUTO_BW | NL80211_RRF_DFS), /* IEEE 802.11a, channel 100..144 - DFS required */ REG_RULE(5500-10, 5720+10, 160, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_DFS), /* IEEE 802.11a, channel 149..165 */ REG_RULE(5745-10, 5825+10, 80, 6, 20, NL80211_RRF_NO_IR), /* IEEE 802.11ad (60GHz), channels 1..3 */ REG_RULE(56160+2160*1-1080, 56160+2160*3+1080, 2160, 0, 0, 0), } }; /* protected by RTNL */ static const struct ieee80211_regdomain *cfg80211_world_regdom = &world_regdom; static char *ieee80211_regdom = "00"; static char user_alpha2[2]; static const struct ieee80211_regdomain *cfg80211_user_regdom; module_param(ieee80211_regdom, charp, 0444); MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); static void reg_free_request(struct regulatory_request *request) { if (request == &core_request_world) return; if (request != get_last_request()) kfree(request); } static void reg_free_last_request(void) { struct regulatory_request *lr = get_last_request(); if (lr != &core_request_world && lr) kfree_rcu(lr, rcu_head); } static void reg_update_last_request(struct regulatory_request *request) { struct regulatory_request *lr; lr = get_last_request(); if (lr == request) return; reg_free_last_request(); rcu_assign_pointer(last_request, request); } static void reset_regdomains(bool full_reset, const struct ieee80211_regdomain *new_regdom) { const struct ieee80211_regdomain *r; ASSERT_RTNL(); r = get_cfg80211_regdom(); /* avoid freeing static information or freeing something twice */ if (r == cfg80211_world_regdom) r = NULL; if (cfg80211_world_regdom == &world_regdom) cfg80211_world_regdom = NULL; if (r == &world_regdom) r = NULL; rcu_free_regdom(r); rcu_free_regdom(cfg80211_world_regdom); cfg80211_world_regdom = &world_regdom; rcu_assign_pointer(cfg80211_regdomain, new_regdom); if (!full_reset) return; reg_update_last_request(&core_request_world); } /* * Dynamic world regulatory domain requested by the wireless * core upon initialization */ static void update_world_regdomain(const struct ieee80211_regdomain *rd) { struct regulatory_request *lr; lr = get_last_request(); WARN_ON(!lr); reset_regdomains(false, rd); cfg80211_world_regdom = rd; } bool is_world_regdom(const char *alpha2) { if (!alpha2) return false; return alpha2[0] == '0' && alpha2[1] == '0'; } static bool is_alpha2_set(const char *alpha2) { if (!alpha2) return false; return alpha2[0] && alpha2[1]; } static bool is_unknown_alpha2(const char *alpha2) { if (!alpha2) return false; /* * Special case where regulatory domain was built by driver * but a specific alpha2 cannot be determined */ return alpha2[0] == '9' && alpha2[1] == '9'; } static bool is_intersected_alpha2(const char *alpha2) { if (!alpha2) return false; /* * Special case where regulatory domain is the * result of an intersection between two regulatory domain * structures */ return alpha2[0] == '9' && alpha2[1] == '8'; } static bool is_an_alpha2(const char *alpha2) { if (!alpha2) return false; return isalpha(alpha2[0]) && isalpha(alpha2[1]); } static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y) 
{
	if (!alpha2_x || !alpha2_y)
		return false;
	return alpha2_x[0] == alpha2_y[0] && alpha2_x[1] == alpha2_y[1];
}

static bool regdom_changes(const char *alpha2)
{
	const struct ieee80211_regdomain *r = get_cfg80211_regdom();

	if (!r)
		return true;
	return !alpha2_equal(r->alpha2, alpha2);
}

/*
 * The NL80211_REGDOM_SET_BY_USER regdom alpha2 is cached, this lets
 * you know if a valid regulatory hint with NL80211_REGDOM_SET_BY_USER
 * has ever been issued.
 */
static bool is_user_regdom_saved(void)
{
	if (user_alpha2[0] == '9' && user_alpha2[1] == '7')
		return false;

	/* This would indicate a mistake in the design */
	if (WARN(!is_world_regdom(user_alpha2) && !is_an_alpha2(user_alpha2),
		 "Unexpected user alpha2: %c%c\n",
		 user_alpha2[0], user_alpha2[1]))
		return false;

	return true;
}

static const struct ieee80211_regdomain *
reg_copy_regd(const struct ieee80211_regdomain *src_regd)
{
	struct ieee80211_regdomain *regd;
	unsigned int i;

	regd = kzalloc(struct_size(regd, reg_rules, src_regd->n_reg_rules),
		       GFP_KERNEL);
	if (!regd)
		return ERR_PTR(-ENOMEM);

	memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));

	for (i = 0; i < src_regd->n_reg_rules; i++)
		memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
		       sizeof(struct ieee80211_reg_rule));

	return regd;
}

static void cfg80211_save_user_regdom(const struct ieee80211_regdomain *rd)
{
	ASSERT_RTNL();

	if (!IS_ERR(cfg80211_user_regdom))
		kfree(cfg80211_user_regdom);
	cfg80211_user_regdom = reg_copy_regd(rd);
}

struct reg_regdb_apply_request {
	struct list_head list;
	const struct ieee80211_regdomain *regdom;
};

static LIST_HEAD(reg_regdb_apply_list);
static DEFINE_MUTEX(reg_regdb_apply_mutex);

static void reg_regdb_apply(struct work_struct *work)
{
	struct reg_regdb_apply_request *request;

	rtnl_lock();

	mutex_lock(&reg_regdb_apply_mutex);
	while (!list_empty(&reg_regdb_apply_list)) {
		request = list_first_entry(&reg_regdb_apply_list,
					   struct reg_regdb_apply_request,
					   list);
		list_del(&request->list);

		set_regdom(request->regdom, REGD_SOURCE_INTERNAL_DB);
		kfree(request);
	}
	mutex_unlock(&reg_regdb_apply_mutex);

	rtnl_unlock();
}

static DECLARE_WORK(reg_regdb_work, reg_regdb_apply);

static int reg_schedule_apply(const struct ieee80211_regdomain *regdom)
{
	struct reg_regdb_apply_request *request;

	request = kzalloc(sizeof(struct reg_regdb_apply_request), GFP_KERNEL);
	if (!request) {
		kfree(regdom);
		return -ENOMEM;
	}

	request->regdom = regdom;

	mutex_lock(&reg_regdb_apply_mutex);
	list_add_tail(&request->list, &reg_regdb_apply_list);
	mutex_unlock(&reg_regdb_apply_mutex);

	schedule_work(&reg_regdb_work);
	return 0;
}

#ifdef CONFIG_CFG80211_CRDA_SUPPORT
/* Max number of consecutive attempts to communicate with CRDA */
#define REG_MAX_CRDA_TIMEOUTS 10

static u32 reg_crda_timeouts;

static void crda_timeout_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(crda_timeout, crda_timeout_work);

static void crda_timeout_work(struct work_struct *work)
{
	pr_debug("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
	rtnl_lock();
	reg_crda_timeouts++;
	restore_regulatory_settings(true, false);
	rtnl_unlock();
}

static void cancel_crda_timeout(void)
{
	cancel_delayed_work(&crda_timeout);
}

static void cancel_crda_timeout_sync(void)
{
	cancel_delayed_work_sync(&crda_timeout);
}

static void reset_crda_timeouts(void)
{
	reg_crda_timeouts = 0;
}

/*
 * This lets us keep regulatory code which is updated on a regulatory
 * basis in userspace.
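 *
 * Rough illustration of the flow (the userspace side is not part of
 * this file): call_crda() below emits a KOBJ_CHANGE uevent carrying
 * e.g. "COUNTRY=DE"; userspace (typically udev running the crda
 * helper) looks the country up in the regulatory database and feeds
 * the resulting rules back through nl80211, ending up in
 * set_regdom(). If no answer arrives before the delayed work above
 * fires, the previous regulatory settings are restored.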
*/ static int call_crda(const char *alpha2) { char country[12]; char *env[] = { country, NULL }; int ret; snprintf(country, sizeof(country), "COUNTRY=%c%c", alpha2[0], alpha2[1]); if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) { pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n"); return -EINVAL; } if (!is_world_regdom((char *) alpha2)) pr_debug("Calling CRDA for country: %c%c\n", alpha2[0], alpha2[1]); else pr_debug("Calling CRDA to update world regulatory domain\n"); ret = kobject_uevent_env(®_pdev->dev.kobj, KOBJ_CHANGE, env); if (ret) return ret; queue_delayed_work(system_power_efficient_wq, &crda_timeout, msecs_to_jiffies(3142)); return 0; } #else static inline void cancel_crda_timeout(void) {} static inline void cancel_crda_timeout_sync(void) {} static inline void reset_crda_timeouts(void) {} static inline int call_crda(const char *alpha2) { return -ENODATA; } #endif /* CONFIG_CFG80211_CRDA_SUPPORT */ /* code to directly load a firmware database through request_firmware */ static const struct fwdb_header *regdb; struct fwdb_country { u8 alpha2[2]; __be16 coll_ptr; /* this struct cannot be extended */ } __packed __aligned(4); struct fwdb_collection { u8 len; u8 n_rules; u8 dfs_region; /* no optional data yet */ /* aligned to 2, then followed by __be16 array of rule pointers */ } __packed __aligned(4); enum fwdb_flags { FWDB_FLAG_NO_OFDM = BIT(0), FWDB_FLAG_NO_OUTDOOR = BIT(1), FWDB_FLAG_DFS = BIT(2), FWDB_FLAG_NO_IR = BIT(3), FWDB_FLAG_AUTO_BW = BIT(4), }; struct fwdb_wmm_ac { u8 ecw; u8 aifsn; __be16 cot; } __packed; struct fwdb_wmm_rule { struct fwdb_wmm_ac client[IEEE80211_NUM_ACS]; struct fwdb_wmm_ac ap[IEEE80211_NUM_ACS]; } __packed; struct fwdb_rule { u8 len; u8 flags; __be16 max_eirp; __be32 start, end, max_bw; /* start of optional data */ __be16 cac_timeout; __be16 wmm_ptr; } __packed __aligned(4); #define FWDB_MAGIC 0x52474442 #define FWDB_VERSION 20 struct fwdb_header { __be32 magic; __be32 version; struct fwdb_country country[]; } __packed __aligned(4); static int ecw2cw(int ecw) { return (1 << ecw) - 1; } static bool valid_wmm(struct fwdb_wmm_rule *rule) { struct fwdb_wmm_ac *ac = (struct fwdb_wmm_ac *)rule; int i; for (i = 0; i < IEEE80211_NUM_ACS * 2; i++) { u16 cw_min = ecw2cw((ac[i].ecw & 0xf0) >> 4); u16 cw_max = ecw2cw(ac[i].ecw & 0x0f); u8 aifsn = ac[i].aifsn; if (cw_min >= cw_max) return false; if (aifsn < 1) return false; } return true; } static bool valid_rule(const u8 *data, unsigned int size, u16 rule_ptr) { struct fwdb_rule *rule = (void *)(data + (rule_ptr << 2)); if ((u8 *)rule + sizeof(rule->len) > data + size) return false; /* mandatory fields */ if (rule->len < offsetofend(struct fwdb_rule, max_bw)) return false; if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; struct fwdb_wmm_rule *wmm; if (wmm_ptr + sizeof(struct fwdb_wmm_rule) > size) return false; wmm = (void *)(data + wmm_ptr); if (!valid_wmm(wmm)) return false; } return true; } static bool valid_country(const u8 *data, unsigned int size, const struct fwdb_country *country) { unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; struct fwdb_collection *coll = (void *)(data + ptr); __be16 *rules_ptr; unsigned int i; /* make sure we can read len/n_rules */ if ((u8 *)coll + offsetofend(typeof(*coll), n_rules) > data + size) return false; /* make sure base struct and all rules fit */ if ((u8 *)coll + ALIGN(coll->len, 2) + (coll->n_rules * 2) > data + size) return false; /* mandatory fields must exist */ if (coll->len < 
offsetofend(struct fwdb_collection, dfs_region)) return false; rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2)); for (i = 0; i < coll->n_rules; i++) { u16 rule_ptr = be16_to_cpu(rules_ptr[i]); if (!valid_rule(data, size, rule_ptr)) return false; } return true; } #ifdef CONFIG_CFG80211_REQUIRE_SIGNED_REGDB #include <keys/asymmetric-type.h> static struct key *builtin_regdb_keys; static int __init load_builtin_regdb_keys(void) { builtin_regdb_keys = keyring_alloc(".builtin_regdb_keys", KUIDT_INIT(0), KGIDT_INIT(0), current_cred(), ((KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH), KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(builtin_regdb_keys)) return PTR_ERR(builtin_regdb_keys); pr_notice("Loading compiled-in X.509 certificates for regulatory database\n"); #ifdef CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS x509_load_certificate_list(shipped_regdb_certs, shipped_regdb_certs_len, builtin_regdb_keys); #endif #ifdef CONFIG_CFG80211_EXTRA_REGDB_KEYDIR if (CONFIG_CFG80211_EXTRA_REGDB_KEYDIR[0] != '\0') x509_load_certificate_list(extra_regdb_certs, extra_regdb_certs_len, builtin_regdb_keys); #endif return 0; } MODULE_FIRMWARE("regulatory.db.p7s"); static bool regdb_has_valid_signature(const u8 *data, unsigned int size) { const struct firmware *sig; bool result; if (request_firmware(&sig, "regulatory.db.p7s", ®_pdev->dev)) return false; result = verify_pkcs7_signature(data, size, sig->data, sig->size, builtin_regdb_keys, VERIFYING_UNSPECIFIED_SIGNATURE, NULL, NULL) == 0; release_firmware(sig); return result; } static void free_regdb_keyring(void) { key_put(builtin_regdb_keys); } #else static int load_builtin_regdb_keys(void) { return 0; } static bool regdb_has_valid_signature(const u8 *data, unsigned int size) { return true; } static void free_regdb_keyring(void) { } #endif /* CONFIG_CFG80211_REQUIRE_SIGNED_REGDB */ static bool valid_regdb(const u8 *data, unsigned int size) { const struct fwdb_header *hdr = (void *)data; const struct fwdb_country *country; if (size < sizeof(*hdr)) return false; if (hdr->magic != cpu_to_be32(FWDB_MAGIC)) return false; if (hdr->version != cpu_to_be32(FWDB_VERSION)) return false; if (!regdb_has_valid_signature(data, size)) return false; country = &hdr->country[0]; while ((u8 *)(country + 1) <= data + size) { if (!country->coll_ptr) break; if (!valid_country(data, size, country)) return false; country++; } return true; } static void set_wmm_rule(const struct fwdb_header *db, const struct fwdb_country *country, const struct fwdb_rule *rule, struct ieee80211_reg_rule *rrule) { struct ieee80211_wmm_rule *wmm_rule = &rrule->wmm_rule; struct fwdb_wmm_rule *wmm; unsigned int i, wmm_ptr; wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; wmm = (void *)((u8 *)db + wmm_ptr); if (!valid_wmm(wmm)) { pr_err("Invalid regulatory WMM rule %u-%u in domain %c%c\n", be32_to_cpu(rule->start), be32_to_cpu(rule->end), country->alpha2[0], country->alpha2[1]); return; } for (i = 0; i < IEEE80211_NUM_ACS; i++) { wmm_rule->client[i].cw_min = ecw2cw((wmm->client[i].ecw & 0xf0) >> 4); wmm_rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f); wmm_rule->client[i].aifsn = wmm->client[i].aifsn; wmm_rule->client[i].cot = 1000 * be16_to_cpu(wmm->client[i].cot); wmm_rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4); wmm_rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f); wmm_rule->ap[i].aifsn = wmm->ap[i].aifsn; wmm_rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); } rrule->has_wmm = true; } static int __regdb_query_wmm(const struct fwdb_header *db, const 
struct fwdb_country *country, int freq, struct ieee80211_reg_rule *rrule) { unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; struct fwdb_collection *coll = (void *)((u8 *)db + ptr); int i; for (i = 0; i < coll->n_rules; i++) { __be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2)); unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2; struct fwdb_rule *rule = (void *)((u8 *)db + rule_ptr); if (rule->len < offsetofend(struct fwdb_rule, wmm_ptr)) continue; if (freq >= KHZ_TO_MHZ(be32_to_cpu(rule->start)) && freq <= KHZ_TO_MHZ(be32_to_cpu(rule->end))) { set_wmm_rule(db, country, rule, rrule); return 0; } } return -ENODATA; } int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule) { const struct fwdb_header *hdr = regdb; const struct fwdb_country *country; if (!regdb) return -ENODATA; if (IS_ERR(regdb)) return PTR_ERR(regdb); country = &hdr->country[0]; while (country->coll_ptr) { if (alpha2_equal(alpha2, country->alpha2)) return __regdb_query_wmm(regdb, country, freq, rule); country++; } return -ENODATA; } EXPORT_SYMBOL(reg_query_regdb_wmm); static int regdb_query_country(const struct fwdb_header *db, const struct fwdb_country *country) { unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; struct fwdb_collection *coll = (void *)((u8 *)db + ptr); struct ieee80211_regdomain *regdom; unsigned int i; regdom = kzalloc(struct_size(regdom, reg_rules, coll->n_rules), GFP_KERNEL); if (!regdom) return -ENOMEM; regdom->n_reg_rules = coll->n_rules; regdom->alpha2[0] = country->alpha2[0]; regdom->alpha2[1] = country->alpha2[1]; regdom->dfs_region = coll->dfs_region; for (i = 0; i < regdom->n_reg_rules; i++) { __be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2)); unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2; struct fwdb_rule *rule = (void *)((u8 *)db + rule_ptr); struct ieee80211_reg_rule *rrule = ®dom->reg_rules[i]; rrule->freq_range.start_freq_khz = be32_to_cpu(rule->start); rrule->freq_range.end_freq_khz = be32_to_cpu(rule->end); rrule->freq_range.max_bandwidth_khz = be32_to_cpu(rule->max_bw); rrule->power_rule.max_antenna_gain = 0; rrule->power_rule.max_eirp = be16_to_cpu(rule->max_eirp); rrule->flags = 0; if (rule->flags & FWDB_FLAG_NO_OFDM) rrule->flags |= NL80211_RRF_NO_OFDM; if (rule->flags & FWDB_FLAG_NO_OUTDOOR) rrule->flags |= NL80211_RRF_NO_OUTDOOR; if (rule->flags & FWDB_FLAG_DFS) rrule->flags |= NL80211_RRF_DFS; if (rule->flags & FWDB_FLAG_NO_IR) rrule->flags |= NL80211_RRF_NO_IR; if (rule->flags & FWDB_FLAG_AUTO_BW) rrule->flags |= NL80211_RRF_AUTO_BW; rrule->dfs_cac_ms = 0; /* handle optional data */ if (rule->len >= offsetofend(struct fwdb_rule, cac_timeout)) rrule->dfs_cac_ms = 1000 * be16_to_cpu(rule->cac_timeout); if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) set_wmm_rule(db, country, rule, rrule); } return reg_schedule_apply(regdom); } static int query_regdb(const char *alpha2) { const struct fwdb_header *hdr = regdb; const struct fwdb_country *country; ASSERT_RTNL(); if (IS_ERR(regdb)) return PTR_ERR(regdb); country = &hdr->country[0]; while (country->coll_ptr) { if (alpha2_equal(alpha2, country->alpha2)) return regdb_query_country(regdb, country); country++; } return -ENODATA; } static void regdb_fw_cb(const struct firmware *fw, void *context) { int set_error = 0; bool restore = true; void *db; if (!fw) { pr_info("failed to load regulatory.db\n"); set_error = -ENODATA; } else if (!valid_regdb(fw->data, fw->size)) { pr_info("loaded regulatory.db is malformed or signature is missing/invalid\n"); set_error = 
-EINVAL; } rtnl_lock(); if (regdb && !IS_ERR(regdb)) { /* negative case - a bug * positive case - can happen due to race in case of multiple cb's in * queue, due to usage of asynchronous callback * * Either case, just restore and free new db. */ } else if (set_error) { regdb = ERR_PTR(set_error); } else if (fw) { db = kmemdup(fw->data, fw->size, GFP_KERNEL); if (db) { regdb = db; restore = context && query_regdb(context); } else { restore = true; } } if (restore) restore_regulatory_settings(true, false); rtnl_unlock(); kfree(context); release_firmware(fw); } MODULE_FIRMWARE("regulatory.db"); static int query_regdb_file(const char *alpha2) { int err; ASSERT_RTNL(); if (regdb) return query_regdb(alpha2); alpha2 = kmemdup(alpha2, 2, GFP_KERNEL); if (!alpha2) return -ENOMEM; err = request_firmware_nowait(THIS_MODULE, true, "regulatory.db", ®_pdev->dev, GFP_KERNEL, (void *)alpha2, regdb_fw_cb); if (err) kfree(alpha2); return err; } int reg_reload_regdb(void) { const struct firmware *fw; void *db; int err; const struct ieee80211_regdomain *current_regdomain; struct regulatory_request *request; err = request_firmware(&fw, "regulatory.db", ®_pdev->dev); if (err) return err; if (!valid_regdb(fw->data, fw->size)) { err = -ENODATA; goto out; } db = kmemdup(fw->data, fw->size, GFP_KERNEL); if (!db) { err = -ENOMEM; goto out; } rtnl_lock(); if (!IS_ERR_OR_NULL(regdb)) kfree(regdb); regdb = db; /* reset regulatory domain */ current_regdomain = get_cfg80211_regdom(); request = kzalloc(sizeof(*request), GFP_KERNEL); if (!request) { err = -ENOMEM; goto out_unlock; } request->wiphy_idx = WIPHY_IDX_INVALID; request->alpha2[0] = current_regdomain->alpha2[0]; request->alpha2[1] = current_regdomain->alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_CORE; request->user_reg_hint_type = NL80211_USER_REG_HINT_USER; reg_process_hint(request); out_unlock: rtnl_unlock(); out: release_firmware(fw); return err; } static bool reg_query_database(struct regulatory_request *request) { if (query_regdb_file(request->alpha2) == 0) return true; if (call_crda(request->alpha2) == 0) return true; return false; } bool reg_is_valid_request(const char *alpha2) { struct regulatory_request *lr = get_last_request(); if (!lr || lr->processed) return false; return alpha2_equal(lr->alpha2, alpha2); } static const struct ieee80211_regdomain *reg_get_regdomain(struct wiphy *wiphy) { struct regulatory_request *lr = get_last_request(); /* * Follow the driver's regulatory domain, if present, unless a country * IE has been processed or a user wants to help complaince further */ if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && lr->initiator != NL80211_REGDOM_SET_BY_USER && wiphy->regd) return get_wiphy_regdom(wiphy); return get_cfg80211_regdom(); } static unsigned int reg_get_max_bandwidth_from_range(const struct ieee80211_regdomain *rd, const struct ieee80211_reg_rule *rule) { const struct ieee80211_freq_range *freq_range = &rule->freq_range; const struct ieee80211_freq_range *freq_range_tmp; const struct ieee80211_reg_rule *tmp; u32 start_freq, end_freq, idx, no; for (idx = 0; idx < rd->n_reg_rules; idx++) if (rule == &rd->reg_rules[idx]) break; if (idx == rd->n_reg_rules) return 0; /* get start_freq */ no = idx; while (no) { tmp = &rd->reg_rules[--no]; freq_range_tmp = &tmp->freq_range; if (freq_range_tmp->end_freq_khz < freq_range->start_freq_khz) break; freq_range = freq_range_tmp; } start_freq = freq_range->start_freq_khz; /* get end_freq */ freq_range = &rule->freq_range; no = idx; while (no < rd->n_reg_rules - 1) { tmp = 
&rd->reg_rules[++no]; freq_range_tmp = &tmp->freq_range; if (freq_range_tmp->start_freq_khz > freq_range->end_freq_khz) break; freq_range = freq_range_tmp; } end_freq = freq_range->end_freq_khz; return end_freq - start_freq; } unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd, const struct ieee80211_reg_rule *rule) { unsigned int bw = reg_get_max_bandwidth_from_range(rd, rule); if (rule->flags & NL80211_RRF_NO_320MHZ) bw = min_t(unsigned int, bw, MHZ_TO_KHZ(160)); if (rule->flags & NL80211_RRF_NO_160MHZ) bw = min_t(unsigned int, bw, MHZ_TO_KHZ(80)); if (rule->flags & NL80211_RRF_NO_80MHZ) bw = min_t(unsigned int, bw, MHZ_TO_KHZ(40)); /* * HT40+/HT40- limits are handled per-channel. Only limit BW if both * are not allowed. */ if (rule->flags & NL80211_RRF_NO_HT40MINUS && rule->flags & NL80211_RRF_NO_HT40PLUS) bw = min_t(unsigned int, bw, MHZ_TO_KHZ(20)); return bw; } /* Sanity check on a regulatory rule */ static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule) { const struct ieee80211_freq_range *freq_range = &rule->freq_range; u32 freq_diff; if (freq_range->start_freq_khz <= 0 || freq_range->end_freq_khz <= 0) return false; if (freq_range->start_freq_khz > freq_range->end_freq_khz) return false; freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; if (freq_range->end_freq_khz <= freq_range->start_freq_khz || freq_range->max_bandwidth_khz > freq_diff) return false; return true; } static bool is_valid_rd(const struct ieee80211_regdomain *rd) { const struct ieee80211_reg_rule *reg_rule = NULL; unsigned int i; if (!rd->n_reg_rules) return false; if (WARN_ON(rd->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) return false; for (i = 0; i < rd->n_reg_rules; i++) { reg_rule = &rd->reg_rules[i]; if (!is_valid_reg_rule(reg_rule)) return false; } return true; } /** * freq_in_rule_band - tells us if a frequency is in a frequency band * @freq_range: frequency rule we want to query * @freq_khz: frequency we are inquiring about * * This lets us know if a specific frequency rule is or is not relevant to * a specific frequency's band. Bands are device specific and artificial * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"), * however it is safe for now to assume that a frequency rule should not be * part of a frequency's band if the start freq or end freq are off by more * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the * 60 GHz band. * This resolution can be lowered and should be considered as we add * regulatory rule support for other "bands". * * Returns: whether or not the frequency is in the range */ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range, u32 freq_khz) { /* * From 802.11ad: directional multi-gigabit (DMG): * Pertaining to operation in a frequency band containing a channel * with the Channel starting frequency above 45 GHz. */ u32 limit = freq_khz > 45 * KHZ_PER_GHZ ? 20 * KHZ_PER_GHZ : 2 * KHZ_PER_GHZ; if (abs(freq_khz - freq_range->start_freq_khz) <= limit) return true; if (abs(freq_khz - freq_range->end_freq_khz) <= limit) return true; return false; } /* * Later on we can perhaps use the more restrictive DFS * region but we don't have information for that yet so * for now simply disallow conflicts. 
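 *
 * Concretely: two domains that both use NL80211_DFS_ETSI intersect to
 * NL80211_DFS_ETSI, while e.g. ETSI vs. FCC falls back to
 * NL80211_DFS_UNSET below.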
*/ static enum nl80211_dfs_regions reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1, const enum nl80211_dfs_regions dfs_region2) { if (dfs_region1 != dfs_region2) return NL80211_DFS_UNSET; return dfs_region1; } static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1, const struct ieee80211_wmm_ac *wmm_ac2, struct ieee80211_wmm_ac *intersect) { intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min); intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max); intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot); intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn); } /* * Helper for regdom_intersect(), this does the real * mathematical intersection fun */ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1, const struct ieee80211_regdomain *rd2, const struct ieee80211_reg_rule *rule1, const struct ieee80211_reg_rule *rule2, struct ieee80211_reg_rule *intersected_rule) { const struct ieee80211_freq_range *freq_range1, *freq_range2; struct ieee80211_freq_range *freq_range; const struct ieee80211_power_rule *power_rule1, *power_rule2; struct ieee80211_power_rule *power_rule; const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2; struct ieee80211_wmm_rule *wmm_rule; u32 freq_diff, max_bandwidth1, max_bandwidth2; freq_range1 = &rule1->freq_range; freq_range2 = &rule2->freq_range; freq_range = &intersected_rule->freq_range; power_rule1 = &rule1->power_rule; power_rule2 = &rule2->power_rule; power_rule = &intersected_rule->power_rule; wmm_rule1 = &rule1->wmm_rule; wmm_rule2 = &rule2->wmm_rule; wmm_rule = &intersected_rule->wmm_rule; freq_range->start_freq_khz = max(freq_range1->start_freq_khz, freq_range2->start_freq_khz); freq_range->end_freq_khz = min(freq_range1->end_freq_khz, freq_range2->end_freq_khz); max_bandwidth1 = freq_range1->max_bandwidth_khz; max_bandwidth2 = freq_range2->max_bandwidth_khz; if (rule1->flags & NL80211_RRF_AUTO_BW) max_bandwidth1 = reg_get_max_bandwidth(rd1, rule1); if (rule2->flags & NL80211_RRF_AUTO_BW) max_bandwidth2 = reg_get_max_bandwidth(rd2, rule2); freq_range->max_bandwidth_khz = min(max_bandwidth1, max_bandwidth2); intersected_rule->flags = rule1->flags | rule2->flags; /* * In case NL80211_RRF_AUTO_BW requested for both rules * set AUTO_BW in intersected rule also. Next we will * calculate BW correctly in handle_channel function. * In other case remove AUTO_BW flag while we calculate * maximum bandwidth correctly and auto calculation is * not required. 
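 *
 * Illustrative numbers (not taken from any real domain): intersecting
 * 5170-5250 MHz @ 80 MHz / 20 dBm with 5170-5330 MHz @ 160 MHz / 23 dBm
 * yields 5170-5250 MHz @ 80 MHz / 20 dBm per the max/min arithmetic
 * earlier in this function (the structs store kHz and mBm rather than
 * MHz and dBm).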
*/ if ((rule1->flags & NL80211_RRF_AUTO_BW) && (rule2->flags & NL80211_RRF_AUTO_BW)) intersected_rule->flags |= NL80211_RRF_AUTO_BW; else intersected_rule->flags &= ~NL80211_RRF_AUTO_BW; freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; if (freq_range->max_bandwidth_khz > freq_diff) freq_range->max_bandwidth_khz = freq_diff; power_rule->max_eirp = min(power_rule1->max_eirp, power_rule2->max_eirp); power_rule->max_antenna_gain = min(power_rule1->max_antenna_gain, power_rule2->max_antenna_gain); intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms, rule2->dfs_cac_ms); if (rule1->has_wmm && rule2->has_wmm) { u8 ac; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { reg_wmm_rules_intersect(&wmm_rule1->client[ac], &wmm_rule2->client[ac], &wmm_rule->client[ac]); reg_wmm_rules_intersect(&wmm_rule1->ap[ac], &wmm_rule2->ap[ac], &wmm_rule->ap[ac]); } intersected_rule->has_wmm = true; } else if (rule1->has_wmm) { *wmm_rule = *wmm_rule1; intersected_rule->has_wmm = true; } else if (rule2->has_wmm) { *wmm_rule = *wmm_rule2; intersected_rule->has_wmm = true; } else { intersected_rule->has_wmm = false; } if (!is_valid_reg_rule(intersected_rule)) return -EINVAL; return 0; } /* check whether old rule contains new rule */ static bool rule_contains(struct ieee80211_reg_rule *r1, struct ieee80211_reg_rule *r2) { /* for simplicity, currently consider only same flags */ if (r1->flags != r2->flags) return false; /* verify r1 is more restrictive */ if ((r1->power_rule.max_antenna_gain > r2->power_rule.max_antenna_gain) || r1->power_rule.max_eirp > r2->power_rule.max_eirp) return false; /* make sure r2's range is contained within r1 */ if (r1->freq_range.start_freq_khz > r2->freq_range.start_freq_khz || r1->freq_range.end_freq_khz < r2->freq_range.end_freq_khz) return false; /* and finally verify that r1.max_bw >= r2.max_bw */ if (r1->freq_range.max_bandwidth_khz < r2->freq_range.max_bandwidth_khz) return false; return true; } /* add or extend current rules. do nothing if rule is already contained */ static void add_rule(struct ieee80211_reg_rule *rule, struct ieee80211_reg_rule *reg_rules, u32 *n_rules) { struct ieee80211_reg_rule *tmp_rule; int i; for (i = 0; i < *n_rules; i++) { tmp_rule = ®_rules[i]; /* rule is already contained - do nothing */ if (rule_contains(tmp_rule, rule)) return; /* extend rule if possible */ if (rule_contains(rule, tmp_rule)) { memcpy(tmp_rule, rule, sizeof(*rule)); return; } } memcpy(®_rules[*n_rules], rule, sizeof(*rule)); (*n_rules)++; } /** * regdom_intersect - do the intersection between two regulatory domains * @rd1: first regulatory domain * @rd2: second regulatory domain * * Use this function to get the intersection between two regulatory domains. * Once completed we will mark the alpha2 for the rd as intersected, "98", * as no one single alpha2 can represent this regulatory domain. * * Returns a pointer to the regulatory domain structure which will hold the * resulting intersection of rules between rd1 and rd2. We will * kzalloc() this structure for you. * * Returns: the intersected regdomain */ static struct ieee80211_regdomain * regdom_intersect(const struct ieee80211_regdomain *rd1, const struct ieee80211_regdomain *rd2) { int r; unsigned int x, y; unsigned int num_rules = 0; const struct ieee80211_reg_rule *rule1, *rule2; struct ieee80211_reg_rule intersected_rule; struct ieee80211_regdomain *rd; if (!rd1 || !rd2) return NULL; /* * First we get a count of the rules we'll need, then we actually * build them. 
This is to so we can malloc() and free() a * regdomain once. The reason we use reg_rules_intersect() here * is it will return -EINVAL if the rule computed makes no sense. * All rules that do check out OK are valid. */ for (x = 0; x < rd1->n_reg_rules; x++) { rule1 = &rd1->reg_rules[x]; for (y = 0; y < rd2->n_reg_rules; y++) { rule2 = &rd2->reg_rules[y]; if (!reg_rules_intersect(rd1, rd2, rule1, rule2, &intersected_rule)) num_rules++; } } if (!num_rules) return NULL; rd = kzalloc(struct_size(rd, reg_rules, num_rules), GFP_KERNEL); if (!rd) return NULL; for (x = 0; x < rd1->n_reg_rules; x++) { rule1 = &rd1->reg_rules[x]; for (y = 0; y < rd2->n_reg_rules; y++) { rule2 = &rd2->reg_rules[y]; r = reg_rules_intersect(rd1, rd2, rule1, rule2, &intersected_rule); /* * No need to memset here the intersected rule here as * we're not using the stack anymore */ if (r) continue; add_rule(&intersected_rule, rd->reg_rules, &rd->n_reg_rules); } } rd->alpha2[0] = '9'; rd->alpha2[1] = '8'; rd->dfs_region = reg_intersect_dfs_region(rd1->dfs_region, rd2->dfs_region); return rd; } /* * XXX: add support for the rest of enum nl80211_reg_rule_flags, we may * want to just have the channel structure use these */ static u32 map_regdom_flags(u32 rd_flags) { u32 channel_flags = 0; if (rd_flags & NL80211_RRF_NO_IR_ALL) channel_flags |= IEEE80211_CHAN_NO_IR; if (rd_flags & NL80211_RRF_DFS) channel_flags |= IEEE80211_CHAN_RADAR; if (rd_flags & NL80211_RRF_NO_OFDM) channel_flags |= IEEE80211_CHAN_NO_OFDM; if (rd_flags & NL80211_RRF_NO_OUTDOOR) channel_flags |= IEEE80211_CHAN_INDOOR_ONLY; if (rd_flags & NL80211_RRF_IR_CONCURRENT) channel_flags |= IEEE80211_CHAN_IR_CONCURRENT; if (rd_flags & NL80211_RRF_NO_HT40MINUS) channel_flags |= IEEE80211_CHAN_NO_HT40MINUS; if (rd_flags & NL80211_RRF_NO_HT40PLUS) channel_flags |= IEEE80211_CHAN_NO_HT40PLUS; if (rd_flags & NL80211_RRF_NO_80MHZ) channel_flags |= IEEE80211_CHAN_NO_80MHZ; if (rd_flags & NL80211_RRF_NO_160MHZ) channel_flags |= IEEE80211_CHAN_NO_160MHZ; if (rd_flags & NL80211_RRF_NO_HE) channel_flags |= IEEE80211_CHAN_NO_HE; if (rd_flags & NL80211_RRF_NO_320MHZ) channel_flags |= IEEE80211_CHAN_NO_320MHZ; if (rd_flags & NL80211_RRF_NO_EHT) channel_flags |= IEEE80211_CHAN_NO_EHT; if (rd_flags & NL80211_RRF_DFS_CONCURRENT) channel_flags |= IEEE80211_CHAN_DFS_CONCURRENT; if (rd_flags & NL80211_RRF_NO_6GHZ_VLP_CLIENT) channel_flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT; if (rd_flags & NL80211_RRF_NO_6GHZ_AFC_CLIENT) channel_flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT; if (rd_flags & NL80211_RRF_PSD) channel_flags |= IEEE80211_CHAN_PSD; if (rd_flags & NL80211_RRF_ALLOW_6GHZ_VLP_AP) channel_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP; return channel_flags; } static const struct ieee80211_reg_rule * freq_reg_info_regd(u32 center_freq, const struct ieee80211_regdomain *regd, u32 bw) { int i; bool band_rule_found = false; bool bw_fits = false; if (!regd) return ERR_PTR(-EINVAL); for (i = 0; i < regd->n_reg_rules; i++) { const struct ieee80211_reg_rule *rr; const struct ieee80211_freq_range *fr = NULL; rr = ®d->reg_rules[i]; fr = &rr->freq_range; /* * We only need to know if one frequency rule was * in center_freq's band, that's enough, so let's * not overwrite it once found */ if (!band_rule_found) band_rule_found = freq_in_rule_band(fr, center_freq); bw_fits = cfg80211_does_bw_fit_range(fr, center_freq, bw); if (band_rule_found && bw_fits) return rr; } if (!band_rule_found) return ERR_PTR(-ERANGE); return ERR_PTR(-EINVAL); } static const struct ieee80211_reg_rule * __freq_reg_info(struct 
wiphy *wiphy, u32 center_freq, u32 min_bw) { const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy); static const u32 bws[] = {0, 1, 2, 4, 5, 8, 10, 16, 20}; const struct ieee80211_reg_rule *reg_rule = ERR_PTR(-ERANGE); int i = ARRAY_SIZE(bws) - 1; u32 bw; for (bw = MHZ_TO_KHZ(bws[i]); bw >= min_bw; bw = MHZ_TO_KHZ(bws[i--])) { reg_rule = freq_reg_info_regd(center_freq, regd, bw); if (!IS_ERR(reg_rule)) return reg_rule; } return reg_rule; } const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy, u32 center_freq) { u32 min_bw = center_freq < MHZ_TO_KHZ(1000) ? 1 : 20; return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(min_bw)); } EXPORT_SYMBOL(freq_reg_info); const char *reg_initiator_name(enum nl80211_reg_initiator initiator) { switch (initiator) { case NL80211_REGDOM_SET_BY_CORE: return "core"; case NL80211_REGDOM_SET_BY_USER: return "user"; case NL80211_REGDOM_SET_BY_DRIVER: return "driver"; case NL80211_REGDOM_SET_BY_COUNTRY_IE: return "country element"; default: WARN_ON(1); return "bug"; } } EXPORT_SYMBOL(reg_initiator_name); static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd, const struct ieee80211_reg_rule *reg_rule, const struct ieee80211_channel *chan) { const struct ieee80211_freq_range *freq_range = NULL; u32 max_bandwidth_khz, center_freq_khz, bw_flags = 0; bool is_s1g = chan->band == NL80211_BAND_S1GHZ; freq_range = ®_rule->freq_range; max_bandwidth_khz = freq_range->max_bandwidth_khz; center_freq_khz = ieee80211_channel_to_khz(chan); /* Check if auto calculation requested */ if (reg_rule->flags & NL80211_RRF_AUTO_BW) max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); /* If we get a reg_rule we can assume that at least 5Mhz fit */ if (!cfg80211_does_bw_fit_range(freq_range, center_freq_khz, MHZ_TO_KHZ(10))) bw_flags |= IEEE80211_CHAN_NO_10MHZ; if (!cfg80211_does_bw_fit_range(freq_range, center_freq_khz, MHZ_TO_KHZ(20))) bw_flags |= IEEE80211_CHAN_NO_20MHZ; if (is_s1g) { /* S1G is strict about non overlapping channels. We can * calculate which bandwidth is allowed per channel by finding * the largest bandwidth which cleanly divides the freq_range. */ int edge_offset; int ch_bw = max_bandwidth_khz; while (ch_bw) { edge_offset = (center_freq_khz - ch_bw / 2) - freq_range->start_freq_khz; if (edge_offset % ch_bw == 0) { switch (KHZ_TO_MHZ(ch_bw)) { case 1: bw_flags |= IEEE80211_CHAN_1MHZ; break; case 2: bw_flags |= IEEE80211_CHAN_2MHZ; break; case 4: bw_flags |= IEEE80211_CHAN_4MHZ; break; case 8: bw_flags |= IEEE80211_CHAN_8MHZ; break; case 16: bw_flags |= IEEE80211_CHAN_16MHZ; break; default: /* If we got here, no bandwidths fit on * this frequency, ie. band edge. 
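 *
 * Worked example with made-up numbers: for a rule spanning
 * 902-928 MHz with max_bandwidth 16 MHz and a centre of 902.5 MHz,
 * the loop tries 16, 8, 4 and 2 MHz without the edge offset dividing
 * evenly and finally settles on IEEE80211_CHAN_1MHZ; only a centre
 * that fits no width at all ends up in this default branch.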
*/ bw_flags |= IEEE80211_CHAN_DISABLED; break; } break; } ch_bw /= 2; } } else { if (max_bandwidth_khz < MHZ_TO_KHZ(10)) bw_flags |= IEEE80211_CHAN_NO_10MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(20)) bw_flags |= IEEE80211_CHAN_NO_20MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(40)) bw_flags |= IEEE80211_CHAN_NO_HT40; if (max_bandwidth_khz < MHZ_TO_KHZ(80)) bw_flags |= IEEE80211_CHAN_NO_80MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(160)) bw_flags |= IEEE80211_CHAN_NO_160MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(320)) bw_flags |= IEEE80211_CHAN_NO_320MHZ; } return bw_flags; } static void handle_channel_single_rule(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_channel *chan, u32 flags, struct regulatory_request *lr, struct wiphy *request_wiphy, const struct ieee80211_reg_rule *reg_rule) { u32 bw_flags = 0; const struct ieee80211_power_rule *power_rule = NULL; const struct ieee80211_regdomain *regd; regd = reg_get_regdomain(wiphy); power_rule = ®_rule->power_rule; bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan); if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && request_wiphy && request_wiphy == wiphy && request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) { /* * This guarantees the driver's requested regulatory domain * will always be used as a base for further regulatory * settings */ chan->flags = chan->orig_flags = map_regdom_flags(reg_rule->flags) | bw_flags; chan->max_antenna_gain = chan->orig_mag = (int) MBI_TO_DBI(power_rule->max_antenna_gain); chan->max_reg_power = chan->max_power = chan->orig_mpwr = (int) MBM_TO_DBM(power_rule->max_eirp); if (chan->flags & IEEE80211_CHAN_RADAR) { chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; if (reg_rule->dfs_cac_ms) chan->dfs_cac_ms = reg_rule->dfs_cac_ms; } if (chan->flags & IEEE80211_CHAN_PSD) chan->psd = reg_rule->psd; return; } chan->dfs_state = NL80211_DFS_USABLE; chan->dfs_state_entered = jiffies; chan->beacon_found = false; chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); chan->max_antenna_gain = min_t(int, chan->orig_mag, MBI_TO_DBI(power_rule->max_antenna_gain)); chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp); if (chan->flags & IEEE80211_CHAN_RADAR) { if (reg_rule->dfs_cac_ms) chan->dfs_cac_ms = reg_rule->dfs_cac_ms; else chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; } if (chan->flags & IEEE80211_CHAN_PSD) chan->psd = reg_rule->psd; if (chan->orig_mpwr) { /* * Devices that use REGULATORY_COUNTRY_IE_FOLLOW_POWER * will always follow the passed country IE power settings. 
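 *
 * (When orig_mpwr is 0 the device supplied no initial TX power limit,
 * and the outer else branch right after this block simply adopts the
 * regulatory maximum.)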
*/ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_FOLLOW_POWER) chan->max_power = chan->max_reg_power; else chan->max_power = min(chan->orig_mpwr, chan->max_reg_power); } else chan->max_power = chan->max_reg_power; } static void handle_channel_adjacent_rules(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_channel *chan, u32 flags, struct regulatory_request *lr, struct wiphy *request_wiphy, const struct ieee80211_reg_rule *rrule1, const struct ieee80211_reg_rule *rrule2, struct ieee80211_freq_range *comb_range) { u32 bw_flags1 = 0; u32 bw_flags2 = 0; const struct ieee80211_power_rule *power_rule1 = NULL; const struct ieee80211_power_rule *power_rule2 = NULL; const struct ieee80211_regdomain *regd; regd = reg_get_regdomain(wiphy); power_rule1 = &rrule1->power_rule; power_rule2 = &rrule2->power_rule; bw_flags1 = reg_rule_to_chan_bw_flags(regd, rrule1, chan); bw_flags2 = reg_rule_to_chan_bw_flags(regd, rrule2, chan); if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && request_wiphy && request_wiphy == wiphy && request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) { /* This guarantees the driver's requested regulatory domain * will always be used as a base for further regulatory * settings */ chan->flags = map_regdom_flags(rrule1->flags) | map_regdom_flags(rrule2->flags) | bw_flags1 | bw_flags2; chan->orig_flags = chan->flags; chan->max_antenna_gain = min_t(int, MBI_TO_DBI(power_rule1->max_antenna_gain), MBI_TO_DBI(power_rule2->max_antenna_gain)); chan->orig_mag = chan->max_antenna_gain; chan->max_reg_power = min_t(int, MBM_TO_DBM(power_rule1->max_eirp), MBM_TO_DBM(power_rule2->max_eirp)); chan->max_power = chan->max_reg_power; chan->orig_mpwr = chan->max_reg_power; if (chan->flags & IEEE80211_CHAN_RADAR) { chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; if (rrule1->dfs_cac_ms || rrule2->dfs_cac_ms) chan->dfs_cac_ms = max_t(unsigned int, rrule1->dfs_cac_ms, rrule2->dfs_cac_ms); } if ((rrule1->flags & NL80211_RRF_PSD) && (rrule2->flags & NL80211_RRF_PSD)) chan->psd = min_t(s8, rrule1->psd, rrule2->psd); else chan->flags &= ~NL80211_RRF_PSD; return; } chan->dfs_state = NL80211_DFS_USABLE; chan->dfs_state_entered = jiffies; chan->beacon_found = false; chan->flags = flags | bw_flags1 | bw_flags2 | map_regdom_flags(rrule1->flags) | map_regdom_flags(rrule2->flags); /* reg_rule_to_chan_bw_flags may forbids 10 and forbids 20 MHz * (otherwise no adj. rule case), recheck therefore */ if (cfg80211_does_bw_fit_range(comb_range, ieee80211_channel_to_khz(chan), MHZ_TO_KHZ(10))) chan->flags &= ~IEEE80211_CHAN_NO_10MHZ; if (cfg80211_does_bw_fit_range(comb_range, ieee80211_channel_to_khz(chan), MHZ_TO_KHZ(20))) chan->flags &= ~IEEE80211_CHAN_NO_20MHZ; chan->max_antenna_gain = min_t(int, chan->orig_mag, min_t(int, MBI_TO_DBI(power_rule1->max_antenna_gain), MBI_TO_DBI(power_rule2->max_antenna_gain))); chan->max_reg_power = min_t(int, MBM_TO_DBM(power_rule1->max_eirp), MBM_TO_DBM(power_rule2->max_eirp)); if (chan->flags & IEEE80211_CHAN_RADAR) { if (rrule1->dfs_cac_ms || rrule2->dfs_cac_ms) chan->dfs_cac_ms = max_t(unsigned int, rrule1->dfs_cac_ms, rrule2->dfs_cac_ms); else chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; } if (chan->orig_mpwr) { /* Devices that use REGULATORY_COUNTRY_IE_FOLLOW_POWER * will always follow the passed country IE power settings. 
*/ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_FOLLOW_POWER) chan->max_power = chan->max_reg_power; else chan->max_power = min(chan->orig_mpwr, chan->max_reg_power); } else { chan->max_power = chan->max_reg_power; } } /* Note that right now we assume the desired channel bandwidth * is always 20 MHz for each individual channel (HT40 uses 20 MHz * per channel, the primary and the extension channel). */ static void handle_channel(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_channel *chan) { const u32 orig_chan_freq = ieee80211_channel_to_khz(chan); struct regulatory_request *lr = get_last_request(); struct wiphy *request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); const struct ieee80211_reg_rule *rrule = NULL; const struct ieee80211_reg_rule *rrule1 = NULL; const struct ieee80211_reg_rule *rrule2 = NULL; u32 flags = chan->orig_flags; rrule = freq_reg_info(wiphy, orig_chan_freq); if (IS_ERR(rrule)) { /* check for adjacent match, therefore get rules for * chan - 20 MHz and chan + 20 MHz and test * if reg rules are adjacent */ rrule1 = freq_reg_info(wiphy, orig_chan_freq - MHZ_TO_KHZ(20)); rrule2 = freq_reg_info(wiphy, orig_chan_freq + MHZ_TO_KHZ(20)); if (!IS_ERR(rrule1) && !IS_ERR(rrule2)) { struct ieee80211_freq_range comb_range; if (rrule1->freq_range.end_freq_khz != rrule2->freq_range.start_freq_khz) goto disable_chan; comb_range.start_freq_khz = rrule1->freq_range.start_freq_khz; comb_range.end_freq_khz = rrule2->freq_range.end_freq_khz; comb_range.max_bandwidth_khz = min_t(u32, rrule1->freq_range.max_bandwidth_khz, rrule2->freq_range.max_bandwidth_khz); if (!cfg80211_does_bw_fit_range(&comb_range, orig_chan_freq, MHZ_TO_KHZ(20))) goto disable_chan; handle_channel_adjacent_rules(wiphy, initiator, chan, flags, lr, request_wiphy, rrule1, rrule2, &comb_range); return; } disable_chan: /* We will disable all channels that do not match our * received regulatory rule unless the hint is coming * from a Country IE and the Country IE had no information * about a band. The IEEE 802.11 spec allows for an AP * to send only a subset of the regulatory rules allowed, * so an AP in the US that only supports 2.4 GHz may only send * a country IE with information for the 2.4 GHz band * while 5 GHz is still supported. 
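 *
 * That is what the -ERANGE check right below is for: a channel in a
 * band the country IE said nothing about is left untouched instead
 * of being disabled.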
*/ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && PTR_ERR(rrule) == -ERANGE) return; if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && request_wiphy && request_wiphy == wiphy && request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) { pr_debug("Disabling freq %d.%03d MHz for good\n", chan->center_freq, chan->freq_offset); chan->orig_flags |= IEEE80211_CHAN_DISABLED; chan->flags = chan->orig_flags; } else { pr_debug("Disabling freq %d.%03d MHz\n", chan->center_freq, chan->freq_offset); chan->flags |= IEEE80211_CHAN_DISABLED; } return; } handle_channel_single_rule(wiphy, initiator, chan, flags, lr, request_wiphy, rrule); } static void handle_band(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_supported_band *sband) { unsigned int i; if (!sband) return; for (i = 0; i < sband->n_channels; i++) handle_channel(wiphy, initiator, &sband->channels[i]); } static bool reg_request_cell_base(struct regulatory_request *request) { if (request->initiator != NL80211_REGDOM_SET_BY_USER) return false; return request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE; } bool reg_last_request_cell_base(void) { return reg_request_cell_base(get_last_request()); } #ifdef CONFIG_CFG80211_REG_CELLULAR_HINTS /* Core specific check */ static enum reg_request_treatment reg_ignore_cell_hint(struct regulatory_request *pending_request) { struct regulatory_request *lr = get_last_request(); if (!reg_num_devs_support_basehint) return REG_REQ_IGNORE; if (reg_request_cell_base(lr) && !regdom_changes(pending_request->alpha2)) return REG_REQ_ALREADY_SET; return REG_REQ_OK; } /* Device specific check */ static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy) { return !(wiphy->features & NL80211_FEATURE_CELL_BASE_REG_HINTS); } #else static enum reg_request_treatment reg_ignore_cell_hint(struct regulatory_request *pending_request) { return REG_REQ_IGNORE; } static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy) { return true; } #endif static bool wiphy_strict_alpha2_regd(struct wiphy *wiphy) { if (wiphy->regulatory_flags & REGULATORY_STRICT_REG && !(wiphy->regulatory_flags & REGULATORY_CUSTOM_REG)) return true; return false; } static bool ignore_reg_update(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { struct regulatory_request *lr = get_last_request(); if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) return true; if (!lr) { pr_debug("Ignoring regulatory request set by %s since last_request is not set\n", reg_initiator_name(initiator)); return true; } if (initiator == NL80211_REGDOM_SET_BY_CORE && wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) { pr_debug("Ignoring regulatory request set by %s since the driver uses its own custom regulatory domain\n", reg_initiator_name(initiator)); return true; } /* * wiphy->regd will be set once the device has its own * desired regulatory domain set */ if (wiphy_strict_alpha2_regd(wiphy) && !wiphy->regd && initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && !is_world_regdom(lr->alpha2)) { pr_debug("Ignoring regulatory request set by %s since the driver requires its own regulatory domain to be set first\n", reg_initiator_name(initiator)); return true; } if (reg_request_cell_base(lr)) return reg_dev_ignore_cell_hint(wiphy); return false; } static bool reg_is_world_roaming(struct wiphy *wiphy) { const struct ieee80211_regdomain *cr = get_cfg80211_regdom(); const struct ieee80211_regdomain *wr = get_wiphy_regdom(wiphy); struct regulatory_request *lr = get_last_request(); if (is_world_regdom(cr->alpha2) || (wr && 
is_world_regdom(wr->alpha2))) return true; if (lr && lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) return true; return false; } static void reg_call_notifier(struct wiphy *wiphy, struct regulatory_request *request) { if (wiphy->reg_notifier) wiphy->reg_notifier(wiphy, request); } static void handle_reg_beacon(struct wiphy *wiphy, unsigned int chan_idx, struct reg_beacon *reg_beacon) { struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; bool channel_changed = false; struct ieee80211_channel chan_before; struct regulatory_request *lr = get_last_request(); sband = wiphy->bands[reg_beacon->chan.band]; chan = &sband->channels[chan_idx]; if (likely(!ieee80211_channel_equal(chan, ®_beacon->chan))) return; if (chan->beacon_found) return; chan->beacon_found = true; if (!reg_is_world_roaming(wiphy)) return; if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS) return; chan_before = *chan; if (chan->flags & IEEE80211_CHAN_NO_IR) { chan->flags &= ~IEEE80211_CHAN_NO_IR; channel_changed = true; } if (channel_changed) { nl80211_send_beacon_hint_event(wiphy, &chan_before, chan); if (wiphy->flags & WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON) reg_call_notifier(wiphy, lr); } } /* * Called when a scan on a wiphy finds a beacon on * new channel */ static void wiphy_update_new_beacon(struct wiphy *wiphy, struct reg_beacon *reg_beacon) { unsigned int i; struct ieee80211_supported_band *sband; if (!wiphy->bands[reg_beacon->chan.band]) return; sband = wiphy->bands[reg_beacon->chan.band]; for (i = 0; i < sband->n_channels; i++) handle_reg_beacon(wiphy, i, reg_beacon); } /* * Called upon reg changes or a new wiphy is added */ static void wiphy_update_beacon_reg(struct wiphy *wiphy) { unsigned int i; struct ieee80211_supported_band *sband; struct reg_beacon *reg_beacon; list_for_each_entry(reg_beacon, ®_beacon_list, list) { if (!wiphy->bands[reg_beacon->chan.band]) continue; sband = wiphy->bands[reg_beacon->chan.band]; for (i = 0; i < sband->n_channels; i++) handle_reg_beacon(wiphy, i, reg_beacon); } } /* Reap the advantages of previously found beacons */ static void reg_process_beacons(struct wiphy *wiphy) { /* * Means we are just firing up cfg80211, so no beacons would * have been processed yet. 
*/ if (!last_request) return; wiphy_update_beacon_reg(wiphy); } static bool is_ht40_allowed(struct ieee80211_channel *chan) { if (!chan) return false; if (chan->flags & IEEE80211_CHAN_DISABLED) return false; /* This would happen when regulatory rules disallow HT40 completely */ if ((chan->flags & IEEE80211_CHAN_NO_HT40) == IEEE80211_CHAN_NO_HT40) return false; return true; } static void reg_process_ht_flags_channel(struct wiphy *wiphy, struct ieee80211_channel *channel) { struct ieee80211_supported_band *sband = wiphy->bands[channel->band]; struct ieee80211_channel *channel_before = NULL, *channel_after = NULL; const struct ieee80211_regdomain *regd; unsigned int i; u32 flags; if (!is_ht40_allowed(channel)) { channel->flags |= IEEE80211_CHAN_NO_HT40; return; } /* * We need to ensure the extension channels exist to * be able to use HT40- or HT40+, this finds them (or not) */ for (i = 0; i < sband->n_channels; i++) { struct ieee80211_channel *c = &sband->channels[i]; if (c->center_freq == (channel->center_freq - 20)) channel_before = c; if (c->center_freq == (channel->center_freq + 20)) channel_after = c; } flags = 0; regd = get_wiphy_regdom(wiphy); if (regd) { const struct ieee80211_reg_rule *reg_rule = freq_reg_info_regd(MHZ_TO_KHZ(channel->center_freq), regd, MHZ_TO_KHZ(20)); if (!IS_ERR(reg_rule)) flags = reg_rule->flags; } /* * Please note that this assumes target bandwidth is 20 MHz, * if that ever changes we also need to change the below logic * to include that as well. */ if (!is_ht40_allowed(channel_before) || flags & NL80211_RRF_NO_HT40MINUS) channel->flags |= IEEE80211_CHAN_NO_HT40MINUS; else channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS; if (!is_ht40_allowed(channel_after) || flags & NL80211_RRF_NO_HT40PLUS) channel->flags |= IEEE80211_CHAN_NO_HT40PLUS; else channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS; } static void reg_process_ht_flags_band(struct wiphy *wiphy, struct ieee80211_supported_band *sband) { unsigned int i; if (!sband) return; for (i = 0; i < sband->n_channels; i++) reg_process_ht_flags_channel(wiphy, &sband->channels[i]); } static void reg_process_ht_flags(struct wiphy *wiphy) { enum nl80211_band band; if (!wiphy) return; for (band = 0; band < NUM_NL80211_BANDS; band++) reg_process_ht_flags_band(wiphy, wiphy->bands[band]); } static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) { struct cfg80211_chan_def chandef = {}; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); enum nl80211_iftype iftype; bool ret; int link; iftype = wdev->iftype; /* make sure the interface is active */ if (!wdev->netdev || !netif_running(wdev->netdev)) return true; for (link = 0; link < ARRAY_SIZE(wdev->links); link++) { struct ieee80211_channel *chan; if (!wdev->valid_links && link > 0) break; if (wdev->valid_links && !(wdev->valid_links & BIT(link))) continue; switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: if (!wdev->links[link].ap.beacon_interval) continue; chandef = wdev->links[link].ap.chandef; break; case NL80211_IFTYPE_MESH_POINT: if (!wdev->u.mesh.beacon_interval) continue; chandef = wdev->u.mesh.chandef; break; case NL80211_IFTYPE_ADHOC: if (!wdev->u.ibss.ssid_len) continue; chandef = wdev->u.ibss.chandef; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: /* Maybe we could consider disabling that link only? 
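 *
 * As the code stands, the answer is no: if any active link sits on a
 * channel that is no longer allowed, this function returns false and
 * reg_leave_invalid_chans() tears down the whole wdev.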
*/ if (!wdev->links[link].client.current_bss) continue; chan = wdev->links[link].client.current_bss->pub.channel; if (!chan) continue; if (!rdev->ops->get_channel || rdev_get_channel(rdev, wdev, link, &chandef)) cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); break; case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_DEVICE: /* no enforcement required */ break; case NL80211_IFTYPE_OCB: if (!wdev->u.ocb.chandef.chan) continue; chandef = wdev->u.ocb.chandef; break; case NL80211_IFTYPE_NAN: /* we have no info, but NAN is also pretty universal */ continue; default: /* others not implemented for now */ WARN_ON_ONCE(1); break; } switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: ret = cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype); if (!ret) return ret; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: ret = cfg80211_chandef_usable(wiphy, &chandef, IEEE80211_CHAN_DISABLED); if (!ret) return ret; break; default: break; } } return true; } static void reg_leave_invalid_chans(struct wiphy *wiphy) { struct wireless_dev *wdev; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); wiphy_lock(wiphy); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) if (!reg_wdev_chan_valid(wiphy, wdev)) cfg80211_leave(rdev, wdev); wiphy_unlock(wiphy); } static void reg_check_chans_work(struct work_struct *work) { struct cfg80211_registered_device *rdev; pr_debug("Verifying active interfaces after reg change\n"); rtnl_lock(); for_each_rdev(rdev) reg_leave_invalid_chans(&rdev->wiphy); rtnl_unlock(); } void reg_check_channels(void) { /* * Give usermode a chance to do something nicer (move to another * channel, orderly disconnection), before forcing a disconnection. */ mod_delayed_work(system_power_efficient_wq, ®_check_chans, msecs_to_jiffies(REG_ENFORCE_GRACE_MS)); } static void wiphy_update_regulatory(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { enum nl80211_band band; struct regulatory_request *lr = get_last_request(); if (ignore_reg_update(wiphy, initiator)) { /* * Regulatory updates set by CORE are ignored for custom * regulatory cards. Let us notify the changes to the driver, * as some drivers used this to restore its orig_* reg domain. 
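 *
 * (Self-managed wiphys are excluded even from that courtesy
 * notification, as the REGULATORY_WIPHY_SELF_MANAGED check just
 * below shows.)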
*/ if (initiator == NL80211_REGDOM_SET_BY_CORE && wiphy->regulatory_flags & REGULATORY_CUSTOM_REG && !(wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)) reg_call_notifier(wiphy, lr); return; } lr->dfs_region = get_cfg80211_regdom()->dfs_region; for (band = 0; band < NUM_NL80211_BANDS; band++) handle_band(wiphy, initiator, wiphy->bands[band]); reg_process_beacons(wiphy); reg_process_ht_flags(wiphy); reg_call_notifier(wiphy, lr); } static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) { struct cfg80211_registered_device *rdev; struct wiphy *wiphy; ASSERT_RTNL(); for_each_rdev(rdev) { wiphy = &rdev->wiphy; wiphy_update_regulatory(wiphy, initiator); } reg_check_channels(); } static void handle_channel_custom(struct wiphy *wiphy, struct ieee80211_channel *chan, const struct ieee80211_regdomain *regd, u32 min_bw) { u32 bw_flags = 0; const struct ieee80211_reg_rule *reg_rule = NULL; const struct ieee80211_power_rule *power_rule = NULL; u32 bw, center_freq_khz; center_freq_khz = ieee80211_channel_to_khz(chan); for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) { reg_rule = freq_reg_info_regd(center_freq_khz, regd, bw); if (!IS_ERR(reg_rule)) break; } if (IS_ERR_OR_NULL(reg_rule)) { pr_debug("Disabling freq %d.%03d MHz as custom regd has no rule that fits it\n", chan->center_freq, chan->freq_offset); if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) { chan->flags |= IEEE80211_CHAN_DISABLED; } else { chan->orig_flags |= IEEE80211_CHAN_DISABLED; chan->flags = chan->orig_flags; } return; } power_rule = ®_rule->power_rule; bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan); chan->dfs_state_entered = jiffies; chan->dfs_state = NL80211_DFS_USABLE; chan->beacon_found = false; if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) chan->flags = chan->orig_flags | bw_flags | map_regdom_flags(reg_rule->flags); else chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags; chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); chan->max_reg_power = chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); if (chan->flags & IEEE80211_CHAN_RADAR) { if (reg_rule->dfs_cac_ms) chan->dfs_cac_ms = reg_rule->dfs_cac_ms; else chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; } if (chan->flags & IEEE80211_CHAN_PSD) chan->psd = reg_rule->psd; chan->max_power = chan->max_reg_power; } static void handle_band_custom(struct wiphy *wiphy, struct ieee80211_supported_band *sband, const struct ieee80211_regdomain *regd) { unsigned int i; if (!sband) return; /* * We currently assume that you always want at least 20 MHz, * otherwise channel 12 might get enabled if this rule is * compatible to US, which permits 2402 - 2472 MHz. */ for (i = 0; i < sband->n_channels; i++) handle_channel_custom(wiphy, &sband->channels[i], regd, MHZ_TO_KHZ(20)); } /* Used by drivers prior to wiphy registration */ void wiphy_apply_custom_regulatory(struct wiphy *wiphy, const struct ieee80211_regdomain *regd) { const struct ieee80211_regdomain *new_regd, *tmp; enum nl80211_band band; unsigned int bands_set = 0; WARN(!(wiphy->regulatory_flags & REGULATORY_CUSTOM_REG), "wiphy should have REGULATORY_CUSTOM_REG\n"); wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!wiphy->bands[band]) continue; handle_band_custom(wiphy, wiphy->bands[band], regd); bands_set++; } /* * no point in calling this if it won't have any effect * on your device's supported bands. 
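 *
 * A minimal sketch of the intended driver-side usage (identifiers
 * below are hypothetical, not taken from this file):
 *
 *	static const struct ieee80211_regdomain mydrv_regdom = {
 *		.n_reg_rules = 1,
 *		.alpha2 = "99",
 *		.reg_rules = {
 *			REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
 *		},
 *	};
 *
 *	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
 *	wiphy_apply_custom_regulatory(wiphy, &mydrv_regdom);
 *	...followed by wiphy_register(wiphy).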
*/ WARN_ON(!bands_set); new_regd = reg_copy_regd(regd); if (IS_ERR(new_regd)) return; rtnl_lock(); wiphy_lock(wiphy); tmp = get_wiphy_regdom(wiphy); rcu_assign_pointer(wiphy->regd, new_regd); rcu_free_regdom(tmp); wiphy_unlock(wiphy); rtnl_unlock(); } EXPORT_SYMBOL(wiphy_apply_custom_regulatory); static void reg_set_request_processed(void) { bool need_more_processing = false; struct regulatory_request *lr = get_last_request(); lr->processed = true; spin_lock(®_requests_lock); if (!list_empty(®_requests_list)) need_more_processing = true; spin_unlock(®_requests_lock); cancel_crda_timeout(); if (need_more_processing) schedule_work(®_work); } /** * reg_process_hint_core - process core regulatory requests * @core_request: a pending core regulatory request * * The wireless subsystem can use this function to process * a regulatory request issued by the regulatory core. * * Returns: %REG_REQ_OK or %REG_REQ_IGNORE, indicating if the * hint was processed or ignored */ static enum reg_request_treatment reg_process_hint_core(struct regulatory_request *core_request) { if (reg_query_database(core_request)) { core_request->intersect = false; core_request->processed = false; reg_update_last_request(core_request); return REG_REQ_OK; } return REG_REQ_IGNORE; } static enum reg_request_treatment __reg_process_hint_user(struct regulatory_request *user_request) { struct regulatory_request *lr = get_last_request(); if (reg_request_cell_base(user_request)) return reg_ignore_cell_hint(user_request); if (reg_request_cell_base(lr)) return REG_REQ_IGNORE; if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) return REG_REQ_INTERSECT; /* * If the user knows better the user should set the regdom * to their country before the IE is picked up */ if (lr->initiator == NL80211_REGDOM_SET_BY_USER && lr->intersect) return REG_REQ_IGNORE; /* * Process user requests only after previous user/driver/core * requests have been processed */ if ((lr->initiator == NL80211_REGDOM_SET_BY_CORE || lr->initiator == NL80211_REGDOM_SET_BY_DRIVER || lr->initiator == NL80211_REGDOM_SET_BY_USER) && regdom_changes(lr->alpha2)) return REG_REQ_IGNORE; if (!regdom_changes(user_request->alpha2)) return REG_REQ_ALREADY_SET; return REG_REQ_OK; } /** * reg_process_hint_user - process user regulatory requests * @user_request: a pending user regulatory request * * The wireless subsystem can use this function to process * a regulatory request initiated by userspace. 
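 * (A user hint is what, for example, "iw reg set DE" generates via
 * nl80211.)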
* * Returns: %REG_REQ_OK or %REG_REQ_IGNORE, indicating if the * hint was processed or ignored */ static enum reg_request_treatment reg_process_hint_user(struct regulatory_request *user_request) { enum reg_request_treatment treatment; treatment = __reg_process_hint_user(user_request); if (treatment == REG_REQ_IGNORE || treatment == REG_REQ_ALREADY_SET) return REG_REQ_IGNORE; user_request->intersect = treatment == REG_REQ_INTERSECT; user_request->processed = false; if (reg_query_database(user_request)) { reg_update_last_request(user_request); user_alpha2[0] = user_request->alpha2[0]; user_alpha2[1] = user_request->alpha2[1]; return REG_REQ_OK; } return REG_REQ_IGNORE; } static enum reg_request_treatment __reg_process_hint_driver(struct regulatory_request *driver_request) { struct regulatory_request *lr = get_last_request(); if (lr->initiator == NL80211_REGDOM_SET_BY_CORE) { if (regdom_changes(driver_request->alpha2)) return REG_REQ_OK; return REG_REQ_ALREADY_SET; } /* * This would happen if you unplug and plug your card * back in or if you add a new device for which the previously * loaded card also agrees on the regulatory domain. */ if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && !regdom_changes(driver_request->alpha2)) return REG_REQ_ALREADY_SET; return REG_REQ_INTERSECT; } /** * reg_process_hint_driver - process driver regulatory requests * @wiphy: the wireless device for the regulatory request * @driver_request: a pending driver regulatory request * * The wireless subsystem can use this function to process * a regulatory request issued by an 802.11 driver. * * Returns: one of the different reg request treatment values. */ static enum reg_request_treatment reg_process_hint_driver(struct wiphy *wiphy, struct regulatory_request *driver_request) { const struct ieee80211_regdomain *regd, *tmp; enum reg_request_treatment treatment; treatment = __reg_process_hint_driver(driver_request); switch (treatment) { case REG_REQ_OK: break; case REG_REQ_IGNORE: return REG_REQ_IGNORE; case REG_REQ_INTERSECT: case REG_REQ_ALREADY_SET: regd = reg_copy_regd(get_cfg80211_regdom()); if (IS_ERR(regd)) return REG_REQ_IGNORE; tmp = get_wiphy_regdom(wiphy); ASSERT_RTNL(); wiphy_lock(wiphy); rcu_assign_pointer(wiphy->regd, regd); wiphy_unlock(wiphy); rcu_free_regdom(tmp); } driver_request->intersect = treatment == REG_REQ_INTERSECT; driver_request->processed = false; /* * Since CRDA will not be called in this case as we already * have applied the requested regulatory domain before we just * inform userspace we have processed the request */ if (treatment == REG_REQ_ALREADY_SET) { nl80211_send_reg_change_event(driver_request); reg_update_last_request(driver_request); reg_set_request_processed(); return REG_REQ_ALREADY_SET; } if (reg_query_database(driver_request)) { reg_update_last_request(driver_request); return REG_REQ_OK; } return REG_REQ_IGNORE; } static enum reg_request_treatment __reg_process_hint_country_ie(struct wiphy *wiphy, struct regulatory_request *country_ie_request) { struct wiphy *last_wiphy = NULL; struct regulatory_request *lr = get_last_request(); if (reg_request_cell_base(lr)) { /* Trust a Cell base station over the AP's country IE */ if (regdom_changes(country_ie_request->alpha2)) return REG_REQ_IGNORE; return REG_REQ_ALREADY_SET; } else { if (wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_IGNORE) return REG_REQ_IGNORE; } if (unlikely(!is_an_alpha2(country_ie_request->alpha2))) return -EINVAL; if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) return REG_REQ_OK; last_wiphy = 
wiphy_idx_to_wiphy(lr->wiphy_idx); if (last_wiphy != wiphy) { /* * Two cards with two APs claiming different * Country IE alpha2s. We could * intersect them, but that seems unlikely * to be correct. Reject second one for now. */ if (regdom_changes(country_ie_request->alpha2)) return REG_REQ_IGNORE; return REG_REQ_ALREADY_SET; } if (regdom_changes(country_ie_request->alpha2)) return REG_REQ_OK; return REG_REQ_ALREADY_SET; } /** * reg_process_hint_country_ie - process regulatory requests from country IEs * @wiphy: the wireless device for the regulatory request * @country_ie_request: a regulatory request from a country IE * * The wireless subsystem can use this function to process * a regulatory request issued by a country Information Element. * * Returns: one of the different reg request treatment values. */ static enum reg_request_treatment reg_process_hint_country_ie(struct wiphy *wiphy, struct regulatory_request *country_ie_request) { enum reg_request_treatment treatment; treatment = __reg_process_hint_country_ie(wiphy, country_ie_request); switch (treatment) { case REG_REQ_OK: break; case REG_REQ_IGNORE: return REG_REQ_IGNORE; case REG_REQ_ALREADY_SET: reg_free_request(country_ie_request); return REG_REQ_ALREADY_SET; case REG_REQ_INTERSECT: /* * This doesn't happen yet, not sure we * ever want to support it for this case. */ WARN_ONCE(1, "Unexpected intersection for country elements"); return REG_REQ_IGNORE; } country_ie_request->intersect = false; country_ie_request->processed = false; if (reg_query_database(country_ie_request)) { reg_update_last_request(country_ie_request); return REG_REQ_OK; } return REG_REQ_IGNORE; } bool reg_dfs_domain_same(struct wiphy *wiphy1, struct wiphy *wiphy2) { const struct ieee80211_regdomain *wiphy1_regd = NULL; const struct ieee80211_regdomain *wiphy2_regd = NULL; const struct ieee80211_regdomain *cfg80211_regd = NULL; bool dfs_domain_same; rcu_read_lock(); cfg80211_regd = rcu_dereference(cfg80211_regdomain); wiphy1_regd = rcu_dereference(wiphy1->regd); if (!wiphy1_regd) wiphy1_regd = cfg80211_regd; wiphy2_regd = rcu_dereference(wiphy2->regd); if (!wiphy2_regd) wiphy2_regd = cfg80211_regd; dfs_domain_same = wiphy1_regd->dfs_region == wiphy2_regd->dfs_region; rcu_read_unlock(); return dfs_domain_same; } static void reg_copy_dfs_chan_state(struct ieee80211_channel *dst_chan, struct ieee80211_channel *src_chan) { if (!(dst_chan->flags & IEEE80211_CHAN_RADAR) || !(src_chan->flags & IEEE80211_CHAN_RADAR)) return; if (dst_chan->flags & IEEE80211_CHAN_DISABLED || src_chan->flags & IEEE80211_CHAN_DISABLED) return; if (src_chan->center_freq == dst_chan->center_freq && dst_chan->dfs_state == NL80211_DFS_USABLE) { dst_chan->dfs_state = src_chan->dfs_state; dst_chan->dfs_state_entered = src_chan->dfs_state_entered; } } static void wiphy_share_dfs_chan_state(struct wiphy *dst_wiphy, struct wiphy *src_wiphy) { struct ieee80211_supported_band *src_sband, *dst_sband; struct ieee80211_channel *src_chan, *dst_chan; int i, j, band; if (!reg_dfs_domain_same(dst_wiphy, src_wiphy)) return; for (band = 0; band < NUM_NL80211_BANDS; band++) { dst_sband = dst_wiphy->bands[band]; src_sband = src_wiphy->bands[band]; if (!dst_sband || !src_sband) continue; for (i = 0; i < dst_sband->n_channels; i++) { dst_chan = &dst_sband->channels[i]; for (j = 0; j < src_sband->n_channels; j++) { src_chan = &src_sband->channels[j]; reg_copy_dfs_chan_state(dst_chan, src_chan); } } } } static void wiphy_all_share_dfs_chan_state(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev; 
ASSERT_RTNL(); for_each_rdev(rdev) { if (wiphy == &rdev->wiphy) continue; wiphy_share_dfs_chan_state(wiphy, &rdev->wiphy); } } /* This processes *all* regulatory hints */ static void reg_process_hint(struct regulatory_request *reg_request) { struct wiphy *wiphy = NULL; enum reg_request_treatment treatment; enum nl80211_reg_initiator initiator = reg_request->initiator; if (reg_request->wiphy_idx != WIPHY_IDX_INVALID) wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx); switch (initiator) { case NL80211_REGDOM_SET_BY_CORE: treatment = reg_process_hint_core(reg_request); break; case NL80211_REGDOM_SET_BY_USER: treatment = reg_process_hint_user(reg_request); break; case NL80211_REGDOM_SET_BY_DRIVER: if (!wiphy) goto out_free; treatment = reg_process_hint_driver(wiphy, reg_request); break; case NL80211_REGDOM_SET_BY_COUNTRY_IE: if (!wiphy) goto out_free; treatment = reg_process_hint_country_ie(wiphy, reg_request); break; default: WARN(1, "invalid initiator %d\n", initiator); goto out_free; } if (treatment == REG_REQ_IGNORE) goto out_free; WARN(treatment != REG_REQ_OK && treatment != REG_REQ_ALREADY_SET, "unexpected treatment value %d\n", treatment); /* This is required so that the orig_* parameters are saved. * NOTE: treatment must be set for any case that reaches here! */ if (treatment == REG_REQ_ALREADY_SET && wiphy && wiphy->regulatory_flags & REGULATORY_STRICT_REG) { wiphy_update_regulatory(wiphy, initiator); wiphy_all_share_dfs_chan_state(wiphy); reg_check_channels(); } return; out_free: reg_free_request(reg_request); } static void notify_self_managed_wiphys(struct regulatory_request *request) { struct cfg80211_registered_device *rdev; struct wiphy *wiphy; for_each_rdev(rdev) { wiphy = &rdev->wiphy; if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED && request->initiator == NL80211_REGDOM_SET_BY_USER) reg_call_notifier(wiphy, request); } } /* * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_* * Regulatory hints come on a first come first serve basis and we * must process each one atomically. 
*/ static void reg_process_pending_hints(void) { struct regulatory_request *reg_request, *lr; lr = get_last_request(); /* When last_request->processed becomes true this will be rescheduled */ if (lr && !lr->processed) { pr_debug("Pending regulatory request, waiting for it to be processed...\n"); return; } spin_lock(&reg_requests_lock); if (list_empty(&reg_requests_list)) { spin_unlock(&reg_requests_lock); return; } reg_request = list_first_entry(&reg_requests_list, struct regulatory_request, list); list_del_init(&reg_request->list); spin_unlock(&reg_requests_lock); notify_self_managed_wiphys(reg_request); reg_process_hint(reg_request); lr = get_last_request(); spin_lock(&reg_requests_lock); if (!list_empty(&reg_requests_list) && lr && lr->processed) schedule_work(&reg_work); spin_unlock(&reg_requests_lock); } /* Processes beacon hints -- this has nothing to do with country IEs */ static void reg_process_pending_beacon_hints(void) { struct cfg80211_registered_device *rdev; struct reg_beacon *pending_beacon, *tmp; /* This goes through the _pending_ beacon list */ spin_lock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(pending_beacon, tmp, &reg_pending_beacons, list) { list_del_init(&pending_beacon->list); /* Applies the beacon hint to current wiphys */ for_each_rdev(rdev) wiphy_update_new_beacon(&rdev->wiphy, pending_beacon); /* Remembers the beacon hint for new wiphys or reg changes */ list_add_tail(&pending_beacon->list, &reg_beacon_list); } spin_unlock_bh(&reg_pending_beacons_lock); } static void reg_process_self_managed_hint(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); const struct ieee80211_regdomain *tmp; const struct ieee80211_regdomain *regd; enum nl80211_band band; struct regulatory_request request = {}; ASSERT_RTNL(); lockdep_assert_wiphy(wiphy); spin_lock(&reg_requests_lock); regd = rdev->requested_regd; rdev->requested_regd = NULL; spin_unlock(&reg_requests_lock); if (!regd) return; tmp = get_wiphy_regdom(wiphy); rcu_assign_pointer(wiphy->regd, regd); rcu_free_regdom(tmp); for (band = 0; band < NUM_NL80211_BANDS; band++) handle_band_custom(wiphy, wiphy->bands[band], regd); reg_process_ht_flags(wiphy); request.wiphy_idx = get_wiphy_idx(wiphy); request.alpha2[0] = regd->alpha2[0]; request.alpha2[1] = regd->alpha2[1]; request.initiator = NL80211_REGDOM_SET_BY_DRIVER; if (wiphy->flags & WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER) reg_call_notifier(wiphy, &request); nl80211_send_wiphy_reg_change_event(&request); } static void reg_process_self_managed_hints(void) { struct cfg80211_registered_device *rdev; ASSERT_RTNL(); for_each_rdev(rdev) { wiphy_lock(&rdev->wiphy); reg_process_self_managed_hint(&rdev->wiphy); wiphy_unlock(&rdev->wiphy); } reg_check_channels(); } static void reg_todo(struct work_struct *work) { rtnl_lock(); reg_process_pending_hints(); reg_process_pending_beacon_hints(); reg_process_self_managed_hints(); rtnl_unlock(); } static void queue_regulatory_request(struct regulatory_request *request) { request->alpha2[0] = toupper(request->alpha2[0]); request->alpha2[1] = toupper(request->alpha2[1]); spin_lock(&reg_requests_lock); list_add_tail(&request->list, &reg_requests_list); spin_unlock(&reg_requests_lock); schedule_work(&reg_work); } /* * Core regulatory hint -- happens during cfg80211_init() * and when we restore regulatory settings.
*/ static int regulatory_hint_core(const char *alpha2) { struct regulatory_request *request; request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_CORE; request->wiphy_idx = WIPHY_IDX_INVALID; queue_regulatory_request(request); return 0; } /* User hints */ int regulatory_hint_user(const char *alpha2, enum nl80211_user_reg_hint_type user_reg_hint_type) { struct regulatory_request *request; if (WARN_ON(!alpha2)) return -EINVAL; if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2)) return -EINVAL; request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; request->wiphy_idx = WIPHY_IDX_INVALID; request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_USER; request->user_reg_hint_type = user_reg_hint_type; /* Allow calling CRDA again */ reset_crda_timeouts(); queue_regulatory_request(request); return 0; } void regulatory_hint_indoor(bool is_indoor, u32 portid) { spin_lock(&reg_indoor_lock); /* It is possible that more than one user space process is trying to * configure the indoor setting. To handle such cases, clear the indoor * setting in case that some process does not think that the device * is operating in an indoor environment. In addition, if a user space * process indicates that it is controlling the indoor setting, save its * portid, i.e., make it the owner. */ reg_is_indoor = is_indoor; if (reg_is_indoor) { if (!reg_is_indoor_portid) reg_is_indoor_portid = portid; } else { reg_is_indoor_portid = 0; } spin_unlock(&reg_indoor_lock); if (!is_indoor) reg_check_channels(); } void regulatory_netlink_notify(u32 portid) { spin_lock(&reg_indoor_lock); if (reg_is_indoor_portid != portid) { spin_unlock(&reg_indoor_lock); return; } reg_is_indoor = false; reg_is_indoor_portid = 0; spin_unlock(&reg_indoor_lock); reg_check_channels(); } /* Driver hints */ int regulatory_hint(struct wiphy *wiphy, const char *alpha2) { struct regulatory_request *request; if (WARN_ON(!alpha2 || !wiphy)) return -EINVAL; wiphy->regulatory_flags &= ~REGULATORY_CUSTOM_REG; request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; request->wiphy_idx = get_wiphy_idx(wiphy); request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_DRIVER; /* Allow calling CRDA again */ reset_crda_timeouts(); queue_regulatory_request(request); return 0; } EXPORT_SYMBOL(regulatory_hint); void regulatory_hint_country_ie(struct wiphy *wiphy, enum nl80211_band band, const u8 *country_ie, u8 country_ie_len) { char alpha2[2]; enum environment_cap env = ENVIRON_ANY; struct regulatory_request *request = NULL, *lr; /* IE len must be evenly divisible by 2 */ if (country_ie_len & 0x01) return; if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) return; request = kzalloc(sizeof(*request), GFP_KERNEL); if (!request) return; alpha2[0] = country_ie[0]; alpha2[1] = country_ie[1]; if (country_ie[2] == 'I') env = ENVIRON_INDOOR; else if (country_ie[2] == 'O') env = ENVIRON_OUTDOOR; rcu_read_lock(); lr = get_last_request(); if (unlikely(!lr)) goto out; /* * We will run this only upon a successful connection on cfg80211. * We leave conflict resolution to the workqueue, where can hold * the RTNL.
*/ if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && lr->wiphy_idx != WIPHY_IDX_INVALID) goto out; request->wiphy_idx = get_wiphy_idx(wiphy); request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE; request->country_ie_env = env; /* Allow calling CRDA again */ reset_crda_timeouts(); queue_regulatory_request(request); request = NULL; out: kfree(request); rcu_read_unlock(); } static void restore_alpha2(char *alpha2, bool reset_user) { /* indicates there is no alpha2 to consider for restoration */ alpha2[0] = '9'; alpha2[1] = '7'; /* The user setting has precedence over the module parameter */ if (is_user_regdom_saved()) { /* Unless we're asked to ignore it and reset it */ if (reset_user) { pr_debug("Restoring regulatory settings including user preference\n"); user_alpha2[0] = '9'; user_alpha2[1] = '7'; /* * If we're ignoring user settings, we still need to * check the module parameter to ensure we put things * back as they were for a full restore. */ if (!is_world_regdom(ieee80211_regdom)) { pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n", ieee80211_regdom[0], ieee80211_regdom[1]); alpha2[0] = ieee80211_regdom[0]; alpha2[1] = ieee80211_regdom[1]; } } else { pr_debug("Restoring regulatory settings while preserving user preference for: %c%c\n", user_alpha2[0], user_alpha2[1]); alpha2[0] = user_alpha2[0]; alpha2[1] = user_alpha2[1]; } } else if (!is_world_regdom(ieee80211_regdom)) { pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n", ieee80211_regdom[0], ieee80211_regdom[1]); alpha2[0] = ieee80211_regdom[0]; alpha2[1] = ieee80211_regdom[1]; } else pr_debug("Restoring regulatory settings\n"); } static void restore_custom_reg_settings(struct wiphy *wiphy) { struct ieee80211_supported_band *sband; enum nl80211_band band; struct ieee80211_channel *chan; int i; for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { chan = &sband->channels[i]; chan->flags = chan->orig_flags; chan->max_antenna_gain = chan->orig_mag; chan->max_power = chan->orig_mpwr; chan->beacon_found = false; } } } /* * Restoring regulatory settings involves ignoring any * possibly stale country IE information and user regulatory * settings if so desired, this includes any beacon hints * learned as we could have traveled outside to another country * after disconnection. To restore regulatory settings we do * exactly what we did at bootup: * * - send a core regulatory hint * - send a user regulatory hint if applicable * * Device drivers that send a regulatory hint for a specific country * keep their own regulatory domain on wiphy->regd so that does * not need to be remembered. */ static void restore_regulatory_settings(bool reset_user, bool cached) { char alpha2[2]; char world_alpha2[2]; struct reg_beacon *reg_beacon, *btmp; LIST_HEAD(tmp_reg_req_list); struct cfg80211_registered_device *rdev; ASSERT_RTNL(); /* * Clear the indoor setting in case that it is not controlled by user * space, as otherwise there is no guarantee that the device is still * operating in an indoor environment. 
*/ spin_lock(&reg_indoor_lock); if (reg_is_indoor && !reg_is_indoor_portid) { reg_is_indoor = false; reg_check_channels(); } spin_unlock(&reg_indoor_lock); reset_regdomains(true, &world_regdom); restore_alpha2(alpha2, reset_user); /* * If there's any pending requests we simply * stash them to a temporary pending queue and * add then after we've restored regulatory * settings. */ spin_lock(&reg_requests_lock); list_splice_tail_init(&reg_requests_list, &tmp_reg_req_list); spin_unlock(&reg_requests_lock); /* Clear beacon hints */ spin_lock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } spin_unlock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } /* First restore to the basic regulatory settings */ world_alpha2[0] = cfg80211_world_regdom->alpha2[0]; world_alpha2[1] = cfg80211_world_regdom->alpha2[1]; for_each_rdev(rdev) { if (rdev->wiphy.regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) continue; if (rdev->wiphy.regulatory_flags & REGULATORY_CUSTOM_REG) restore_custom_reg_settings(&rdev->wiphy); } if (cached && (!is_an_alpha2(alpha2) || !IS_ERR_OR_NULL(cfg80211_user_regdom))) { reset_regdomains(false, cfg80211_world_regdom); update_all_wiphy_regulatory(NL80211_REGDOM_SET_BY_CORE); print_regdomain(get_cfg80211_regdom()); nl80211_send_reg_change_event(&core_request_world); reg_set_request_processed(); if (is_an_alpha2(alpha2) && !regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER)) { struct regulatory_request *ureq; spin_lock(&reg_requests_lock); ureq = list_last_entry(&reg_requests_list, struct regulatory_request, list); list_del(&ureq->list); spin_unlock(&reg_requests_lock); notify_self_managed_wiphys(ureq); reg_update_last_request(ureq); set_regdom(reg_copy_regd(cfg80211_user_regdom), REGD_SOURCE_CACHED); } } else { regulatory_hint_core(world_alpha2); /* * This restores the ieee80211_regdom module parameter * preference or the last user requested regulatory * settings, user regulatory settings takes precedence. */ if (is_an_alpha2(alpha2)) regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER); } spin_lock(&reg_requests_lock); list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list); spin_unlock(&reg_requests_lock); pr_debug("Kicking the queue\n"); schedule_work(&reg_work); } static bool is_wiphy_all_set_reg_flag(enum ieee80211_regulatory_flags flag) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; for_each_rdev(rdev) { wiphy_lock(&rdev->wiphy); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!(wdev->wiphy->regulatory_flags & flag)) { wiphy_unlock(&rdev->wiphy); return false; } } wiphy_unlock(&rdev->wiphy); } return true; } void regulatory_hint_disconnect(void) { /* Restore of regulatory settings is not required when wiphy(s) * ignore IE from connected access point but clearance of beacon hints * is required when wiphy(s) supports beacon hints.
*/ if (is_wiphy_all_set_reg_flag(REGULATORY_COUNTRY_IE_IGNORE)) { struct reg_beacon *reg_beacon, *btmp; if (is_wiphy_all_set_reg_flag(REGULATORY_DISABLE_BEACON_HINTS)) return; spin_lock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } spin_unlock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } return; } pr_debug("All devices are disconnected, going to restore regulatory settings\n"); restore_regulatory_settings(false, true); } static bool freq_is_chan_12_13_14(u32 freq) { if (freq == ieee80211_channel_to_frequency(12, NL80211_BAND_2GHZ) || freq == ieee80211_channel_to_frequency(13, NL80211_BAND_2GHZ) || freq == ieee80211_channel_to_frequency(14, NL80211_BAND_2GHZ)) return true; return false; } static bool pending_reg_beacon(struct ieee80211_channel *beacon_chan) { struct reg_beacon *pending_beacon; list_for_each_entry(pending_beacon, &reg_pending_beacons, list) if (ieee80211_channel_equal(beacon_chan, &pending_beacon->chan)) return true; return false; } void regulatory_hint_found_beacon(struct wiphy *wiphy, struct ieee80211_channel *beacon_chan, gfp_t gfp) { struct reg_beacon *reg_beacon; bool processing; if (beacon_chan->beacon_found || beacon_chan->flags & IEEE80211_CHAN_RADAR || (beacon_chan->band == NL80211_BAND_2GHZ && !freq_is_chan_12_13_14(beacon_chan->center_freq))) return; spin_lock_bh(&reg_pending_beacons_lock); processing = pending_reg_beacon(beacon_chan); spin_unlock_bh(&reg_pending_beacons_lock); if (processing) return; reg_beacon = kzalloc(sizeof(struct reg_beacon), gfp); if (!reg_beacon) return; pr_debug("Found new beacon on frequency: %d.%03d MHz (Ch %d) on %s\n", beacon_chan->center_freq, beacon_chan->freq_offset, ieee80211_freq_khz_to_channel( ieee80211_channel_to_khz(beacon_chan)), wiphy_name(wiphy)); memcpy(&reg_beacon->chan, beacon_chan, sizeof(struct ieee80211_channel)); /* * Since we can be called from BH or and non-BH context * we must use spin_lock_bh() */ spin_lock_bh(&reg_pending_beacons_lock); list_add_tail(&reg_beacon->list, &reg_pending_beacons); spin_unlock_bh(&reg_pending_beacons_lock); schedule_work(&reg_work); } static void print_rd_rules(const struct ieee80211_regdomain *rd) { unsigned int i; const struct ieee80211_reg_rule *reg_rule = NULL; const struct ieee80211_freq_range *freq_range = NULL; const struct ieee80211_power_rule *power_rule = NULL; char bw[32], cac_time[32]; pr_debug(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp), (dfs_cac_time)\n"); for (i = 0; i < rd->n_reg_rules; i++) { reg_rule = &rd->reg_rules[i]; freq_range = &reg_rule->freq_range; power_rule = &reg_rule->power_rule; if (reg_rule->flags & NL80211_RRF_AUTO_BW) snprintf(bw, sizeof(bw), "%d KHz, %u KHz AUTO", freq_range->max_bandwidth_khz, reg_get_max_bandwidth(rd, reg_rule)); else snprintf(bw, sizeof(bw), "%d KHz", freq_range->max_bandwidth_khz); if (reg_rule->flags & NL80211_RRF_DFS) scnprintf(cac_time, sizeof(cac_time), "%u s", reg_rule->dfs_cac_ms/1000); else scnprintf(cac_time, sizeof(cac_time), "N/A"); /* * There may not be documentation for max antenna gain * in certain regions */ if (power_rule->max_antenna_gain) pr_debug(" (%d KHz - %d KHz @ %s), (%d mBi, %d mBm), (%s)\n", freq_range->start_freq_khz, freq_range->end_freq_khz, bw, power_rule->max_antenna_gain, power_rule->max_eirp, cac_time); else pr_debug(" (%d KHz - %d KHz @ %s), (N/A, %d mBm), (%s)\n", freq_range->start_freq_khz,
freq_range->end_freq_khz, bw, power_rule->max_eirp, cac_time); } } bool reg_supported_dfs_region(enum nl80211_dfs_regions dfs_region) { switch (dfs_region) { case NL80211_DFS_UNSET: case NL80211_DFS_FCC: case NL80211_DFS_ETSI: case NL80211_DFS_JP: return true; default: pr_debug("Ignoring unknown DFS master region: %d\n", dfs_region); return false; } } static void print_regdomain(const struct ieee80211_regdomain *rd) { struct regulatory_request *lr = get_last_request(); if (is_intersected_alpha2(rd->alpha2)) { if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) { struct cfg80211_registered_device *rdev; rdev = cfg80211_rdev_by_wiphy_idx(lr->wiphy_idx); if (rdev) { pr_debug("Current regulatory domain updated by AP to: %c%c\n", rdev->country_ie_alpha2[0], rdev->country_ie_alpha2[1]); } else pr_debug("Current regulatory domain intersected:\n"); } else pr_debug("Current regulatory domain intersected:\n"); } else if (is_world_regdom(rd->alpha2)) { pr_debug("World regulatory domain updated:\n"); } else { if (is_unknown_alpha2(rd->alpha2)) pr_debug("Regulatory domain changed to driver built-in settings (unknown country)\n"); else { if (reg_request_cell_base(lr)) pr_debug("Regulatory domain changed to country: %c%c by Cell Station\n", rd->alpha2[0], rd->alpha2[1]); else pr_debug("Regulatory domain changed to country: %c%c\n", rd->alpha2[0], rd->alpha2[1]); } } pr_debug(" DFS Master region: %s", reg_dfs_region_str(rd->dfs_region)); print_rd_rules(rd); } static void print_regdomain_info(const struct ieee80211_regdomain *rd) { pr_debug("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_rd_rules(rd); } static int reg_set_rd_core(const struct ieee80211_regdomain *rd) { if (!is_world_regdom(rd->alpha2)) return -EINVAL; update_world_regdomain(rd); return 0; } static int reg_set_rd_user(const struct ieee80211_regdomain *rd, struct regulatory_request *user_request) { const struct ieee80211_regdomain *intersected_rd = NULL; if (!regdom_changes(rd->alpha2)) return -EALREADY; if (!is_valid_rd(rd)) { pr_err("Invalid regulatory domain detected: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_regdomain_info(rd); return -EINVAL; } if (!user_request->intersect) { reset_regdomains(false, rd); return 0; } intersected_rd = regdom_intersect(rd, get_cfg80211_regdom()); if (!intersected_rd) return -EINVAL; kfree(rd); rd = NULL; reset_regdomains(false, intersected_rd); return 0; } static int reg_set_rd_driver(const struct ieee80211_regdomain *rd, struct regulatory_request *driver_request) { const struct ieee80211_regdomain *regd; const struct ieee80211_regdomain *intersected_rd = NULL; const struct ieee80211_regdomain *tmp = NULL; struct wiphy *request_wiphy; if (is_world_regdom(rd->alpha2)) return -EINVAL; if (!regdom_changes(rd->alpha2)) return -EALREADY; if (!is_valid_rd(rd)) { pr_err("Invalid regulatory domain detected: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_regdomain_info(rd); return -EINVAL; } request_wiphy = wiphy_idx_to_wiphy(driver_request->wiphy_idx); if (!request_wiphy) return -ENODEV; if (!driver_request->intersect) { ASSERT_RTNL(); wiphy_lock(request_wiphy); if (request_wiphy->regd) tmp = get_wiphy_regdom(request_wiphy); regd = reg_copy_regd(rd); if (IS_ERR(regd)) { wiphy_unlock(request_wiphy); return PTR_ERR(regd); } rcu_assign_pointer(request_wiphy->regd, regd); rcu_free_regdom(tmp); wiphy_unlock(request_wiphy); reset_regdomains(false, rd); return 0; } intersected_rd = regdom_intersect(rd, get_cfg80211_regdom()); if (!intersected_rd) return -EINVAL; /* * We can trash what CRDA 
provided now. * However if a driver requested this specific regulatory * domain we keep it for its private use */ tmp = get_wiphy_regdom(request_wiphy); rcu_assign_pointer(request_wiphy->regd, rd); rcu_free_regdom(tmp); rd = NULL; reset_regdomains(false, intersected_rd); return 0; } static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd, struct regulatory_request *country_ie_request) { struct wiphy *request_wiphy; if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) && !is_unknown_alpha2(rd->alpha2)) return -EINVAL; /* * Lets only bother proceeding on the same alpha2 if the current * rd is non static (it means CRDA was present and was used last) * and the pending request came in from a country IE */ if (!is_valid_rd(rd)) { pr_err("Invalid regulatory domain detected: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_regdomain_info(rd); return -EINVAL; } request_wiphy = wiphy_idx_to_wiphy(country_ie_request->wiphy_idx); if (!request_wiphy) return -ENODEV; if (country_ie_request->intersect) return -EINVAL; reset_regdomains(false, rd); return 0; } /* * Use this call to set the current regulatory domain. Conflicts with * multiple drivers can be ironed out later. Caller must've already * kmalloc'd the rd structure. */ int set_regdom(const struct ieee80211_regdomain *rd, enum ieee80211_regd_source regd_src) { struct regulatory_request *lr; bool user_reset = false; int r; if (IS_ERR_OR_NULL(rd)) return -ENODATA; if (!reg_is_valid_request(rd->alpha2)) { kfree(rd); return -EINVAL; } if (regd_src == REGD_SOURCE_CRDA) reset_crda_timeouts(); lr = get_last_request(); /* Note that this doesn't update the wiphys, this is done below */ switch (lr->initiator) { case NL80211_REGDOM_SET_BY_CORE: r = reg_set_rd_core(rd); break; case NL80211_REGDOM_SET_BY_USER: cfg80211_save_user_regdom(rd); r = reg_set_rd_user(rd, lr); user_reset = true; break; case NL80211_REGDOM_SET_BY_DRIVER: r = reg_set_rd_driver(rd, lr); break; case NL80211_REGDOM_SET_BY_COUNTRY_IE: r = reg_set_rd_country_ie(rd, lr); break; default: WARN(1, "invalid initiator %d\n", lr->initiator); kfree(rd); return -EINVAL; } if (r) { switch (r) { case -EALREADY: reg_set_request_processed(); break; default: /* Back to world regulatory in case of errors */ restore_regulatory_settings(user_reset, false); } kfree(rd); return r; } /* This would make this whole thing pointless */ if (WARN_ON(!lr->intersect && rd != get_cfg80211_regdom())) return -EINVAL; /* update all wiphys now with the new established regulatory domain */ update_all_wiphy_regulatory(lr->initiator); print_regdomain(get_cfg80211_regdom()); nl80211_send_reg_change_event(lr); reg_set_request_processed(); return 0; } static int __regulatory_set_wiphy_regd(struct wiphy *wiphy, struct ieee80211_regdomain *rd) { const struct ieee80211_regdomain *regd; const struct ieee80211_regdomain *prev_regd; struct cfg80211_registered_device *rdev; if (WARN_ON(!wiphy || !rd)) return -EINVAL; if (WARN(!(wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED), "wiphy should have REGULATORY_WIPHY_SELF_MANAGED\n")) return -EPERM; if (WARN(!is_valid_rd(rd), "Invalid regulatory domain detected: %c%c\n", rd->alpha2[0], rd->alpha2[1])) { print_regdomain_info(rd); return -EINVAL; } regd = reg_copy_regd(rd); if (IS_ERR(regd)) return PTR_ERR(regd); rdev = wiphy_to_rdev(wiphy); spin_lock(&reg_requests_lock); prev_regd = rdev->requested_regd; rdev->requested_regd = regd; spin_unlock(&reg_requests_lock); kfree(prev_regd); return 0; } int regulatory_set_wiphy_regd(struct wiphy *wiphy, struct ieee80211_regdomain
*rd) { int ret = __regulatory_set_wiphy_regd(wiphy, rd); if (ret) return ret; schedule_work(&reg_work); return 0; } EXPORT_SYMBOL(regulatory_set_wiphy_regd); int regulatory_set_wiphy_regd_sync(struct wiphy *wiphy, struct ieee80211_regdomain *rd) { int ret; ASSERT_RTNL(); ret = __regulatory_set_wiphy_regd(wiphy, rd); if (ret) return ret; /* process the request immediately */ reg_process_self_managed_hint(wiphy); reg_check_channels(); return 0; } EXPORT_SYMBOL(regulatory_set_wiphy_regd_sync); void wiphy_regulatory_register(struct wiphy *wiphy) { struct regulatory_request *lr = get_last_request(); /* self-managed devices ignore beacon hints and country IE */ if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) { wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS | REGULATORY_COUNTRY_IE_IGNORE; /* * The last request may have been received before this * registration call. Call the driver notifier if * initiator is USER. */ if (lr->initiator == NL80211_REGDOM_SET_BY_USER) reg_call_notifier(wiphy, lr); } if (!reg_dev_ignore_cell_hint(wiphy)) reg_num_devs_support_basehint++; wiphy_update_regulatory(wiphy, lr->initiator); wiphy_all_share_dfs_chan_state(wiphy); reg_process_self_managed_hints(); } void wiphy_regulatory_deregister(struct wiphy *wiphy) { struct wiphy *request_wiphy = NULL; struct regulatory_request *lr; lr = get_last_request(); if (!reg_dev_ignore_cell_hint(wiphy)) reg_num_devs_support_basehint--; rcu_free_regdom(get_wiphy_regdom(wiphy)); RCU_INIT_POINTER(wiphy->regd, NULL); if (lr) request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); if (!request_wiphy || request_wiphy != wiphy) return; lr->wiphy_idx = WIPHY_IDX_INVALID; lr->country_ie_env = ENVIRON_ANY; } /* * See FCC notices for UNII band definitions * 5GHz: https://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii * 6GHz: https://www.fcc.gov/document/fcc-proposes-more-spectrum-unlicensed-use-0 */ int cfg80211_get_unii(int freq) { /* UNII-1 */ if (freq >= 5150 && freq <= 5250) return 0; /* UNII-2A */ if (freq > 5250 && freq <= 5350) return 1; /* UNII-2B */ if (freq > 5350 && freq <= 5470) return 2; /* UNII-2C */ if (freq > 5470 && freq <= 5725) return 3; /* UNII-3 */ if (freq > 5725 && freq <= 5825) return 4; /* UNII-5 */ if (freq > 5925 && freq <= 6425) return 5; /* UNII-6 */ if (freq > 6425 && freq <= 6525) return 6; /* UNII-7 */ if (freq > 6525 && freq <= 6875) return 7; /* UNII-8 */ if (freq > 6875 && freq <= 7125) return 8; return -EINVAL; } bool regulatory_indoor_allowed(void) { return reg_is_indoor; } bool regulatory_pre_cac_allowed(struct wiphy *wiphy) { const struct ieee80211_regdomain *regd = NULL; const struct ieee80211_regdomain *wiphy_regd = NULL; bool pre_cac_allowed = false; rcu_read_lock(); regd = rcu_dereference(cfg80211_regdomain); wiphy_regd = rcu_dereference(wiphy->regd); if (!wiphy_regd) { if (regd->dfs_region == NL80211_DFS_ETSI) pre_cac_allowed = true; rcu_read_unlock(); return pre_cac_allowed; } if (regd->dfs_region == wiphy_regd->dfs_region && wiphy_regd->dfs_region == NL80211_DFS_ETSI) pre_cac_allowed = true; rcu_read_unlock(); return pre_cac_allowed; } EXPORT_SYMBOL(regulatory_pre_cac_allowed); static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev) { struct wireless_dev *wdev; unsigned int link_id; /* If we finished CAC or received radar, we should end any * CAC running on the same channels.
* the check !cfg80211_chandef_dfs_usable contain 2 options: * either all channels are available - those the CAC_FINISHED * event has effected another wdev state, or there is a channel * in unavailable state in wdev chandef - those the RADAR_DETECTED * event has effected another wdev state. * In both cases we should end the CAC on the wdev. */ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { struct cfg80211_chan_def *chandef; for_each_valid_link(wdev, link_id) { if (!wdev->links[link_id].cac_started) continue; chandef = wdev_chandef(wdev, link_id); if (!chandef) continue; if (!cfg80211_chandef_dfs_usable(&rdev->wiphy, chandef)) rdev_end_cac(rdev, wdev->netdev, link_id); } } } void regulatory_propagate_dfs_state(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_dfs_state dfs_state, enum nl80211_radar_event event) { struct cfg80211_registered_device *rdev; ASSERT_RTNL(); if (WARN_ON(!cfg80211_chandef_valid(chandef))) return; for_each_rdev(rdev) { if (wiphy == &rdev->wiphy) continue; if (!reg_dfs_domain_same(wiphy, &rdev->wiphy)) continue; if (!ieee80211_get_channel(&rdev->wiphy, chandef->chan->center_freq)) continue; cfg80211_set_dfs_state(&rdev->wiphy, chandef, dfs_state); if (event == NL80211_RADAR_DETECTED || event == NL80211_RADAR_CAC_FINISHED) { cfg80211_sched_dfs_chan_update(rdev); cfg80211_check_and_end_cac(rdev); } nl80211_radar_notify(rdev, chandef, event, NULL, GFP_KERNEL); } } static int __init regulatory_init_db(void) { int err; /* * It's possible that - due to other bugs/issues - cfg80211 * never called regulatory_init() below, or that it failed; * in that case, don't try to do any further work here as * it's doomed to lead to crashes. */ if (IS_ERR_OR_NULL(reg_pdev)) return -EINVAL; err = load_builtin_regdb_keys(); if (err) { platform_device_unregister(reg_pdev); return err; } /* We always try to get an update for the static regdomain */ err = regulatory_hint_core(cfg80211_world_regdom->alpha2); if (err) { if (err == -ENOMEM) { platform_device_unregister(reg_pdev); return err; } /* * N.B. kobject_uevent_env() can fail mainly for when we're out * memory which is handled and propagated appropriately above * but it can also fail during a netlink_broadcast() or during * early boot for call_usermodehelper(). For now treat these * errors as non-fatal. */ pr_err("kobject_uevent_env() was unable to call CRDA during init\n"); } /* * Finally, if the user set the module parameter treat it * as a user hint. 
*/ if (!is_world_regdom(ieee80211_regdom)) regulatory_hint_user(ieee80211_regdom, NL80211_USER_REG_HINT_USER); return 0; } #ifndef MODULE late_initcall(regulatory_init_db); #endif int __init regulatory_init(void) { reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0); if (IS_ERR(reg_pdev)) return PTR_ERR(reg_pdev); rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom); user_alpha2[0] = '9'; user_alpha2[1] = '7'; #ifdef MODULE return regulatory_init_db(); #else return 0; #endif } void regulatory_exit(void) { struct regulatory_request *reg_request, *tmp; struct reg_beacon *reg_beacon, *btmp; cancel_work_sync(&reg_work); cancel_crda_timeout_sync(); cancel_delayed_work_sync(&reg_check_chans); /* Lock to suppress warnings */ rtnl_lock(); reset_regdomains(true, NULL); rtnl_unlock(); dev_set_uevent_suppress(&reg_pdev->dev, true); platform_device_unregister(reg_pdev); list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } list_for_each_entry_safe(reg_request, tmp, &reg_requests_list, list) { list_del(&reg_request->list); kfree(reg_request); } if (!IS_ERR_OR_NULL(regdb)) kfree(regdb); if (!IS_ERR_OR_NULL(cfg80211_user_regdom)) kfree(cfg80211_user_regdom); free_regdb_keyring(); }
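Illustrative aside, not part of reg.c: the driver-facing hooks defined above (regulatory_hint(), wiphy_apply_custom_regulatory() and regulatory_set_wiphy_regd()) are normally invoked from a driver's wiphy setup path. The sketch below shows one plausible way a driver might use two of them; the foo_* names, the built-in regdomain contents and the idea of an EEPROM-provided country code are hypothetical, while the cfg80211 calls and the REG_RULE() helper match the interfaces used in this file.

#include <net/cfg80211.h>

/* Hypothetical built-in regdomain: a single 2.4 GHz rule, 40 MHz max, 20 dBm. */
static const struct ieee80211_regdomain foo_builtin_regdom = {
	.n_reg_rules = 1,
	.alpha2 = "99",	/* "99" = built-in / unknown country */
	.reg_rules = {
		REG_RULE(2412 - 10, 2472 + 10, 40, 0, 20, 0),
	},
};

/* Hypothetical driver helper, called from the driver's wiphy setup path. */
static int foo_setup_regulatory(struct wiphy *wiphy, const char *eeprom_alpha2)
{
	if (eeprom_alpha2 && eeprom_alpha2[0]) {
		/* The device only knows a country code: queue a driver hint
		 * and let the regulatory core resolve the actual rules
		 * (regulatory_hint() clears REGULATORY_CUSTOM_REG itself).
		 */
		return regulatory_hint(wiphy, eeprom_alpha2);
	}

	/* Otherwise apply the driver's own regdomain as a custom regdomain;
	 * per the comment above, wiphy_apply_custom_regulatory() is used by
	 * drivers prior to wiphy registration.
	 */
	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
	wiphy_apply_custom_regulatory(wiphy, &foo_builtin_regdom);
	return 0;
}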
/* * linux/drivers/video/modedb.c -- Standard video mode database management * * Copyright (C) 1999 Geert Uytterhoeven * * 2001 - Documented with DocBook * - Brad Douglas <brad@neruo.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/fb.h> #include <linux/kernel.h> #undef DEBUG #define name_matches(v, s, l) \ ((v).name && !strncmp((s), (v).name, (l)) && strlen((v).name) == (l)) #define res_matches(v, x, y) \ ((v).xres == (x) && (v).yres == (y)) #ifdef DEBUG #define DPRINTK(fmt, args...) printk("modedb %s: " fmt, __func__ , ## args) #else #define DPRINTK(fmt, args...)
#endif /* * Standard video mode definitions (taken from XFree86) */ static const struct fb_videomode modedb[] = { /* 640x400 @ 70 Hz, 31.5 kHz hsync */ { NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2, 0, FB_VMODE_NONINTERLACED }, /* 640x480 @ 60 Hz, 31.5 kHz hsync */ { NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, 0, FB_VMODE_NONINTERLACED }, /* 800x600 @ 56 Hz, 35.15 kHz hsync */ { NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2, 0, FB_VMODE_NONINTERLACED }, /* 1024x768 @ 87 Hz interlaced, 35.5 kHz hsync */ { NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8, 0, FB_VMODE_INTERLACED }, /* 640x400 @ 85 Hz, 37.86 kHz hsync */ { NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 640x480 @ 72 Hz, 36.5 kHz hsync */ { NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3, 0, FB_VMODE_NONINTERLACED }, /* 640x480 @ 75 Hz, 37.50 kHz hsync */ { NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3, 0, FB_VMODE_NONINTERLACED }, /* 800x600 @ 60 Hz, 37.8 kHz hsync */ { NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 640x480 @ 85 Hz, 43.27 kHz hsync */ { NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3, 0, FB_VMODE_NONINTERLACED }, /* 1152x864 @ 89 Hz interlaced, 44 kHz hsync */ { NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10, 0, FB_VMODE_INTERLACED }, /* 800x600 @ 72 Hz, 48.0 kHz hsync */ { NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1024x768 @ 60 Hz, 48.4 kHz hsync */ { NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6, 0, FB_VMODE_NONINTERLACED }, /* 640x480 @ 100 Hz, 53.01 kHz hsync */ { NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6, 0, FB_VMODE_NONINTERLACED }, /* 1152x864 @ 60 Hz, 53.5 kHz hsync */ { NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8, 0, FB_VMODE_NONINTERLACED }, /* 800x600 @ 85 Hz, 55.84 kHz hsync */ { NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5, 0, FB_VMODE_NONINTERLACED }, /* 1024x768 @ 70 Hz, 56.5 kHz hsync */ { NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6, 0, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 87 Hz interlaced, 51 kHz hsync */ { NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12, 0, FB_VMODE_INTERLACED }, /* 800x600 @ 100 Hz, 64.02 kHz hsync */ { NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6, 0, FB_VMODE_NONINTERLACED }, /* 1024x768 @ 76 Hz, 62.5 kHz hsync */ { NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3, 0, FB_VMODE_NONINTERLACED }, /* 1152x864 @ 70 Hz, 62.4 kHz hsync */ { NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10, 0, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 61 Hz, 64.2 kHz hsync */ { NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3, 0, FB_VMODE_NONINTERLACED }, /* 1400x1050 @ 60Hz, 63.9 kHz hsync */ { NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3, 0, FB_VMODE_NONINTERLACED }, /* 1400x1050 @ 75,107 Hz, 82,392 kHz +hsync +vsync*/ { NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1400x1050 @ 60 Hz, ? 
kHz +hsync +vsync*/ { NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1024x768 @ 85 Hz, 70.24 kHz hsync */ { NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6, 0, FB_VMODE_NONINTERLACED }, /* 1152x864 @ 78 Hz, 70.8 kHz hsync */ { NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12, 0, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 70 Hz, 74.59 kHz hsync */ { NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8, 0, FB_VMODE_NONINTERLACED }, /* 1600x1200 @ 60Hz, 75.00 kHz hsync */ { NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1152x864 @ 84 Hz, 76.0 kHz hsync */ { NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12, 0, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 74 Hz, 78.85 kHz hsync */ { NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3, 0, FB_VMODE_NONINTERLACED }, /* 1024x768 @ 100Hz, 80.21 kHz hsync */ { NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10, 0, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 76 Hz, 81.13 kHz hsync */ { NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3, 0, FB_VMODE_NONINTERLACED }, /* 1600x1200 @ 70 Hz, 87.50 kHz hsync */ { NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3, 0, FB_VMODE_NONINTERLACED }, /* 1152x864 @ 100 Hz, 89.62 kHz hsync */ { NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19, 0, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 85 Hz, 91.15 kHz hsync */ { NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1600x1200 @ 75 Hz, 93.75 kHz hsync */ { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1680x1050 @ 60 Hz, 65.191 kHz hsync */ { NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1600x1200 @ 85 Hz, 105.77 kHz hsync */ { NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 100 Hz, 107.16 kHz hsync */ { NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15, 0, FB_VMODE_NONINTERLACED }, /* 1800x1440 @ 64Hz, 96.15 kHz hsync */ { NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1800x1440 @ 70Hz, 104.52 kHz hsync */ { NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 512x384 @ 78 Hz, 31.50 kHz hsync */ { NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3, 0, FB_VMODE_NONINTERLACED }, /* 512x384 @ 85 Hz, 34.38 kHz hsync */ { NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3, 0, FB_VMODE_NONINTERLACED }, /* 320x200 @ 70 Hz, 31.5 kHz hsync, 8:5 aspect ratio */ { NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1, 0, FB_VMODE_DOUBLE }, /* 320x240 @ 60 Hz, 31.5 kHz hsync, 4:3 aspect ratio */ { NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1, 0, FB_VMODE_DOUBLE }, /* 320x240 @ 72 Hz, 36.5 kHz hsync */ { NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2, 0, FB_VMODE_DOUBLE }, /* 400x300 @ 56 Hz, 35.2 kHz hsync, 4:3 aspect ratio */ { NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1, 0, FB_VMODE_DOUBLE }, /* 400x300 @ 60 Hz, 37.8 kHz hsync */ { NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2, 0, FB_VMODE_DOUBLE }, /* 400x300 @ 72 Hz, 48.0 kHz hsync */ { NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3, 0, FB_VMODE_DOUBLE }, /* 480x300 @ 56 Hz, 35.2 kHz hsync, 
8:5 aspect ratio */ { NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1, 0, FB_VMODE_DOUBLE }, /* 480x300 @ 60 Hz, 37.8 kHz hsync */ { NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2, 0, FB_VMODE_DOUBLE }, /* 480x300 @ 63 Hz, 39.6 kHz hsync */ { NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2, 0, FB_VMODE_DOUBLE }, /* 480x300 @ 72 Hz, 48.0 kHz hsync */ { NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, 0, FB_VMODE_DOUBLE }, /* 1920x1080 @ 60 Hz, 67.3 kHz hsync */ { NULL, 60, 1920, 1080, 6734, 148, 88, 36, 4, 44, 5, 0, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1920x1200 @ 60 Hz, 74.5 Khz hsync */ { NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */ { NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */ { NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5, 0, FB_VMODE_NONINTERLACED }, /* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */ { NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3, 0, FB_VMODE_NONINTERLACED }, /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */ { NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5, 0, FB_VMODE_INTERLACED }, /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */ { NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5, 0, FB_VMODE_INTERLACED }, /* 864x480 @ 60 Hz, 35.15 kHz hsync */ { NULL, 60, 864, 480, 27777, 1, 1, 1, 1, 0, 0, 0, FB_VMODE_NONINTERLACED }, }; #ifdef CONFIG_FB_MODE_HELPERS const struct fb_videomode vesa_modes[] = { /* 0 640x350-85 VESA */ { NULL, 85, 640, 350, 31746, 96, 32, 60, 32, 64, 3, FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA}, /* 1 640x400-85 VESA */ { NULL, 85, 640, 400, 31746, 96, 32, 41, 01, 64, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 2 720x400-85 VESA */ { NULL, 85, 721, 400, 28169, 108, 36, 42, 01, 72, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 3 640x480-60 VESA */ { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 4 640x480-72 VESA */ { NULL, 72, 640, 480, 31746, 128, 24, 29, 9, 40, 2, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 5 640x480-75 VESA */ { NULL, 75, 640, 480, 31746, 120, 16, 16, 01, 64, 3, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 6 640x480-85 VESA */ { NULL, 85, 640, 480, 27777, 80, 56, 25, 01, 56, 3, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 7 800x600-56 VESA */ { NULL, 56, 800, 600, 27777, 128, 24, 22, 01, 72, 2, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 8 800x600-60 VESA */ { NULL, 60, 800, 600, 25000, 88, 40, 23, 01, 128, 4, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 9 800x600-72 VESA */ { NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 10 800x600-75 VESA */ { NULL, 75, 800, 600, 20202, 160, 16, 21, 01, 80, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 11 800x600-85 VESA */ { NULL, 85, 800, 600, 17761, 152, 32, 27, 01, 64, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 12 1024x768i-43 VESA */ { NULL, 43, 1024, 768, 22271, 56, 8, 41, 0, 176, 8, FB_SYNC_HOR_HIGH_ACT | 
FB_SYNC_VERT_HIGH_ACT, FB_VMODE_INTERLACED, FB_MODE_IS_VESA }, /* 13 1024x768-60 VESA */ { NULL, 60, 1024, 768, 15384, 160, 24, 29, 3, 136, 6, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 14 1024x768-70 VESA */ { NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 15 1024x768-75 VESA */ { NULL, 75, 1024, 768, 12690, 176, 16, 28, 1, 96, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 16 1024x768-85 VESA */ { NULL, 85, 1024, 768, 10582, 208, 48, 36, 1, 96, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 17 1152x864-75 VESA */ { NULL, 75, 1152, 864, 9259, 256, 64, 32, 1, 128, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 18 1280x960-60 VESA */ { NULL, 60, 1280, 960, 9259, 312, 96, 36, 1, 112, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 19 1280x960-85 VESA */ { NULL, 85, 1280, 960, 6734, 224, 64, 47, 1, 160, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 20 1280x1024-60 VESA */ { NULL, 60, 1280, 1024, 9259, 248, 48, 38, 1, 112, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 21 1280x1024-75 VESA */ { NULL, 75, 1280, 1024, 7407, 248, 16, 38, 1, 144, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 22 1280x1024-85 VESA */ { NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 23 1600x1200-60 VESA */ { NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 24 1600x1200-65 VESA */ { NULL, 65, 1600, 1200, 5698, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 25 1600x1200-70 VESA */ { NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 26 1600x1200-75 VESA */ { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 27 1600x1200-85 VESA */ { NULL, 85, 1600, 1200, 4357, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 28 1792x1344-60 VESA */ { NULL, 60, 1792, 1344, 4882, 328, 128, 46, 1, 200, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 29 1792x1344-75 VESA */ { NULL, 75, 1792, 1344, 3831, 352, 96, 69, 1, 216, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 30 1856x1392-60 VESA */ { NULL, 60, 1856, 1392, 4580, 352, 96, 43, 1, 224, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 31 1856x1392-75 VESA */ { NULL, 75, 1856, 1392, 3472, 352, 128, 104, 1, 224, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 32 1920x1440-60 VESA */ { NULL, 60, 1920, 1440, 4273, 344, 128, 56, 1, 200, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 33 1920x1440-75 VESA */ { NULL, 75, 1920, 1440, 3367, 352, 144, 56, 1, 224, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 34 1920x1200-60 RB VESA */ { NULL, 60, 1920, 1200, 6493, 80, 48, 26, 3, 32, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 
35 1920x1200-60 VESA */ { NULL, 60, 1920, 1200, 5174, 336, 136, 36, 3, 200, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 36 1920x1200-75 VESA */ { NULL, 75, 1920, 1200, 4077, 344, 136, 46, 3, 208, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 37 1920x1200-85 VESA */ { NULL, 85, 1920, 1200, 3555, 352, 144, 53, 3, 208, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 38 2560x1600-60 RB VESA */ { NULL, 60, 2560, 1600, 3724, 80, 48, 37, 3, 32, 6, FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 39 2560x1600-60 VESA */ { NULL, 60, 2560, 1600, 2869, 472, 192, 49, 3, 280, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 40 2560x1600-75 VESA */ { NULL, 75, 2560, 1600, 2256, 488, 208, 63, 3, 280, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 41 2560x1600-85 VESA */ { NULL, 85, 2560, 1600, 1979, 488, 208, 73, 3, 280, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 42 2560x1600-120 RB VESA */ { NULL, 120, 2560, 1600, 1809, 80, 48, 85, 3, 32, 6, FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, }; EXPORT_SYMBOL(vesa_modes); const struct dmt_videomode dmt_modes[DMT_SIZE] = { { 0x01, 0x0000, 0x000000, &vesa_modes[0] }, { 0x02, 0x3119, 0x000000, &vesa_modes[1] }, { 0x03, 0x0000, 0x000000, &vesa_modes[2] }, { 0x04, 0x3140, 0x000000, &vesa_modes[3] }, { 0x05, 0x314c, 0x000000, &vesa_modes[4] }, { 0x06, 0x314f, 0x000000, &vesa_modes[5] }, { 0x07, 0x3159, 0x000000, &vesa_modes[6] }, { 0x08, 0x0000, 0x000000, &vesa_modes[7] }, { 0x09, 0x4540, 0x000000, &vesa_modes[8] }, { 0x0a, 0x454c, 0x000000, &vesa_modes[9] }, { 0x0b, 0x454f, 0x000000, &vesa_modes[10] }, { 0x0c, 0x4559, 0x000000, &vesa_modes[11] }, { 0x0d, 0x0000, 0x000000, NULL }, { 0x0e, 0x0000, 0x000000, NULL }, { 0x0f, 0x0000, 0x000000, &vesa_modes[12] }, { 0x10, 0x6140, 0x000000, &vesa_modes[13] }, { 0x11, 0x614a, 0x000000, &vesa_modes[14] }, { 0x12, 0x614f, 0x000000, &vesa_modes[15] }, { 0x13, 0x6159, 0x000000, &vesa_modes[16] }, { 0x14, 0x0000, 0x000000, NULL }, { 0x15, 0x714f, 0x000000, &vesa_modes[17] }, { 0x16, 0x0000, 0x7f1c21, NULL }, { 0x17, 0x0000, 0x7f1c28, NULL }, { 0x18, 0x0000, 0x7f1c44, NULL }, { 0x19, 0x0000, 0x7f1c62, NULL }, { 0x1a, 0x0000, 0x000000, NULL }, { 0x1b, 0x0000, 0x8f1821, NULL }, { 0x1c, 0x8100, 0x8f1828, NULL }, { 0x1d, 0x810f, 0x8f1844, NULL }, { 0x1e, 0x8119, 0x8f1862, NULL }, { 0x1f, 0x0000, 0x000000, NULL }, { 0x20, 0x8140, 0x000000, &vesa_modes[18] }, { 0x21, 0x8159, 0x000000, &vesa_modes[19] }, { 0x22, 0x0000, 0x000000, NULL }, { 0x23, 0x8180, 0x000000, &vesa_modes[20] }, { 0x24, 0x818f, 0x000000, &vesa_modes[21] }, { 0x25, 0x8199, 0x000000, &vesa_modes[22] }, { 0x26, 0x0000, 0x000000, NULL }, { 0x27, 0x0000, 0x000000, NULL }, { 0x28, 0x0000, 0x000000, NULL }, { 0x29, 0x0000, 0x0c2021, NULL }, { 0x2a, 0x9040, 0x0c2028, NULL }, { 0x2b, 0x904f, 0x0c2044, NULL }, { 0x2c, 0x9059, 0x0c2062, NULL }, { 0x2d, 0x0000, 0x000000, NULL }, { 0x2e, 0x9500, 0xc11821, NULL }, { 0x2f, 0x9500, 0xc11828, NULL }, { 0x30, 0x950f, 0xc11844, NULL }, { 0x31, 0x9519, 0xc11868, NULL }, { 0x32, 0x0000, 0x000000, NULL }, { 0x33, 0xa940, 0x000000, &vesa_modes[23] }, { 0x34, 0xa945, 0x000000, &vesa_modes[24] }, { 0x35, 0xa94a, 0x000000, &vesa_modes[25] }, { 0x36, 0xa94f, 0x000000, &vesa_modes[26] }, { 0x37, 0xa959, 0x000000, &vesa_modes[27] }, { 0x38, 0x0000, 0x000000, NULL }, { 0x39, 0x0000, 0x0c2821, NULL }, { 0x3a, 0xb300, 0x0c2828, NULL }, { 0x3b, 0xb30f, 
0x0c2844, NULL }, { 0x3c, 0xb319, 0x0c2868, NULL }, { 0x3d, 0x0000, 0x000000, NULL }, { 0x3e, 0xc140, 0x000000, &vesa_modes[28] }, { 0x3f, 0xc14f, 0x000000, &vesa_modes[29] }, { 0x40, 0x0000, 0x000000, NULL}, { 0x41, 0xc940, 0x000000, &vesa_modes[30] }, { 0x42, 0xc94f, 0x000000, &vesa_modes[31] }, { 0x43, 0x0000, 0x000000, NULL }, { 0x44, 0x0000, 0x572821, &vesa_modes[34] }, { 0x45, 0xd100, 0x572828, &vesa_modes[35] }, { 0x46, 0xd10f, 0x572844, &vesa_modes[36] }, { 0x47, 0xd119, 0x572862, &vesa_modes[37] }, { 0x48, 0x0000, 0x000000, NULL }, { 0x49, 0xd140, 0x000000, &vesa_modes[32] }, { 0x4a, 0xd14f, 0x000000, &vesa_modes[33] }, { 0x4b, 0x0000, 0x000000, NULL }, { 0x4c, 0x0000, 0x1f3821, &vesa_modes[38] }, { 0x4d, 0x0000, 0x1f3828, &vesa_modes[39] }, { 0x4e, 0x0000, 0x1f3844, &vesa_modes[40] }, { 0x4f, 0x0000, 0x1f3862, &vesa_modes[41] }, { 0x50, 0x0000, 0x000000, &vesa_modes[42] }, }; EXPORT_SYMBOL(dmt_modes); #endif /* CONFIG_FB_MODE_HELPERS */ /** * fb_try_mode - test a video mode * @var: frame buffer user defined part of display * @info: frame buffer info structure * @mode: frame buffer video mode structure * @bpp: color depth in bits per pixel * * Tries a video mode to test it's validity for device @info. * * Returns 1 on success. * */ static int fb_try_mode(struct fb_var_screeninfo *var, struct fb_info *info, const struct fb_videomode *mode, unsigned int bpp) { int err = 0; DPRINTK("Trying mode %s %dx%d-%d@%d\n", mode->name ? mode->name : "noname", mode->xres, mode->yres, bpp, mode->refresh); var->xres = mode->xres; var->yres = mode->yres; var->xres_virtual = mode->xres; var->yres_virtual = mode->yres; var->xoffset = 0; var->yoffset = 0; var->bits_per_pixel = bpp; var->activate |= FB_ACTIVATE_TEST; var->pixclock = mode->pixclock; var->left_margin = mode->left_margin; var->right_margin = mode->right_margin; var->upper_margin = mode->upper_margin; var->lower_margin = mode->lower_margin; var->hsync_len = mode->hsync_len; var->vsync_len = mode->vsync_len; var->sync = mode->sync; var->vmode = mode->vmode; if (info->fbops->fb_check_var) err = info->fbops->fb_check_var(var, info); var->activate &= ~FB_ACTIVATE_TEST; return err; } /** * fb_find_mode - finds a valid video mode * @var: frame buffer user defined part of display * @info: frame buffer info structure * @mode_option: string video mode to find * @db: video mode database * @dbsize: size of @db * @default_mode: default video mode to fall back to * @default_bpp: default color depth in bits per pixel * * Finds a suitable video mode, starting with the specified mode * in @mode_option with fallback to @default_mode. If * @default_mode fails, all modes in the video mode database will * be tried. * * Valid mode specifiers for @mode_option:: * * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][p][m] * * or :: * * <name>[-<bpp>][@<refresh>] * * with <xres>, <yres>, <bpp> and <refresh> decimal numbers and * <name> a string. * * If 'M' is present after yres (and before refresh/bpp if present), * the function will compute the timings using VESA(tm) Coordinated * Video Timings (CVT). If 'R' is present after 'M', will compute with * reduced blanking (for flatpanels). If 'i' or 'p' are present, compute * interlaced or progressive mode. If 'm' is present, add margins equal * to 1.8% of xres rounded down to 8 pixels, and 1.8% of yres. The char * 'i', 'p' and 'm' must be after 'M' and 'R'. Example:: * * 1024x768MR-8@60m - Reduced blank with margins at 60Hz. * * NOTE: The passed struct @var is _not_ cleared! This allows you * to supply values for e.g. 
the grayscale and accel_flags fields. * * Returns zero for failure, 1 if using specified @mode_option, * 2 if using specified @mode_option with an ignored refresh rate, * 3 if default mode is used, 4 if fall back to any valid mode. */ int fb_find_mode(struct fb_var_screeninfo *var, struct fb_info *info, const char *mode_option, const struct fb_videomode *db, unsigned int dbsize, const struct fb_videomode *default_mode, unsigned int default_bpp) { char *mode_option_buf = NULL; int i; /* Set up defaults */ if (!db) { db = modedb; dbsize = ARRAY_SIZE(modedb); } if (!default_mode) default_mode = &db[0]; if (!default_bpp) default_bpp = 8; /* Did the user specify a video mode? */ if (!mode_option) { fb_get_options(NULL, &mode_option_buf); mode_option = mode_option_buf; } if (mode_option) { const char *name = mode_option; unsigned int namelen = strlen(name); int res_specified = 0, bpp_specified = 0, refresh_specified = 0; unsigned int xres = 0, yres = 0, bpp = default_bpp, refresh = 0; int yres_specified = 0, cvt = 0, rb = 0; int interlace_specified = 0, interlace = 0; int margins = 0; u32 best, diff, tdiff; for (i = namelen-1; i >= 0; i--) { switch (name[i]) { case '@': namelen = i; if (!refresh_specified && !bpp_specified && !yres_specified) { refresh = simple_strtol(&name[i+1], NULL, 10); refresh_specified = 1; if (cvt || rb) cvt = 0; } else goto done; break; case '-': namelen = i; if (!bpp_specified && !yres_specified) { bpp = simple_strtol(&name[i+1], NULL, 10); bpp_specified = 1; if (cvt || rb) cvt = 0; } else goto done; break; case 'x': if (!yres_specified) { yres = simple_strtol(&name[i+1], NULL, 10); yres_specified = 1; } else goto done; break; case '0' ... '9': break; case 'M': if (!yres_specified) cvt = 1; break; case 'R': if (!cvt) rb = 1; break; case 'm': if (!cvt) margins = 1; break; case 'p': if (!cvt) { interlace = 0; interlace_specified = 1; } break; case 'i': if (!cvt) { interlace = 1; interlace_specified = 1; } break; default: goto done; } } if (i < 0 && yres_specified) { xres = simple_strtol(name, NULL, 10); res_specified = 1; } done: kfree(mode_option_buf); if (cvt) { struct fb_videomode cvt_mode; int ret; DPRINTK("CVT mode %dx%d@%dHz%s%s%s\n", xres, yres, (refresh) ? refresh : 60, (rb) ? " reduced blanking" : "", (margins) ? " with margins" : "", (interlace) ? " interlaced" : ""); memset(&cvt_mode, 0, sizeof(cvt_mode)); cvt_mode.xres = xres; cvt_mode.yres = yres; cvt_mode.refresh = (refresh) ? refresh : 60; if (interlace) cvt_mode.vmode |= FB_VMODE_INTERLACED; else cvt_mode.vmode &= ~FB_VMODE_INTERLACED; ret = fb_find_mode_cvt(&cvt_mode, margins, rb); if (!ret && !fb_try_mode(var, info, &cvt_mode, bpp)) { DPRINTK("modedb CVT: CVT mode ok\n"); return 1; } DPRINTK("CVT mode invalid, getting mode from database\n"); } DPRINTK("Trying specified video mode%s %ix%i\n", refresh_specified ? "" : " (ignoring refresh rate)", xres, yres); if (!refresh_specified) { /* * If the caller has provided a custom mode database and * a valid monspecs structure, we look for the mode with * the highest refresh rate. Otherwise we play it safe * it and try to find a mode with a refresh rate closest * to the standard 60 Hz. 
*/ if (db != modedb && info->monspecs.vfmin && info->monspecs.vfmax && info->monspecs.hfmin && info->monspecs.hfmax && info->monspecs.dclkmax) { refresh = 1000; } else { refresh = 60; } } diff = -1; best = -1; for (i = 0; i < dbsize; i++) { if ((name_matches(db[i], name, namelen) || (res_specified && res_matches(db[i], xres, yres))) && !fb_try_mode(var, info, &db[i], bpp)) { const int db_interlace = (db[i].vmode & FB_VMODE_INTERLACED ? 1 : 0); int score = abs(db[i].refresh - refresh); if (interlace_specified) score += abs(db_interlace - interlace); if (!interlace_specified || db_interlace == interlace) if (refresh_specified && db[i].refresh == refresh) return 1; if (score < diff) { diff = score; best = i; } } } if (best != -1) { fb_try_mode(var, info, &db[best], bpp); return (refresh_specified) ? 2 : 1; } diff = 2 * (xres + yres); best = -1; DPRINTK("Trying best-fit modes\n"); for (i = 0; i < dbsize; i++) { DPRINTK("Trying %ix%i\n", db[i].xres, db[i].yres); if (!fb_try_mode(var, info, &db[i], bpp)) { tdiff = abs(db[i].xres - xres) + abs(db[i].yres - yres); /* * Penalize modes with resolutions smaller * than requested. */ if (xres > db[i].xres || yres > db[i].yres) tdiff += xres + yres; if (diff > tdiff) { diff = tdiff; best = i; } } } if (best != -1) { fb_try_mode(var, info, &db[best], bpp); return 5; } } DPRINTK("Trying default video mode\n"); if (!fb_try_mode(var, info, default_mode, default_bpp)) return 3; DPRINTK("Trying all modes\n"); for (i = 0; i < dbsize; i++) if (!fb_try_mode(var, info, &db[i], default_bpp)) return 4; DPRINTK("No valid mode found\n"); return 0; } /** * fb_var_to_videomode - convert fb_var_screeninfo to fb_videomode * @mode: pointer to struct fb_videomode * @var: pointer to struct fb_var_screeninfo */ void fb_var_to_videomode(struct fb_videomode *mode, const struct fb_var_screeninfo *var) { u32 pixclock, hfreq, htotal, vtotal; mode->name = NULL; mode->xres = var->xres; mode->yres = var->yres; mode->pixclock = var->pixclock; mode->hsync_len = var->hsync_len; mode->vsync_len = var->vsync_len; mode->left_margin = var->left_margin; mode->right_margin = var->right_margin; mode->upper_margin = var->upper_margin; mode->lower_margin = var->lower_margin; mode->sync = var->sync; mode->vmode = var->vmode & FB_VMODE_MASK; mode->flag = FB_MODE_IS_FROM_VAR; mode->refresh = 0; if (!var->pixclock) return; pixclock = PICOS2KHZ(var->pixclock) * 1000; htotal = var->xres + var->right_margin + var->hsync_len + var->left_margin; vtotal = var->yres + var->lower_margin + var->vsync_len + var->upper_margin; if (var->vmode & FB_VMODE_INTERLACED) vtotal /= 2; if (var->vmode & FB_VMODE_DOUBLE) vtotal *= 2; if (!htotal || !vtotal) return; hfreq = pixclock/htotal; mode->refresh = hfreq/vtotal; } /** * fb_videomode_to_var - convert fb_videomode to fb_var_screeninfo * @var: pointer to struct fb_var_screeninfo * @mode: pointer to struct fb_videomode */ void fb_videomode_to_var(struct fb_var_screeninfo *var, const struct fb_videomode *mode) { var->xres = mode->xres; var->yres = mode->yres; var->xres_virtual = mode->xres; var->yres_virtual = mode->yres; var->xoffset = 0; var->yoffset = 0; var->pixclock = mode->pixclock; var->left_margin = mode->left_margin; var->right_margin = mode->right_margin; var->upper_margin = mode->upper_margin; var->lower_margin = mode->lower_margin; var->hsync_len = mode->hsync_len; var->vsync_len = mode->vsync_len; var->sync = mode->sync; var->vmode = mode->vmode & FB_VMODE_MASK; } /** * fb_mode_is_equal - compare 2 videomodes * @mode1: first videomode * @mode2: second 
videomode * * RETURNS: * 1 if equal, 0 if not */ int fb_mode_is_equal(const struct fb_videomode *mode1, const struct fb_videomode *mode2) { return (mode1->xres == mode2->xres && mode1->yres == mode2->yres && mode1->pixclock == mode2->pixclock && mode1->hsync_len == mode2->hsync_len && mode1->vsync_len == mode2->vsync_len && mode1->left_margin == mode2->left_margin && mode1->right_margin == mode2->right_margin && mode1->upper_margin == mode2->upper_margin && mode1->lower_margin == mode2->lower_margin && mode1->sync == mode2->sync && mode1->vmode == mode2->vmode); } /** * fb_find_best_mode - find best matching videomode * @var: pointer to struct fb_var_screeninfo * @head: pointer to struct list_head of modelist * * RETURNS: * struct fb_videomode, NULL if none found * * IMPORTANT: * This function assumes that all modelist entries in * info->modelist are valid. * * NOTES: * Finds best matching videomode which has an equal or greater dimension than * var->xres and var->yres. If more than 1 videomode is found, will return * the videomode with the highest refresh rate */ const struct fb_videomode *fb_find_best_mode(const struct fb_var_screeninfo *var, struct list_head *head) { struct fb_modelist *modelist; struct fb_videomode *mode, *best = NULL; u32 diff = -1; list_for_each_entry(modelist, head, list) { u32 d; mode = &modelist->mode; if (mode->xres >= var->xres && mode->yres >= var->yres) { d = (mode->xres - var->xres) + (mode->yres - var->yres); if (diff > d) { diff = d; best = mode; } else if (diff == d && best && mode->refresh > best->refresh) best = mode; } } return best; } /** * fb_find_nearest_mode - find closest videomode * * @mode: pointer to struct fb_videomode * @head: pointer to modelist * * Finds best matching videomode, smaller or greater in dimension. * If more than 1 videomode is found, will return the videomode with * the closest refresh rate. 
*/ const struct fb_videomode *fb_find_nearest_mode(const struct fb_videomode *mode, struct list_head *head) { struct fb_modelist *modelist; struct fb_videomode *cmode, *best = NULL; u32 diff = -1, diff_refresh = -1; list_for_each_entry(modelist, head, list) { u32 d; cmode = &modelist->mode; d = abs(cmode->xres - mode->xres) + abs(cmode->yres - mode->yres); if (diff > d) { diff = d; diff_refresh = abs(cmode->refresh - mode->refresh); best = cmode; } else if (diff == d) { d = abs(cmode->refresh - mode->refresh); if (diff_refresh > d) { diff_refresh = d; best = cmode; } } } return best; } /** * fb_match_mode - find a videomode which exactly matches the timings in var * @var: pointer to struct fb_var_screeninfo * @head: pointer to struct list_head of modelist * * RETURNS: * struct fb_videomode, NULL if none found */ const struct fb_videomode *fb_match_mode(const struct fb_var_screeninfo *var, struct list_head *head) { struct fb_modelist *modelist; struct fb_videomode *m, mode; fb_var_to_videomode(&mode, var); list_for_each_entry(modelist, head, list) { m = &modelist->mode; if (fb_mode_is_equal(m, &mode)) return m; } return NULL; } /** * fb_add_videomode - adds videomode entry to modelist * @mode: videomode to add * @head: struct list_head of modelist * * NOTES: * Will only add unmatched mode entries */ int fb_add_videomode(const struct fb_videomode *mode, struct list_head *head) { struct fb_modelist *modelist; struct fb_videomode *m; int found = 0; list_for_each_entry(modelist, head, list) { m = &modelist->mode; if (fb_mode_is_equal(m, mode)) { found = 1; break; } } if (!found) { modelist = kmalloc(sizeof(struct fb_modelist), GFP_KERNEL); if (!modelist) return -ENOMEM; modelist->mode = *mode; list_add(&modelist->list, head); } return 0; } /** * fb_delete_videomode - removed videomode entry from modelist * @mode: videomode to remove * @head: struct list_head of modelist * * NOTES: * Will remove all matching mode entries */ void fb_delete_videomode(const struct fb_videomode *mode, struct list_head *head) { struct list_head *pos, *n; struct fb_modelist *modelist; struct fb_videomode *m; list_for_each_safe(pos, n, head) { modelist = list_entry(pos, struct fb_modelist, list); m = &modelist->mode; if (fb_mode_is_equal(m, mode)) { list_del(pos); kfree(pos); } } } /** * fb_destroy_modelist - destroy modelist * @head: struct list_head of modelist */ void fb_destroy_modelist(struct list_head *head) { struct list_head *pos, *n; list_for_each_safe(pos, n, head) { list_del(pos); kfree(pos); } } EXPORT_SYMBOL_GPL(fb_destroy_modelist); /** * fb_videomode_to_modelist - convert mode array to mode list * @modedb: array of struct fb_videomode * @num: number of entries in array * @head: struct list_head of modelist */ void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num, struct list_head *head) { int i; INIT_LIST_HEAD(head); for (i = 0; i < num; i++) { if (fb_add_videomode(&modedb[i], head)) return; } } const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs, struct list_head *head) { struct fb_modelist *modelist; const struct fb_videomode *m, *m1 = NULL, *md = NULL, *best = NULL; int first = 0; if (!head->prev || !head->next || list_empty(head)) goto finished; /* get the first detailed mode and the very first mode */ list_for_each_entry(modelist, head, list) { m = &modelist->mode; if (!first) { m1 = m; first = 1; } if (m->flag & FB_MODE_IS_FIRST) { md = m; break; } } /* first detailed timing is preferred */ if (specs->misc & FB_MISC_1ST_DETAIL) { best = md; goto 
finished; } /* find best mode based on display width and height */ if (specs->max_x && specs->max_y) { struct fb_var_screeninfo var; memset(&var, 0, sizeof(struct fb_var_screeninfo)); var.xres = (specs->max_x * 7200)/254; var.yres = (specs->max_y * 7200)/254; m = fb_find_best_mode(&var, head); if (m) { best = m; goto finished; } } /* use first detailed mode */ if (md) { best = md; goto finished; } /* last resort, use the very first mode */ best = m1; finished: return best; } EXPORT_SYMBOL(fb_find_best_display); EXPORT_SYMBOL(fb_videomode_to_var); EXPORT_SYMBOL(fb_var_to_videomode); EXPORT_SYMBOL(fb_mode_is_equal); EXPORT_SYMBOL(fb_add_videomode); EXPORT_SYMBOL(fb_match_mode); EXPORT_SYMBOL(fb_find_best_mode); EXPORT_SYMBOL(fb_find_nearest_mode); EXPORT_SYMBOL(fb_videomode_to_modelist); EXPORT_SYMBOL(fb_find_mode); EXPORT_SYMBOL(fb_find_mode_cvt); |
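For orientation, here is a minimal, hypothetical sketch of how a framebuffer driver could drive the mode-selection helper above at probe time; the foo_ name and the 8 bpp fallback are illustrative assumptions, not part of modedb.c.

/*
 * Hypothetical usage sketch (not part of modedb.c): pick an initial mode
 * from a "video=..." option string, falling back to the built-in modedb[].
 */
static int foo_fb_init_mode(struct fb_info *info, const char *mode_option)
{
	/* NULL db with dbsize 0 means "search the global modedb[] table". */
	int ret = fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 8);

	/*
	 * 0: nothing validated; 1/2: requested mode (refresh honoured or
	 * ignored); 3/4/5: default, any-valid, or best-fit fallback.
	 */
	return ret ? 0 : -EINVAL;
}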
/* SPDX-License-Identifier: GPL-2.0-only */ /* * A policy database (policydb) specifies the * configuration data for the security policy. * * Author : Stephen Smalley, <stephen.smalley.work@gmail.com> */ /* * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com> * Support for enhanced MLS infrastructure. * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc. * * Updated: Frank Mayer <mayerf@tresys.com> and * Karl MacMillan <kmacmillan@tresys.com> * Added conditional policy language extensions * Copyright (C) 2003-2004 Tresys Technology, LLC */ #ifndef _SS_POLICYDB_H_ #define _SS_POLICYDB_H_ #include "symtab.h" #include "avtab.h" #include "sidtab.h" #include "ebitmap.h" #include "mls_types.h" #include "context.h" #include "constraint.h" /* * A datum type is defined for each kind of symbol * in the configuration data: individual permissions, * common prefixes for access vectors, classes, * users, roles, types, sensitivities, categories, etc. 
*/ /* Permission attributes */ struct perm_datum { u32 value; /* permission bit + 1 */ }; /* Attributes of a common prefix for access vectors */ struct common_datum { u32 value; /* internal common value */ struct symtab permissions; /* common permissions */ }; /* Class attributes */ struct class_datum { u32 value; /* class value */ char *comkey; /* common name */ struct common_datum *comdatum; /* common datum */ struct symtab permissions; /* class-specific permission symbol table */ struct constraint_node *constraints; /* constraints on class perms */ struct constraint_node *validatetrans; /* special transition rules */ /* Options how a new object user, role, and type should be decided */ #define DEFAULT_SOURCE 1 #define DEFAULT_TARGET 2 char default_user; char default_role; char default_type; /* Options how a new object range should be decided */ #define DEFAULT_SOURCE_LOW 1 #define DEFAULT_SOURCE_HIGH 2 #define DEFAULT_SOURCE_LOW_HIGH 3 #define DEFAULT_TARGET_LOW 4 #define DEFAULT_TARGET_HIGH 5 #define DEFAULT_TARGET_LOW_HIGH 6 #define DEFAULT_GLBLUB 7 char default_range; }; /* Role attributes */ struct role_datum { u32 value; /* internal role value */ u32 bounds; /* boundary of role */ struct ebitmap dominates; /* set of roles dominated by this role */ struct ebitmap types; /* set of authorized types for role */ }; struct role_trans_key { u32 role; /* current role */ u32 type; /* program executable type, or new object type */ u32 tclass; /* process class, or new object class */ }; struct role_trans_datum { u32 new_role; /* new role */ }; struct filename_trans_key { u32 ttype; /* parent dir context */ u16 tclass; /* class of new object */ const char *name; /* last path component */ }; struct filename_trans_datum { struct ebitmap stypes; /* bitmap of source types for this otype */ u32 otype; /* resulting type of new object */ struct filename_trans_datum *next; /* record for next otype*/ }; struct role_allow { u32 role; /* current role */ u32 new_role; /* new role */ struct role_allow *next; }; /* Type attributes */ struct type_datum { u32 value; /* internal type value */ u32 bounds; /* boundary of type */ unsigned char primary; /* primary name? */ unsigned char attribute; /* attribute ?*/ }; /* User attributes */ struct user_datum { u32 value; /* internal user value */ u32 bounds; /* bounds of user */ struct ebitmap roles; /* set of authorized roles for user */ struct mls_range range; /* MLS range (min - max) for user */ struct mls_level dfltlevel; /* default login MLS level for user */ }; /* Sensitivity attributes */ struct level_datum { struct mls_level *level; /* sensitivity and associated categories */ unsigned char isalias; /* is this sensitivity an alias for another? */ }; /* Category attributes */ struct cat_datum { u32 value; /* internal category bit + 1 */ unsigned char isalias; /* is this category an alias for another? */ }; struct range_trans { u32 source_type; u32 target_type; u32 target_class; }; /* Boolean data type */ struct cond_bool_datum { __u32 value; /* internal type value */ int state; }; struct cond_node; /* * type set preserves data needed to determine constraint info from * policy source. This is not used by the kernel policy but allows * utilities such as audit2allow to determine constraint denials. */ struct type_set { struct ebitmap types; struct ebitmap negset; u32 flags; }; /* * The configuration data includes security contexts for * initial SIDs, unlabeled file systems, TCP and UDP port numbers, * network interfaces, and nodes. 
This structure stores the * relevant data for one such entry. Entries of the same kind * (e.g. all initial SIDs) are linked together into a list. */ struct ocontext { union { char *name; /* name of initial SID, fs, netif, fstype, path */ struct { u8 protocol; u16 low_port; u16 high_port; } port; /* TCP or UDP port information */ struct { u32 addr; u32 mask; } node; /* node information */ struct { u32 addr[4]; u32 mask[4]; } node6; /* IPv6 node information */ struct { u64 subnet_prefix; u16 low_pkey; u16 high_pkey; } ibpkey; struct { char *dev_name; u8 port; } ibendport; } u; union { u32 sclass; /* security class for genfs */ u32 behavior; /* labeling behavior for fs_use */ } v; struct context context[2]; /* security context(s) */ u32 sid[2]; /* SID(s) */ struct ocontext *next; }; struct genfs { char *fstype; struct ocontext *head; struct genfs *next; }; /* symbol table array indices */ #define SYM_COMMONS 0 #define SYM_CLASSES 1 #define SYM_ROLES 2 #define SYM_TYPES 3 #define SYM_USERS 4 #define SYM_BOOLS 5 #define SYM_LEVELS 6 #define SYM_CATS 7 #define SYM_NUM 8 /* object context array indices */ #define OCON_ISID 0 /* initial SIDs */ #define OCON_FS 1 /* unlabeled file systems (deprecated) */ #define OCON_PORT 2 /* TCP and UDP port numbers */ #define OCON_NETIF 3 /* network interfaces */ #define OCON_NODE 4 /* nodes */ #define OCON_FSUSE 5 /* fs_use */ #define OCON_NODE6 6 /* IPv6 nodes */ #define OCON_IBPKEY 7 /* Infiniband PKeys */ #define OCON_IBENDPORT 8 /* Infiniband end ports */ #define OCON_NUM 9 /* The policy database */ struct policydb { int mls_enabled; /* symbol tables */ struct symtab symtab[SYM_NUM]; #define p_commons symtab[SYM_COMMONS] #define p_classes symtab[SYM_CLASSES] #define p_roles symtab[SYM_ROLES] #define p_types symtab[SYM_TYPES] #define p_users symtab[SYM_USERS] #define p_bools symtab[SYM_BOOLS] #define p_levels symtab[SYM_LEVELS] #define p_cats symtab[SYM_CATS] /* symbol names indexed by (value - 1) */ char **sym_val_to_name[SYM_NUM]; /* class, role, and user attributes indexed by (value - 1) */ struct class_datum **class_val_to_struct; struct role_datum **role_val_to_struct; struct user_datum **user_val_to_struct; struct type_datum **type_val_to_struct; /* type enforcement access vectors and transitions */ struct avtab te_avtab; /* role transitions */ struct hashtab role_tr; /* file transitions with the last path component */ /* quickly exclude lookups when parent ttype has no rules */ struct ebitmap filename_trans_ttypes; /* actual set of filename_trans rules */ struct hashtab filename_trans; /* only used if policyvers < POLICYDB_VERSION_COMP_FTRANS */ u32 compat_filename_trans_count; /* bools indexed by (value - 1) */ struct cond_bool_datum **bool_val_to_struct; /* type enforcement conditional access vectors and transitions */ struct avtab te_cond_avtab; /* array indexing te_cond_avtab by conditional */ struct cond_node *cond_list; u32 cond_list_len; /* role allows */ struct role_allow *role_allow; /* security contexts of initial SIDs, unlabeled file systems, TCP or UDP port numbers, network interfaces and nodes */ struct ocontext *ocontexts[OCON_NUM]; /* security contexts for files in filesystems that cannot support a persistent label mapping or use another fixed labeling behavior. 
*/ struct genfs *genfs; /* range transitions table (range_trans_key -> mls_range) */ struct hashtab range_tr; /* type -> attribute reverse mapping */ struct ebitmap *type_attr_map_array; struct ebitmap policycaps; struct ebitmap permissive_map; /* length of this policy when it was loaded */ size_t len; unsigned int policyvers; unsigned int reject_unknown : 1; unsigned int allow_unknown : 1; u16 process_class; u32 process_trans_perms; } __randomize_layout; extern void policydb_destroy(struct policydb *p); extern int policydb_load_isids(struct policydb *p, struct sidtab *s); extern int policydb_context_isvalid(struct policydb *p, struct context *c); extern int policydb_class_isvalid(struct policydb *p, unsigned int class); extern int policydb_type_isvalid(struct policydb *p, unsigned int type); extern int policydb_role_isvalid(struct policydb *p, unsigned int role); extern int policydb_read(struct policydb *p, void *fp); extern int policydb_write(struct policydb *p, void *fp); extern struct filename_trans_datum * policydb_filenametr_search(struct policydb *p, struct filename_trans_key *key); extern struct mls_range *policydb_rangetr_search(struct policydb *p, struct range_trans *key); extern struct role_trans_datum * policydb_roletr_search(struct policydb *p, struct role_trans_key *key); #define POLICYDB_CONFIG_MLS 1 /* the config flags related to unknown classes/perms are bits 2 and 3 */ #define REJECT_UNKNOWN 0x00000002 #define ALLOW_UNKNOWN 0x00000004 #define OBJECT_R "object_r" #define OBJECT_R_VAL 1 #define POLICYDB_MAGIC SELINUX_MAGIC #define POLICYDB_STRING "SE Linux" struct policy_file { char *data; size_t len; }; struct policy_data { struct policydb *p; void *fp; }; static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes) { if (bytes > fp->len) return -EINVAL; memcpy(buf, fp->data, bytes); fp->data += bytes; fp->len -= bytes; return 0; } static inline int put_entry(const void *buf, size_t bytes, size_t num, struct policy_file *fp) { size_t len; if (unlikely(check_mul_overflow(bytes, num, &len))) return -EINVAL; if (len > fp->len) return -EINVAL; memcpy(fp->data, buf, len); fp->data += len; fp->len -= len; return 0; } static inline char *sym_name(struct policydb *p, unsigned int sym_num, unsigned int element_nr) { return p->sym_val_to_name[sym_num][element_nr]; } extern u16 string_to_security_class(struct policydb *p, const char *name); extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name); #endif /* _SS_POLICYDB_H_ */ |
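As a hedged illustration of how the stream helpers at the end of this header are meant to be used (the example_ name is made up), policy parsing code typically pulls fixed-size little-endian fields out of the mapped image like this:

/*
 * Illustrative sketch only: read one little-endian u32 from a policy image
 * via next_entry(), the usual pattern in policydb_read()-style parsers.
 */
static int example_read_u32(struct policy_file *fp, u32 *out)
{
	__le32 buf;
	int rc;

	rc = next_entry(&buf, fp, sizeof(buf));
	if (rc)
		return rc;	/* image truncated: next_entry() returns -EINVAL */

	*out = le32_to_cpu(buf);
	return 0;
}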
7 7 7 7 7 7 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 | // SPDX-License-Identifier: GPL-2.0-only /* * Generic GPIO card-detect helper * * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de> */ #include <linux/err.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/mmc/host.h> #include <linux/mmc/slot-gpio.h> #include <linux/module.h> #include <linux/slab.h> #include "slot-gpio.h" struct mmc_gpio { struct gpio_desc *ro_gpio; struct gpio_desc *cd_gpio; irq_handler_t cd_gpio_isr; char *ro_label; char *cd_label; u32 cd_debounce_delay_ms; int cd_irq; }; static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) { /* Schedule a card detection after a debounce timeout */ struct mmc_host *host = dev_id; struct mmc_gpio *ctx = host->slot.handler_priv; host->trigger_card_event = true; mmc_detect_change(host, msecs_to_jiffies(ctx->cd_debounce_delay_ms)); return IRQ_HANDLED; } int mmc_gpio_alloc(struct mmc_host *host) { const char *devname = dev_name(host->parent); struct mmc_gpio *ctx; ctx = devm_kzalloc(host->parent, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->cd_debounce_delay_ms = 200; ctx->cd_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s cd", devname); if (!ctx->cd_label) return -ENOMEM; ctx->ro_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s ro", devname); if (!ctx->ro_label) return -ENOMEM; ctx->cd_irq = -EINVAL; host->slot.handler_priv = ctx; host->slot.cd_irq = -EINVAL; return 0; } void mmc_gpio_set_cd_irq(struct mmc_host *host, int irq) { struct mmc_gpio *ctx = host->slot.handler_priv; if (!ctx || irq < 0) return; ctx->cd_irq = irq; } EXPORT_SYMBOL(mmc_gpio_set_cd_irq); int mmc_gpio_get_ro(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; int cansleep; if (!ctx || !ctx->ro_gpio) return -ENOSYS; cansleep = gpiod_cansleep(ctx->ro_gpio); return cansleep ? gpiod_get_value_cansleep(ctx->ro_gpio) : gpiod_get_value(ctx->ro_gpio); } EXPORT_SYMBOL(mmc_gpio_get_ro); int mmc_gpio_get_cd(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; int cansleep; if (!ctx || !ctx->cd_gpio) return -ENOSYS; cansleep = gpiod_cansleep(ctx->cd_gpio); return cansleep ? 
gpiod_get_value_cansleep(ctx->cd_gpio) : gpiod_get_value(ctx->cd_gpio); } EXPORT_SYMBOL(mmc_gpio_get_cd); void mmc_gpiod_request_cd_irq(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; int irq = -EINVAL; int ret; if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio) return; /* * Do not use IRQ if the platform prefers to poll, e.g., because that * IRQ number is already used by another unit and cannot be shared. */ if (ctx->cd_irq >= 0) irq = ctx->cd_irq; else if (!(host->caps & MMC_CAP_NEEDS_POLL)) irq = gpiod_to_irq(ctx->cd_gpio); if (irq >= 0) { if (!ctx->cd_gpio_isr) ctx->cd_gpio_isr = mmc_gpio_cd_irqt; ret = devm_request_threaded_irq(host->parent, irq, NULL, ctx->cd_gpio_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, ctx->cd_label, host); if (ret < 0) irq = ret; } host->slot.cd_irq = irq; if (irq < 0) host->caps |= MMC_CAP_NEEDS_POLL; } EXPORT_SYMBOL(mmc_gpiod_request_cd_irq); int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on) { int ret = 0; if (!(host->caps & MMC_CAP_CD_WAKE) || host->slot.cd_irq < 0 || on == host->slot.cd_wake_enabled) return 0; if (on) { ret = enable_irq_wake(host->slot.cd_irq); host->slot.cd_wake_enabled = !ret; } else { disable_irq_wake(host->slot.cd_irq); host->slot.cd_wake_enabled = false; } return ret; } EXPORT_SYMBOL(mmc_gpio_set_cd_wake); /* Register an alternate interrupt service routine for * the card-detect GPIO. */ void mmc_gpio_set_cd_isr(struct mmc_host *host, irq_handler_t isr) { struct mmc_gpio *ctx = host->slot.handler_priv; WARN_ON(ctx->cd_gpio_isr); ctx->cd_gpio_isr = isr; } EXPORT_SYMBOL(mmc_gpio_set_cd_isr); /** * mmc_gpiod_request_cd - request a gpio descriptor for card-detection * @host: mmc host * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer * @override_active_level: ignore %GPIO_ACTIVE_LOW flag * @debounce: debounce time in microseconds * * Note that this must be called prior to mmc_add_host() * otherwise the caller must also call mmc_gpiod_request_cd_irq(). * * Returns zero on success, else an error. */ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; int ret; desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN); if (IS_ERR(desc)) return PTR_ERR(desc); /* Update default label if no con_id provided */ if (!con_id) gpiod_set_consumer_name(desc, ctx->cd_label); if (debounce) { ret = gpiod_set_debounce(desc, debounce); if (ret < 0) ctx->cd_debounce_delay_ms = debounce / 1000; } /* override forces default (active-low) polarity ... */ if (override_active_level && !gpiod_is_active_low(desc)) gpiod_toggle_active_low(desc); /* ... or active-high */ if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) gpiod_toggle_active_low(desc); ctx->cd_gpio = desc; return 0; } EXPORT_SYMBOL(mmc_gpiod_request_cd); /** * mmc_gpiod_set_cd_config - set config for card-detection GPIO * @host: mmc host * @config: Generic pinconf config (from pinconf_to_config_packed()) * * This can be used by mmc host drivers to fixup a card-detection GPIO's config * (e.g. set PIN_CONFIG_BIAS_PULL_UP) after acquiring the GPIO descriptor * through mmc_gpiod_request_cd(). * * Returns: * 0 on success, or a negative errno value on error. 
*/ int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config) { struct mmc_gpio *ctx = host->slot.handler_priv; return gpiod_set_config(ctx->cd_gpio, config); } EXPORT_SYMBOL(mmc_gpiod_set_cd_config); bool mmc_can_gpio_cd(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; return ctx->cd_gpio ? true : false; } EXPORT_SYMBOL(mmc_can_gpio_cd); /** * mmc_gpiod_request_ro - request a gpio descriptor for write protection * @host: mmc host * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer * @debounce: debounce time in microseconds * * Returns zero on success, else an error. */ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, unsigned int idx, unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; int ret; desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN); if (IS_ERR(desc)) return PTR_ERR(desc); /* Update default label if no con_id provided */ if (!con_id) gpiod_set_consumer_name(desc, ctx->ro_label); if (debounce) { ret = gpiod_set_debounce(desc, debounce); if (ret < 0) return ret; } if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH) gpiod_toggle_active_low(desc); ctx->ro_gpio = desc; return 0; } EXPORT_SYMBOL(mmc_gpiod_request_ro); bool mmc_can_gpio_ro(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; return ctx->ro_gpio ? true : false; } EXPORT_SYMBOL(mmc_can_gpio_ro); |
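To show how a host driver is expected to consume the helpers above, here is a hedged probe-time sketch; the foo_ name, the "cd"/"wp" con_ids and the -ENOENT handling are illustrative assumptions rather than anything mandated by this file.

/*
 * Hypothetical usage sketch (not part of slot-gpio.c): request optional
 * card-detect and write-protect GPIOs for a host before mmc_add_host().
 */
static int foo_mmc_request_slot_gpios(struct mmc_host *mmc)
{
	int ret;

	/* "cd" GPIO, index 0, keep DT polarity, no extra gpiod debounce. */
	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
	if (ret && ret != -ENOENT)
		return ret;

	/* The write-protect line is optional on many boards. */
	ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
	if (ret && ret != -ENOENT)
		return ret;

	/*
	 * Only needed when the GPIOs were requested after mmc_add_host();
	 * otherwise the core requests the card-detect IRQ itself, as the
	 * kernel-doc for mmc_gpiod_request_cd() notes.
	 */
	mmc_gpiod_request_cd_irq(mmc);
	return 0;
}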
2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 | // SPDX-License-Identifier: GPL-2.0-only /* * Interface handling * * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (c) 2016 Intel Deutschland GmbH * Copyright (C) 2018-2024 Intel Corporation */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/if_arp.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/kcov.h> #include <net/mac80211.h> #include <net/ieee80211_radiotap.h> #include "ieee80211_i.h" #include "sta_info.h" #include "debugfs_netdev.h" #include "mesh.h" #include "led.h" #include "driver-ops.h" #include "wme.h" #include "rate.h" /** * DOC: Interface list locking * * The interface list in each struct ieee80211_local is protected * three-fold: * * (1) modifications may only be done under the RTNL *and* wiphy mutex * *and* iflist_mtx * (2) modifications are done in an RCU manner so atomic readers * can traverse the list in RCU-safe blocks. * * As a consequence, reads (traversals) of the list can be protected * by either the RTNL, the wiphy mutex, the iflist_mtx or RCU. */ static void ieee80211_iface_work(struct wiphy *wiphy, struct wiphy_work *work); bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) { struct ieee80211_chanctx_conf *chanctx_conf; int power; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); return false; } power = ieee80211_chandef_max_power(&chanctx_conf->def); rcu_read_unlock(); if (sdata->deflink.user_power_level != IEEE80211_UNSET_POWER_LEVEL) power = min(power, sdata->deflink.user_power_level); if (sdata->deflink.ap_power_level != IEEE80211_UNSET_POWER_LEVEL) power = min(power, sdata->deflink.ap_power_level); if (power != sdata->vif.bss_conf.txpower) { sdata->vif.bss_conf.txpower = power; ieee80211_hw_config(sdata->local, 0); return true; } return false; } void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata, bool update_bss) { if (__ieee80211_recalc_txpower(sdata) || (update_bss && ieee80211_sdata_running(sdata))) ieee80211_link_info_change_notify(sdata, &sdata->deflink, BSS_CHANGED_TXPOWER); } static u32 __ieee80211_idle_off(struct ieee80211_local *local) { if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) return 0; local->hw.conf.flags &= ~IEEE80211_CONF_IDLE; return IEEE80211_CONF_CHANGE_IDLE; } static u32 __ieee80211_idle_on(struct ieee80211_local *local) { if (local->hw.conf.flags & IEEE80211_CONF_IDLE) return 0; ieee80211_flush_queues(local, NULL, false); local->hw.conf.flags |= IEEE80211_CONF_IDLE; return IEEE80211_CONF_CHANGE_IDLE; } static u32 __ieee80211_recalc_idle(struct ieee80211_local *local, bool force_active) { bool working, scanning, active; unsigned int led_trig_start = 0, led_trig_stop = 0; lockdep_assert_wiphy(local->hw.wiphy); active = force_active || !list_empty(&local->chanctx_list) || local->monitors; working = !local->ops->remain_on_channel && !list_empty(&local->roc_list); scanning = test_bit(SCAN_SW_SCANNING, &local->scanning) || test_bit(SCAN_ONCHANNEL_SCANNING, 
&local->scanning); if (working || scanning) led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK; else led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK; if (active) led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; else led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); if (working || scanning || active) return __ieee80211_idle_off(local); return __ieee80211_idle_on(local); } u32 ieee80211_idle_off(struct ieee80211_local *local) { return __ieee80211_recalc_idle(local, true); } void ieee80211_recalc_idle(struct ieee80211_local *local) { u32 change = __ieee80211_recalc_idle(local, false); if (change) ieee80211_hw_config(local, change); } static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr, bool check_dup) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *iter; u64 new, mask, tmp; u8 *m; int ret = 0; lockdep_assert_wiphy(local->hw.wiphy); if (is_zero_ether_addr(local->hw.wiphy->addr_mask)) return 0; m = addr; new = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); m = local->hw.wiphy->addr_mask; mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); if (!check_dup) return ret; list_for_each_entry(iter, &local->interfaces, list) { if (iter == sdata) continue; if (iter->vif.type == NL80211_IFTYPE_MONITOR && !(iter->u.mntr.flags & MONITOR_FLAG_ACTIVE)) continue; m = iter->vif.addr; tmp = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); if ((new & ~mask) != (tmp & ~mask)) { ret = -EINVAL; break; } } return ret; } static int ieee80211_can_powered_addr_change(struct ieee80211_sub_if_data *sdata) { struct ieee80211_roc_work *roc; struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *scan_sdata; int ret = 0; lockdep_assert_wiphy(local->hw.wiphy); /* To be the most flexible here we want to only limit changing the * address if the specific interface is doing offchannel work or * scanning. */ if (netif_carrier_ok(sdata->dev)) return -EBUSY; /* First check no ROC work is happening on this iface */ list_for_each_entry(roc, &local->roc_list, list) { if (roc->sdata != sdata) continue; if (roc->started) { ret = -EBUSY; goto unlock; } } /* And if this iface is scanning */ if (local->scanning) { scan_sdata = rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->hw.wiphy->mtx)); if (sdata == scan_sdata) ret = -EBUSY; } switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: /* More interface types could be added here but changing the * address while powered makes the most sense in client modes. 
*/ break; default: ret = -EOPNOTSUPP; } unlock: return ret; } static int _ieee80211_change_mac(struct ieee80211_sub_if_data *sdata, void *addr) { struct ieee80211_local *local = sdata->local; struct sockaddr *sa = addr; bool check_dup = true; bool live = false; int ret; if (ieee80211_sdata_running(sdata)) { ret = ieee80211_can_powered_addr_change(sdata); if (ret) return ret; live = true; } if (sdata->vif.type == NL80211_IFTYPE_MONITOR && !(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE)) check_dup = false; ret = ieee80211_verify_mac(sdata, sa->sa_data, check_dup); if (ret) return ret; if (live) drv_remove_interface(local, sdata); ret = eth_mac_addr(sdata->dev, sa); if (ret == 0) { memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN); ether_addr_copy(sdata->vif.bss_conf.addr, sdata->vif.addr); } /* Regardless of eth_mac_addr() return we still want to add the * interface back. This should not fail... */ if (live) WARN_ON(drv_add_interface(local, sdata)); return ret; } static int ieee80211_change_mac(struct net_device *dev, void *addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; int ret; /* * This happens during unregistration if there's a bond device * active (maybe other cases?) and we must get removed from it. * But we really don't care anymore if it's not registered now. */ if (!dev->ieee80211_ptr->registered) return 0; wiphy_lock(local->hw.wiphy); ret = _ieee80211_change_mac(sdata, addr); wiphy_unlock(local->hw.wiphy); return ret; } static inline int identical_mac_addr_allowed(int type1, int type2) { return type1 == NL80211_IFTYPE_MONITOR || type2 == NL80211_IFTYPE_MONITOR || type1 == NL80211_IFTYPE_P2P_DEVICE || type2 == NL80211_IFTYPE_P2P_DEVICE || (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_AP_VLAN) || (type1 == NL80211_IFTYPE_AP_VLAN && (type2 == NL80211_IFTYPE_AP || type2 == NL80211_IFTYPE_AP_VLAN)); } static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype iftype) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *nsdata; ASSERT_RTNL(); lockdep_assert_wiphy(local->hw.wiphy); /* we hold the RTNL here so can safely walk the list */ list_for_each_entry(nsdata, &local->interfaces, list) { if (nsdata != sdata && ieee80211_sdata_running(nsdata)) { /* * Only OCB and monitor mode may coexist */ if ((sdata->vif.type == NL80211_IFTYPE_OCB && nsdata->vif.type != NL80211_IFTYPE_MONITOR) || (sdata->vif.type != NL80211_IFTYPE_MONITOR && nsdata->vif.type == NL80211_IFTYPE_OCB)) return -EBUSY; /* * Allow only a single IBSS interface to be up at any * time. This is restricted because beacon distribution * cannot work properly if both are in the same IBSS. * * To remove this restriction we'd have to disallow them * from setting the same SSID on different IBSS interfaces * belonging to the same hardware. Then, however, we're * faced with having to adopt two different TSF timers... */ if (iftype == NL80211_IFTYPE_ADHOC && nsdata->vif.type == NL80211_IFTYPE_ADHOC) return -EBUSY; /* * will not add another interface while any channel * switch is active. */ if (nsdata->vif.bss_conf.csa_active) return -EBUSY; /* * The remaining checks are only performed for interfaces * with the same MAC address. 
*/ if (!ether_addr_equal(sdata->vif.addr, nsdata->vif.addr)) continue; /* * check whether it may have the same address */ if (!identical_mac_addr_allowed(iftype, nsdata->vif.type)) return -ENOTUNIQ; /* No support for VLAN with MLO yet */ if (iftype == NL80211_IFTYPE_AP_VLAN && sdata->wdev.use_4addr && nsdata->vif.type == NL80211_IFTYPE_AP && nsdata->vif.valid_links) return -EOPNOTSUPP; /* * can only add VLANs to enabled APs */ if (iftype == NL80211_IFTYPE_AP_VLAN && nsdata->vif.type == NL80211_IFTYPE_AP) sdata->bss = &nsdata->u.ap; } } return ieee80211_check_combinations(sdata, NULL, 0, 0, -1); } static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype iftype) { int n_queues = sdata->local->hw.queues; int i; if (iftype == NL80211_IFTYPE_NAN) return 0; if (iftype != NL80211_IFTYPE_P2P_DEVICE) { for (i = 0; i < IEEE80211_NUM_ACS; i++) { if (WARN_ON_ONCE(sdata->vif.hw_queue[i] == IEEE80211_INVAL_HW_QUEUE)) return -EINVAL; if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >= n_queues)) return -EINVAL; } } if ((iftype != NL80211_IFTYPE_AP && iftype != NL80211_IFTYPE_P2P_GO && iftype != NL80211_IFTYPE_MESH_POINT) || !ieee80211_hw_check(&sdata->local->hw, QUEUE_CONTROL)) { sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE; return 0; } if (WARN_ON_ONCE(sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE)) return -EINVAL; if (WARN_ON_ONCE(sdata->vif.cab_queue >= n_queues)) return -EINVAL; return 0; } static int ieee80211_open(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); int err; /* fail early if user set an invalid address */ if (!is_valid_ether_addr(dev->dev_addr)) return -EADDRNOTAVAIL; wiphy_lock(sdata->local->hw.wiphy); err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type); if (err) goto out; err = ieee80211_do_open(&sdata->wdev, true); out: wiphy_unlock(sdata->local->hw.wiphy); return err; } static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_down) { struct ieee80211_local *local = sdata->local; unsigned long flags; struct sk_buff_head freeq; struct sk_buff *skb, *tmp; u32 hw_reconf_flags = 0; int i, flushed; struct ps_data *ps; struct cfg80211_chan_def chandef; bool cancel_scan; struct cfg80211_nan_func *func; lockdep_assert_wiphy(local->hw.wiphy); clear_bit(SDATA_STATE_RUNNING, &sdata->state); synchronize_rcu(); /* flush _ieee80211_wake_txqs() */ cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata; if (cancel_scan) ieee80211_scan_cancel(local); ieee80211_roc_purge(local, sdata); switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: ieee80211_mgd_stop(sdata); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_stop(sdata); break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) break; list_del_rcu(&sdata->u.mntr.list); break; default: break; } /* * Remove all stations associated with this interface. * * This must be done before calling ops->remove_interface() * because otherwise we can later invoke ops->sta_notify() * whenever the STAs are removed, and that invalidates driver * assumptions about always getting a vif pointer that is valid * (because if we remove a STA after ops->remove_interface() * the driver will have removed the vif info already!) * * For AP_VLANs stations may exist since there's nothing else that * would have removed them, but in other modes there shouldn't * be any stations. 
*/ flushed = sta_info_flush(sdata, -1); WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN && flushed > 0); /* don't count this interface for allmulti while it is down */ if (sdata->flags & IEEE80211_SDATA_ALLMULTI) atomic_dec(&local->iff_allmultis); if (sdata->vif.type == NL80211_IFTYPE_AP) { local->fif_pspoll--; local->fif_probe_req--; } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { local->fif_probe_req--; } if (sdata->dev) { netif_addr_lock_bh(sdata->dev); spin_lock_bh(&local->filter_lock); __hw_addr_unsync(&local->mc_list, &sdata->dev->mc, sdata->dev->addr_len); spin_unlock_bh(&local->filter_lock); netif_addr_unlock_bh(sdata->dev); } del_timer_sync(&local->dynamic_ps_timer); wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work); WARN(ieee80211_vif_is_mld(&sdata->vif), "destroying interface with valid links 0x%04x\n", sdata->vif.valid_links); sdata->vif.bss_conf.csa_active = false; if (sdata->vif.type == NL80211_IFTYPE_STATION) sdata->deflink.u.mgd.csa.waiting_bcn = false; ieee80211_vif_unblock_queues_csa(sdata); wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.csa.finalize_work); wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.color_change_finalize_work); wiphy_delayed_work_cancel(local->hw.wiphy, &sdata->deflink.dfs_cac_timer_work); if (sdata->wdev.links[0].cac_started) { chandef = sdata->vif.bss_conf.chanreq.oper; WARN_ON(local->suspended); ieee80211_link_release_channel(&sdata->deflink); cfg80211_cac_event(sdata->dev, &chandef, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL, 0); } if (sdata->vif.type == NL80211_IFTYPE_AP) { WARN_ON(!list_empty(&sdata->u.ap.vlans)); } else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { /* remove all packets in parent bc_buf pointing to this dev */ ps = &sdata->bss->ps; spin_lock_irqsave(&ps->bc_buf.lock, flags); skb_queue_walk_safe(&ps->bc_buf, skb, tmp) { if (skb->dev == sdata->dev) { __skb_unlink(skb, &ps->bc_buf); local->total_ps_buffered--; ieee80211_free_txskb(&local->hw, skb); } } spin_unlock_irqrestore(&ps->bc_buf.lock, flags); } if (going_down) local->open_count--; switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: list_del(&sdata->u.vlan.list); RCU_INIT_POINTER(sdata->vif.bss_conf.chanctx_conf, NULL); /* see comment in the default case below */ ieee80211_free_keys(sdata, true); /* no need to tell driver */ break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) { local->cooked_mntrs--; break; } local->monitors--; if (local->monitors == 0) { local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR; hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; } ieee80211_adjust_monitor_flags(sdata, -1); break; case NL80211_IFTYPE_NAN: /* clean all the functions */ spin_lock_bh(&sdata->u.nan.func_lock); idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, i) { idr_remove(&sdata->u.nan.function_inst_ids, i); cfg80211_free_nan_func(func); } idr_destroy(&sdata->u.nan.function_inst_ids); spin_unlock_bh(&sdata->u.nan.func_lock); break; case NL80211_IFTYPE_P2P_DEVICE: /* relies on synchronize_rcu() below */ RCU_INIT_POINTER(local->p2p_sdata, NULL); fallthrough; default: wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->work); /* * When we get here, the interface is marked down. * Free the remaining keys, if there are any * (which can happen in AP mode if userspace sets * keys before the interface is operating) * * Force the key freeing to always synchronize_net() * to wait for the RX path in case it is using this * interface enqueuing frames at this very time on * another CPU. 
*/ ieee80211_free_keys(sdata, true); skb_queue_purge(&sdata->skb_queue); skb_queue_purge(&sdata->status_queue); } /* * Since ieee80211_free_txskb() may issue __dev_queue_xmit() * which should be called with interrupts enabled, reclamation * is done in two phases: */ __skb_queue_head_init(&freeq); /* unlink from local queues... */ spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { skb_queue_walk_safe(&local->pending[i], skb, tmp) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (info->control.vif == &sdata->vif) { __skb_unlink(skb, &local->pending[i]); __skb_queue_tail(&freeq, skb); } } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); /* ... and perform actual reclamation with interrupts enabled. */ skb_queue_walk_safe(&freeq, skb, tmp) { __skb_unlink(skb, &freeq); ieee80211_free_txskb(&local->hw, skb); } if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) ieee80211_txq_remove_vlan(local, sdata); sdata->bss = NULL; if (local->open_count == 0) ieee80211_clear_tx_pending(local); sdata->vif.bss_conf.beacon_int = 0; /* * If the interface goes down while suspended, presumably because * the device was unplugged and that happens before our resume, * then the driver is already unconfigured and the remainder of * this function isn't needed. * XXX: what about WoWLAN? If the device has software state, e.g. * memory allocated, it might expect teardown commands from * mac80211 here? */ if (local->suspended) { WARN_ON(local->wowlan); WARN_ON(rcu_access_pointer(local->monitor_sdata)); return; } switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: break; case NL80211_IFTYPE_MONITOR: if (local->monitors == 0) ieee80211_del_virtual_monitor(local); ieee80211_recalc_idle(local); ieee80211_recalc_offload(local); if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE)) break; fallthrough; default: if (!going_down) break; drv_remove_interface(local, sdata); /* Clear private driver data to prevent reuse */ memset(sdata->vif.drv_priv, 0, local->hw.vif_data_size); } ieee80211_recalc_ps(local); if (cancel_scan) wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work); if (local->open_count == 0) { ieee80211_stop_device(local, false); /* no reconfiguring after stop! 
*/ return; } /* do after stop to avoid reconfiguring when we stop anyway */ ieee80211_configure_filter(local); ieee80211_hw_config(local, hw_reconf_flags); if (local->monitors == local->open_count) ieee80211_add_virtual_monitor(local); } static void ieee80211_stop_mbssid(struct ieee80211_sub_if_data *sdata) { struct ieee80211_sub_if_data *tx_sdata, *non_tx_sdata, *tmp_sdata; struct ieee80211_vif *tx_vif = sdata->vif.mbssid_tx_vif; if (!tx_vif) return; tx_sdata = vif_to_sdata(tx_vif); sdata->vif.mbssid_tx_vif = NULL; list_for_each_entry_safe(non_tx_sdata, tmp_sdata, &tx_sdata->local->interfaces, list) { if (non_tx_sdata != sdata && non_tx_sdata != tx_sdata && non_tx_sdata->vif.mbssid_tx_vif == tx_vif && ieee80211_sdata_running(non_tx_sdata)) { non_tx_sdata->vif.mbssid_tx_vif = NULL; dev_close(non_tx_sdata->wdev.netdev); } } if (sdata != tx_sdata && ieee80211_sdata_running(tx_sdata)) { tx_sdata->vif.mbssid_tx_vif = NULL; dev_close(tx_sdata->wdev.netdev); } } static int ieee80211_stop(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); /* close dependent VLAN and MBSSID interfaces before locking wiphy */ if (sdata->vif.type == NL80211_IFTYPE_AP) { struct ieee80211_sub_if_data *vlan, *tmpsdata; list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, u.vlan.list) dev_close(vlan->dev); ieee80211_stop_mbssid(sdata); } wiphy_lock(sdata->local->hw.wiphy); wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->activate_links_work); ieee80211_do_stop(sdata, true); wiphy_unlock(sdata->local->hw.wiphy); return 0; } static void ieee80211_set_multicast_list(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; int allmulti, sdata_allmulti; allmulti = !!(dev->flags & IFF_ALLMULTI); sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI); if (allmulti != sdata_allmulti) { if (dev->flags & IFF_ALLMULTI) atomic_inc(&local->iff_allmultis); else atomic_dec(&local->iff_allmultis); sdata->flags ^= IEEE80211_SDATA_ALLMULTI; } spin_lock_bh(&local->filter_lock); __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len); spin_unlock_bh(&local->filter_lock); wiphy_work_queue(local->hw.wiphy, &local->reconfig_filter); } /* * Called when the netdev is removed or, by the code below, before * the interface type changes. 
*/ static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata) { /* free extra data */ ieee80211_free_keys(sdata, false); ieee80211_debugfs_remove_netdev(sdata); ieee80211_destroy_frag_cache(&sdata->frags); if (ieee80211_vif_is_mesh(&sdata->vif)) ieee80211_mesh_teardown_sdata(sdata); ieee80211_vif_clear_links(sdata); ieee80211_link_stop(&sdata->deflink); } static void ieee80211_uninit(struct net_device *dev) { ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev)); } static int ieee80211_netdev_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; return drv_net_setup_tc(local, sdata, dev, type, type_data); } static const struct net_device_ops ieee80211_dataif_ops = { .ndo_open = ieee80211_open, .ndo_stop = ieee80211_stop, .ndo_uninit = ieee80211_uninit, .ndo_start_xmit = ieee80211_subif_start_xmit, .ndo_set_rx_mode = ieee80211_set_multicast_list, .ndo_set_mac_address = ieee80211_change_mac, .ndo_setup_tc = ieee80211_netdev_setup_tc, }; static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; int len_rthdr; if (local->hw.queues < IEEE80211_NUM_ACS) return 0; /* reset flags and info before parsing radiotap header */ memset(info, 0, sizeof(*info)); if (!ieee80211_parse_tx_radiotap(skb, dev)) return 0; /* doesn't matter, frame will be dropped */ len_rthdr = ieee80211_get_radiotap_len(skb->data); hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); if (skb->len < len_rthdr + 2 || skb->len < len_rthdr + ieee80211_hdrlen(hdr->frame_control)) return 0; /* doesn't matter, frame will be dropped */ return ieee80211_select_queue_80211(sdata, skb, hdr); } static const struct net_device_ops ieee80211_monitorif_ops = { .ndo_open = ieee80211_open, .ndo_stop = ieee80211_stop, .ndo_uninit = ieee80211_uninit, .ndo_start_xmit = ieee80211_monitor_start_xmit, .ndo_set_rx_mode = ieee80211_set_multicast_list, .ndo_set_mac_address = ieee80211_change_mac, .ndo_select_queue = ieee80211_monitor_select_queue, }; static int ieee80211_netdev_fill_forward_path(struct net_device_path_ctx *ctx, struct net_device_path *path) { struct ieee80211_sub_if_data *sdata; struct ieee80211_local *local; struct sta_info *sta; int ret = -ENOENT; sdata = IEEE80211_DEV_TO_SUB_IF(ctx->dev); local = sdata->local; if (!local->ops->net_fill_forward_path) return -EOPNOTSUPP; rcu_read_lock(); switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: sta = rcu_dereference(sdata->u.vlan.sta); if (sta) break; if (sdata->wdev.use_4addr) goto out; if (is_multicast_ether_addr(ctx->daddr)) goto out; sta = sta_info_get_bss(sdata, ctx->daddr); break; case NL80211_IFTYPE_AP: if (is_multicast_ether_addr(ctx->daddr)) goto out; sta = sta_info_get(sdata, ctx->daddr); break; case NL80211_IFTYPE_STATION: if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) { sta = sta_info_get(sdata, ctx->daddr); if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) goto out; break; } } sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid); break; default: goto out; } if (!sta) goto out; ret = drv_net_fill_forward_path(local, sdata, &sta->sta, ctx, path); out: rcu_read_unlock(); return ret; } static const struct 
net_device_ops ieee80211_dataif_8023_ops = { .ndo_open = ieee80211_open, .ndo_stop = ieee80211_stop, .ndo_uninit = ieee80211_uninit, .ndo_start_xmit = ieee80211_subif_start_xmit_8023, .ndo_set_rx_mode = ieee80211_set_multicast_list, .ndo_set_mac_address = ieee80211_change_mac, .ndo_fill_forward_path = ieee80211_netdev_fill_forward_path, .ndo_setup_tc = ieee80211_netdev_setup_tc, }; static bool ieee80211_iftype_supports_hdr_offload(enum nl80211_iftype iftype) { switch (iftype) { /* P2P GO and client are mapped to AP/STATION types */ case NL80211_IFTYPE_AP: case NL80211_IFTYPE_STATION: return true; default: return false; } } static bool ieee80211_set_sdata_offload_flags(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; u32 flags; flags = sdata->vif.offload_flags; if (ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD) && ieee80211_iftype_supports_hdr_offload(sdata->vif.type)) { flags |= IEEE80211_OFFLOAD_ENCAP_ENABLED; if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG) && local->hw.wiphy->frag_threshold != (u32)-1) flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED; if (local->monitors) flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED; } else { flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED; } if (ieee80211_hw_check(&local->hw, SUPPORTS_RX_DECAP_OFFLOAD) && ieee80211_iftype_supports_hdr_offload(sdata->vif.type)) { flags |= IEEE80211_OFFLOAD_DECAP_ENABLED; if (local->monitors && !ieee80211_hw_check(&local->hw, SUPPORTS_CONC_MON_RX_DECAP)) flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED; } else { flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED; } if (sdata->vif.offload_flags == flags) return false; sdata->vif.offload_flags = flags; ieee80211_check_fast_rx_iface(sdata); return true; } static void ieee80211_set_vif_encap_ops(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *bss = sdata; bool enabled; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { if (!sdata->bss) return; bss = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); } if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD) || !ieee80211_iftype_supports_hdr_offload(bss->vif.type)) return; enabled = bss->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED; if (sdata->wdev.use_4addr && !(bss->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_4ADDR)) enabled = false; sdata->dev->netdev_ops = enabled ? 
&ieee80211_dataif_8023_ops : &ieee80211_dataif_ops; } static void ieee80211_recalc_sdata_offload(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *vsdata; if (ieee80211_set_sdata_offload_flags(sdata)) { drv_update_vif_offload(local, sdata); ieee80211_set_vif_encap_ops(sdata); } list_for_each_entry(vsdata, &local->interfaces, list) { if (vsdata->vif.type != NL80211_IFTYPE_AP_VLAN || vsdata->bss != &sdata->u.ap) continue; ieee80211_set_vif_encap_ops(vsdata); } } void ieee80211_recalc_offload(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD)) return; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; ieee80211_recalc_sdata_offload(sdata); } } void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, const int offset) { struct ieee80211_local *local = sdata->local; u32 flags = sdata->u.mntr.flags; #define ADJUST(_f, _s) do { \ if (flags & MONITOR_FLAG_##_f) \ local->fif_##_s += offset; \ } while (0) ADJUST(FCSFAIL, fcsfail); ADJUST(PLCPFAIL, plcpfail); ADJUST(CONTROL, control); ADJUST(CONTROL, pspoll); ADJUST(OTHER_BSS, other_bss); #undef ADJUST } static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; int i; for (i = 0; i < IEEE80211_NUM_ACS; i++) { if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE; else if (local->hw.queues >= IEEE80211_NUM_ACS) sdata->vif.hw_queue[i] = i; else sdata->vif.hw_queue[i] = 0; } sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE; } static void ieee80211_sdata_init(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { sdata->local = local; /* * Initialize the default link, so we can use link_id 0 for non-MLD, * and that continues to work for non-MLD-aware drivers that use just * vif.bss_conf instead of vif.link_conf. * * Note that we never change this, so if link ID 0 isn't used in an * MLD connection, we get a separate allocation for it. */ ieee80211_link_init(sdata, -1, &sdata->deflink, &sdata->vif.bss_conf); } int ieee80211_add_virtual_monitor(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; int ret; ASSERT_RTNL(); lockdep_assert_wiphy(local->hw.wiphy); if (local->monitor_sdata) return 0; sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); if (!sdata) return -ENOMEM; /* set up data */ sdata->vif.type = NL80211_IFTYPE_MONITOR; snprintf(sdata->name, IFNAMSIZ, "%s-monitor", wiphy_name(local->hw.wiphy)); sdata->wdev.iftype = NL80211_IFTYPE_MONITOR; sdata->wdev.wiphy = local->hw.wiphy; ieee80211_sdata_init(local, sdata); ieee80211_set_default_queues(sdata); if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) { ret = drv_add_interface(local, sdata); if (WARN_ON(ret)) { /* ok .. stupid driver, it asked for this! 
*/ kfree(sdata); return ret; } } set_bit(SDATA_STATE_RUNNING, &sdata->state); ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR); if (ret) { kfree(sdata); return ret; } mutex_lock(&local->iflist_mtx); rcu_assign_pointer(local->monitor_sdata, sdata); mutex_unlock(&local->iflist_mtx); ret = ieee80211_link_use_channel(&sdata->deflink, &local->monitor_chanreq, IEEE80211_CHANCTX_EXCLUSIVE); if (ret) { mutex_lock(&local->iflist_mtx); RCU_INIT_POINTER(local->monitor_sdata, NULL); mutex_unlock(&local->iflist_mtx); synchronize_net(); drv_remove_interface(local, sdata); kfree(sdata); return ret; } skb_queue_head_init(&sdata->skb_queue); skb_queue_head_init(&sdata->status_queue); wiphy_work_init(&sdata->work, ieee80211_iface_work); return 0; } void ieee80211_del_virtual_monitor(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; ASSERT_RTNL(); lockdep_assert_wiphy(local->hw.wiphy); mutex_lock(&local->iflist_mtx); sdata = rcu_dereference_protected(local->monitor_sdata, lockdep_is_held(&local->iflist_mtx)); if (!sdata) { mutex_unlock(&local->iflist_mtx); return; } RCU_INIT_POINTER(local->monitor_sdata, NULL); mutex_unlock(&local->iflist_mtx); synchronize_net(); ieee80211_link_release_channel(&sdata->deflink); if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) drv_remove_interface(local, sdata); kfree(sdata); } /* * NOTE: Be very careful when changing this function, it must NOT return * an error on interface type changes that have been pre-checked, so most * checks should be in ieee80211_check_concurrent_iface. */ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); struct net_device *dev = wdev->netdev; struct ieee80211_local *local = sdata->local; u64 changed = 0; int res; u32 hw_reconf_flags = 0; lockdep_assert_wiphy(local->hw.wiphy); switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: { struct ieee80211_sub_if_data *master; if (!sdata->bss) return -ENOLINK; list_add(&sdata->u.vlan.list, &sdata->bss->vlans); master = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); sdata->control_port_protocol = master->control_port_protocol; sdata->control_port_no_encrypt = master->control_port_no_encrypt; sdata->control_port_over_nl80211 = master->control_port_over_nl80211; sdata->control_port_no_preauth = master->control_port_no_preauth; sdata->vif.cab_queue = master->vif.cab_queue; memcpy(sdata->vif.hw_queue, master->vif.hw_queue, sizeof(sdata->vif.hw_queue)); sdata->vif.bss_conf.chanreq = master->vif.bss_conf.chanreq; sdata->crypto_tx_tailroom_needed_cnt += master->crypto_tx_tailroom_needed_cnt; break; } case NL80211_IFTYPE_AP: sdata->bss = &sdata->u.ap; break; case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_NAN: /* no special treatment */ break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_WDS: /* cannot happen */ WARN_ON(1); break; } if (local->open_count == 0) { /* here we can consider everything in good order (again) */ local->reconfig_failure = false; res = drv_start(local); if (res) goto err_del_bss; ieee80211_led_radio(local, true); ieee80211_mod_tpt_led_trig(local, IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); } /* * Copy the hopefully now-present MAC address to * this interface, if it has the special null one. 
*/ if (dev && is_zero_ether_addr(dev->dev_addr)) { eth_hw_addr_set(dev, local->hw.wiphy->perm_addr); memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); if (!is_valid_ether_addr(dev->dev_addr)) { res = -EADDRNOTAVAIL; goto err_stop; } } switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: /* no need to tell driver, but set carrier and chanctx */ if (sdata->bss->active) { ieee80211_link_vlan_copy_chanctx(&sdata->deflink); netif_carrier_on(dev); ieee80211_set_vif_encap_ops(sdata); } else { netif_carrier_off(dev); } break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) { local->cooked_mntrs++; break; } if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) { res = drv_add_interface(local, sdata); if (res) goto err_stop; } else if (local->monitors == 0 && local->open_count == 0) { res = ieee80211_add_virtual_monitor(local); if (res) goto err_stop; } /* must be before the call to ieee80211_configure_filter */ local->monitors++; if (local->monitors == 1) { local->hw.conf.flags |= IEEE80211_CONF_MONITOR; hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; } ieee80211_adjust_monitor_flags(sdata, 1); ieee80211_configure_filter(local); ieee80211_recalc_offload(local); ieee80211_recalc_idle(local); netif_carrier_on(dev); break; default: if (coming_up) { ieee80211_del_virtual_monitor(local); ieee80211_set_sdata_offload_flags(sdata); res = drv_add_interface(local, sdata); if (res) goto err_stop; ieee80211_set_vif_encap_ops(sdata); res = ieee80211_check_queues(sdata, ieee80211_vif_type_p2p(&sdata->vif)); if (res) goto err_del_interface; } if (sdata->vif.type == NL80211_IFTYPE_AP) { local->fif_pspoll++; local->fif_probe_req++; ieee80211_configure_filter(local); } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { local->fif_probe_req++; } if (sdata->vif.probe_req_reg) drv_config_iface_filter(local, sdata, FIF_PROBE_REQ, FIF_PROBE_REQ); if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE && sdata->vif.type != NL80211_IFTYPE_NAN) changed |= ieee80211_reset_erp_info(sdata); ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed); switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_OCB: netif_carrier_off(dev); break; case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: break; default: /* not reached */ WARN_ON(1); } /* * Set default queue parameters so drivers don't * need to initialise the hardware if the hardware * doesn't start up with sane defaults. * Enable QoS for anything but station interfaces. */ ieee80211_set_wmm_default(&sdata->deflink, true, sdata->vif.type != NL80211_IFTYPE_STATION); } switch (sdata->vif.type) { case NL80211_IFTYPE_P2P_DEVICE: rcu_assign_pointer(local->p2p_sdata, sdata); break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) break; list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list); break; default: break; } /* * set_multicast_list will be invoked by the networking core * which will check whether any increments here were done in * error and sync them down to the hardware as filter flags. 
*/ if (sdata->flags & IEEE80211_SDATA_ALLMULTI) atomic_inc(&local->iff_allmultis); if (coming_up) local->open_count++; if (local->open_count == 1) ieee80211_hw_conf_init(local); else if (hw_reconf_flags) ieee80211_hw_config(local, hw_reconf_flags); ieee80211_recalc_ps(local); set_bit(SDATA_STATE_RUNNING, &sdata->state); return 0; err_del_interface: drv_remove_interface(local, sdata); err_stop: if (!local->open_count) drv_stop(local, false); err_del_bss: sdata->bss = NULL; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) list_del(&sdata->u.vlan.list); /* might already be clear but that doesn't matter */ clear_bit(SDATA_STATE_RUNNING, &sdata->state); return res; } static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_NO_QUEUE; dev->netdev_ops = &ieee80211_dataif_ops; dev->needs_free_netdev = true; } static void ieee80211_iface_process_skb(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_mgmt *mgmt = (void *)skb->data; lockdep_assert_wiphy(local->hw.wiphy); if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_BACK) { struct sta_info *sta; int len = skb->len; sta = sta_info_get_bss(sdata, mgmt->sa); if (sta) { switch (mgmt->u.action.u.addba_req.action_code) { case WLAN_ACTION_ADDBA_REQ: ieee80211_process_addba_request(local, sta, mgmt, len); break; case WLAN_ACTION_ADDBA_RESP: ieee80211_process_addba_resp(local, sta, mgmt, len); break; case WLAN_ACTION_DELBA: ieee80211_process_delba(sdata, sta, mgmt, len); break; default: WARN_ON(1); break; } } } else if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_VHT) { switch (mgmt->u.action.u.vht_group_notif.action_code) { case WLAN_VHT_ACTION_OPMODE_NOTIF: { struct ieee80211_rx_status *status; enum nl80211_band band; struct sta_info *sta; u8 opmode; status = IEEE80211_SKB_RXCB(skb); band = status->band; opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; sta = sta_info_get_bss(sdata, mgmt->sa); if (sta) ieee80211_vht_handle_opmode(sdata, &sta->deflink, opmode, band); break; } case WLAN_VHT_ACTION_GROUPID_MGMT: ieee80211_process_mu_groups(sdata, &sdata->deflink, mgmt); break; default: WARN_ON(1); break; } } else if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_S1G) { switch (mgmt->u.action.u.s1g.action_code) { case WLAN_S1G_TWT_TEARDOWN: case WLAN_S1G_TWT_SETUP: ieee80211_s1g_rx_twt_action(sdata, skb); break; default: break; } } else if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_EHT) { if (sdata->vif.type == NL80211_IFTYPE_STATION) { switch (mgmt->u.action.u.ttlm_req.action_code) { case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ: ieee80211_process_neg_ttlm_req(sdata, mgmt, skb->len); break; case WLAN_PROTECTED_EHT_ACTION_TTLM_RES: ieee80211_process_neg_ttlm_res(sdata, mgmt, skb->len); break; default: break; } } } else if (ieee80211_is_ext(mgmt->frame_control)) { if (sdata->vif.type == NL80211_IFTYPE_STATION) ieee80211_sta_rx_queued_ext(sdata, skb); else WARN_ON(1); } else if (ieee80211_is_data_qos(mgmt->frame_control)) { struct ieee80211_hdr *hdr = (void *)mgmt; struct sta_info *sta; /* * So the frame isn't mgmt, but frame_control * is at the right place anyway, of course, so * the if statement is correct. * * Warn if we have other data frame types here, * they must not get here. 
*/ WARN_ON(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)); WARN_ON(!(hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG))); /* * This was a fragment of a frame, received while * a block-ack session was active. That cannot be * right, so terminate the session. */ sta = sta_info_get_bss(sdata, mgmt->sa); if (sta) { u16 tid = ieee80211_get_tid(hdr); __ieee80211_stop_rx_ba_session( sta, tid, WLAN_BACK_RECIPIENT, WLAN_REASON_QSTA_REQUIRE_SETUP, true); } } else switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: ieee80211_sta_rx_queued_mgmt(sdata, skb); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_rx_queued_mgmt(sdata, skb); break; case NL80211_IFTYPE_MESH_POINT: if (!ieee80211_vif_is_mesh(&sdata->vif)) break; ieee80211_mesh_rx_queued_mgmt(sdata, skb); break; default: WARN(1, "frame for unexpected interface type"); break; } } static void ieee80211_iface_process_status(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_mgmt *mgmt = (void *)skb->data; if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_S1G) { switch (mgmt->u.action.u.s1g.action_code) { case WLAN_S1G_TWT_TEARDOWN: case WLAN_S1G_TWT_SETUP: ieee80211_s1g_status_twt_action(sdata, skb); break; default: break; } } } static void ieee80211_iface_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, work); struct ieee80211_local *local = sdata->local; struct sk_buff *skb; if (!ieee80211_sdata_running(sdata)) return; if (test_bit(SCAN_SW_SCANNING, &local->scanning)) return; if (!ieee80211_can_run_worker(local)) return; /* first process frames */ while ((skb = skb_dequeue(&sdata->skb_queue))) { kcov_remote_start_common(skb_get_kcov_handle(skb)); if (skb->protocol == cpu_to_be16(ETH_P_TDLS)) ieee80211_process_tdls_channel_switch(sdata, skb); else ieee80211_iface_process_skb(local, sdata, skb); kfree_skb(skb); kcov_remote_stop(); } /* process status queue */ while ((skb = skb_dequeue(&sdata->status_queue))) { kcov_remote_start_common(skb_get_kcov_handle(skb)); ieee80211_iface_process_status(sdata, skb); kfree_skb(skb); kcov_remote_stop(); } /* then other type-dependent work */ switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: ieee80211_sta_work(sdata); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_work(sdata); break; case NL80211_IFTYPE_MESH_POINT: if (!ieee80211_vif_is_mesh(&sdata->vif)) break; ieee80211_mesh_work(sdata); break; case NL80211_IFTYPE_OCB: ieee80211_ocb_work(sdata); break; default: break; } } static void ieee80211_activate_links_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, activate_links_work); struct ieee80211_local *local = wiphy_priv(wiphy); if (local->in_reconfig) return; ieee80211_set_active_links(&sdata->vif, sdata->desired_active_links); sdata->desired_active_links = 0; } /* * Helper function to initialise an interface to a specific type. 
*/ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { static const u8 bssid_wildcard[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; /* clear type-dependent unions */ memset(&sdata->u, 0, sizeof(sdata->u)); memset(&sdata->deflink.u, 0, sizeof(sdata->deflink.u)); /* and set some type-dependent values */ sdata->vif.type = type; sdata->vif.p2p = false; sdata->wdev.iftype = type; sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE); sdata->control_port_no_encrypt = false; sdata->control_port_over_nl80211 = false; sdata->control_port_no_preauth = false; sdata->vif.cfg.idle = true; sdata->vif.bss_conf.txpower = INT_MIN; /* unset */ sdata->noack_map = 0; /* only monitor/p2p-device differ */ if (sdata->dev) { sdata->dev->netdev_ops = &ieee80211_dataif_ops; sdata->dev->type = ARPHRD_ETHER; } skb_queue_head_init(&sdata->skb_queue); skb_queue_head_init(&sdata->status_queue); wiphy_work_init(&sdata->work, ieee80211_iface_work); wiphy_work_init(&sdata->activate_links_work, ieee80211_activate_links_work); switch (type) { case NL80211_IFTYPE_P2P_GO: type = NL80211_IFTYPE_AP; sdata->vif.type = type; sdata->vif.p2p = true; fallthrough; case NL80211_IFTYPE_AP: skb_queue_head_init(&sdata->u.ap.ps.bc_buf); INIT_LIST_HEAD(&sdata->u.ap.vlans); sdata->vif.bss_conf.bssid = sdata->vif.addr; break; case NL80211_IFTYPE_P2P_CLIENT: type = NL80211_IFTYPE_STATION; sdata->vif.type = type; sdata->vif.p2p = true; fallthrough; case NL80211_IFTYPE_STATION: sdata->vif.bss_conf.bssid = sdata->deflink.u.mgd.bssid; ieee80211_sta_setup_sdata(sdata); break; case NL80211_IFTYPE_OCB: sdata->vif.bss_conf.bssid = bssid_wildcard; ieee80211_ocb_setup_sdata(sdata); break; case NL80211_IFTYPE_ADHOC: sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid; ieee80211_ibss_setup_sdata(sdata); break; case NL80211_IFTYPE_MESH_POINT: if (ieee80211_vif_is_mesh(&sdata->vif)) ieee80211_mesh_init_sdata(sdata); break; case NL80211_IFTYPE_MONITOR: sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; sdata->dev->netdev_ops = &ieee80211_monitorif_ops; sdata->u.mntr.flags = MONITOR_FLAG_CONTROL | MONITOR_FLAG_OTHER_BSS; break; case NL80211_IFTYPE_NAN: idr_init(&sdata->u.nan.function_inst_ids); spin_lock_init(&sdata->u.nan.func_lock); sdata->vif.bss_conf.bssid = sdata->vif.addr; break; case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_DEVICE: sdata->vif.bss_conf.bssid = sdata->vif.addr; break; case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_WDS: case NUM_NL80211_IFTYPES: WARN_ON(1); break; } /* need to do this after the switch so vif.type is correct */ ieee80211_link_setup(&sdata->deflink); ieee80211_debugfs_recreate_netdev(sdata, false); } static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { struct ieee80211_local *local = sdata->local; int ret, err; enum nl80211_iftype internal_type = type; bool p2p = false; ASSERT_RTNL(); if (!local->ops->change_interface) return -EBUSY; /* for now, don't support changing while links exist */ if (ieee80211_vif_is_mld(&sdata->vif)) return -EBUSY; switch (sdata->vif.type) { case NL80211_IFTYPE_AP: if (!list_empty(&sdata->u.ap.vlans)) return -EBUSY; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_OCB: /* * Could maybe also all others here? * Just not sure how that interacts * with the RX/config path e.g. for * mesh. 
*/ break; default: return -EBUSY; } switch (type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_OCB: /* * Could probably support everything * but here. */ break; case NL80211_IFTYPE_P2P_CLIENT: p2p = true; internal_type = NL80211_IFTYPE_STATION; break; case NL80211_IFTYPE_P2P_GO: p2p = true; internal_type = NL80211_IFTYPE_AP; break; default: return -EBUSY; } ret = ieee80211_check_concurrent_iface(sdata, internal_type); if (ret) return ret; ieee80211_stop_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); /* do_stop will synchronize_rcu() first thing */ ieee80211_do_stop(sdata, false); ieee80211_teardown_sdata(sdata); ieee80211_set_sdata_offload_flags(sdata); ret = drv_change_interface(local, sdata, internal_type, p2p); if (ret) type = ieee80211_vif_type_p2p(&sdata->vif); /* * Ignore return value here, there's not much we can do since * the driver changed the interface type internally already. * The warnings will hopefully make driver authors fix it :-) */ ieee80211_check_queues(sdata, type); ieee80211_setup_sdata(sdata, type); ieee80211_set_vif_encap_ops(sdata); err = ieee80211_do_open(&sdata->wdev, false); WARN(err, "type change: do_open returned %d", err); ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); return ret; } int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { int ret; ASSERT_RTNL(); if (type == ieee80211_vif_type_p2p(&sdata->vif)) return 0; if (ieee80211_sdata_running(sdata)) { ret = ieee80211_runtime_change_iftype(sdata, type); if (ret) return ret; } else { /* Purge and reset type-dependent state. */ ieee80211_teardown_sdata(sdata); ieee80211_setup_sdata(sdata, type); } /* reset some values that shouldn't be kept across type changes */ if (type == NL80211_IFTYPE_STATION) sdata->u.mgd.use_4addr = false; return 0; } static void ieee80211_assign_perm_addr(struct ieee80211_local *local, u8 *perm_addr, enum nl80211_iftype type) { struct ieee80211_sub_if_data *sdata; u64 mask, start, addr, val, inc; u8 *m; u8 tmp_addr[ETH_ALEN]; int i; lockdep_assert_wiphy(local->hw.wiphy); /* default ... 
something at least */ memcpy(perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN); if (is_zero_ether_addr(local->hw.wiphy->addr_mask) && local->hw.wiphy->n_addresses <= 1) return; switch (type) { case NL80211_IFTYPE_MONITOR: /* doesn't matter */ break; case NL80211_IFTYPE_AP_VLAN: /* match up with an AP interface */ list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_AP) continue; memcpy(perm_addr, sdata->vif.addr, ETH_ALEN); break; } /* keep default if no AP interface present */ break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: if (ieee80211_hw_check(&local->hw, P2P_DEV_ADDR_FOR_INTF)) { list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) continue; if (!ieee80211_sdata_running(sdata)) continue; memcpy(perm_addr, sdata->vif.addr, ETH_ALEN); return; } } fallthrough; default: /* assign a new address if possible -- try n_addresses first */ for (i = 0; i < local->hw.wiphy->n_addresses; i++) { bool used = false; list_for_each_entry(sdata, &local->interfaces, list) { if (ether_addr_equal(local->hw.wiphy->addresses[i].addr, sdata->vif.addr)) { used = true; break; } } if (!used) { memcpy(perm_addr, local->hw.wiphy->addresses[i].addr, ETH_ALEN); break; } } /* try mask if available */ if (is_zero_ether_addr(local->hw.wiphy->addr_mask)) break; m = local->hw.wiphy->addr_mask; mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); if (__ffs64(mask) + hweight64(mask) != fls64(mask)) { /* not a contiguous mask ... not handled now! */ pr_info("not contiguous\n"); break; } /* * Pick address of existing interface in case user changed * MAC address manually, default to perm_addr. */ m = local->hw.wiphy->perm_addr; list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type == NL80211_IFTYPE_MONITOR) continue; m = sdata->vif.addr; break; } start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); inc = 1ULL<<__ffs64(mask); val = (start & mask); addr = (start & ~mask) | (val & mask); do { bool used = false; tmp_addr[5] = addr >> 0*8; tmp_addr[4] = addr >> 1*8; tmp_addr[3] = addr >> 2*8; tmp_addr[2] = addr >> 3*8; tmp_addr[1] = addr >> 4*8; tmp_addr[0] = addr >> 5*8; val += inc; list_for_each_entry(sdata, &local->interfaces, list) { if (ether_addr_equal(tmp_addr, sdata->vif.addr)) { used = true; break; } } if (!used) { memcpy(perm_addr, tmp_addr, ETH_ALEN); break; } addr = (start & ~mask) | (val & mask); } while (addr != start); break; } } int ieee80211_if_add(struct ieee80211_local *local, const char *name, unsigned char name_assign_type, struct wireless_dev **new_wdev, enum nl80211_iftype type, struct vif_params *params) { struct net_device *ndev = NULL; struct ieee80211_sub_if_data *sdata = NULL; struct txq_info *txqi; int ret, i; ASSERT_RTNL(); lockdep_assert_wiphy(local->hw.wiphy); if (type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN) { struct wireless_dev *wdev; sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); if (!sdata) return -ENOMEM; wdev = &sdata->wdev; sdata->dev = NULL; strscpy(sdata->name, name, IFNAMSIZ); ieee80211_assign_perm_addr(local, wdev->address, type); memcpy(sdata->vif.addr, wdev->address, ETH_ALEN); ether_addr_copy(sdata->vif.bss_conf.addr, sdata->vif.addr); } else { int size = ALIGN(sizeof(*sdata) + local->hw.vif_data_size, sizeof(void *)); int txq_size = 0; if (type != 
NL80211_IFTYPE_AP_VLAN && (type != NL80211_IFTYPE_MONITOR || (params->flags & MONITOR_FLAG_ACTIVE))) txq_size += sizeof(struct txq_info) + local->hw.txq_data_size; ndev = alloc_netdev_mqs(size + txq_size, name, name_assign_type, ieee80211_if_setup, 1, 1); if (!ndev) return -ENOMEM; dev_net_set(ndev, wiphy_net(local->hw.wiphy)); ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; ndev->needed_headroom = local->tx_headroom + 4*6 /* four MAC addresses */ + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */ + 6 /* mesh */ + 8 /* rfc1042/bridge tunnel */ - ETH_HLEN /* ethernet hard_header_len */ + IEEE80211_ENCRYPT_HEADROOM; ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; ret = dev_alloc_name(ndev, ndev->name); if (ret < 0) { free_netdev(ndev); return ret; } ieee80211_assign_perm_addr(local, ndev->perm_addr, type); if (is_valid_ether_addr(params->macaddr)) eth_hw_addr_set(ndev, params->macaddr); else eth_hw_addr_set(ndev, ndev->perm_addr); SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); /* don't use IEEE80211_DEV_TO_SUB_IF -- it checks too much */ sdata = netdev_priv(ndev); ndev->ieee80211_ptr = &sdata->wdev; memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN); ether_addr_copy(sdata->vif.bss_conf.addr, sdata->vif.addr); memcpy(sdata->name, ndev->name, IFNAMSIZ); if (txq_size) { txqi = netdev_priv(ndev) + size; ieee80211_txq_init(sdata, NULL, txqi, 0); } sdata->dev = ndev; } /* initialise type-independent data */ sdata->wdev.wiphy = local->hw.wiphy; ieee80211_sdata_init(local, sdata); ieee80211_init_frag_cache(&sdata->frags); INIT_LIST_HEAD(&sdata->key_list); wiphy_delayed_work_init(&sdata->dec_tailroom_needed_wk, ieee80211_delayed_tailroom_dec); for (i = 0; i < NUM_NL80211_BANDS; i++) { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[i]; sdata->rc_rateidx_mask[i] = sband ? (1 << sband->n_bitrates) - 1 : 0; if (sband) { __le16 cap; u16 *vht_rate_mask; memcpy(sdata->rc_rateidx_mcs_mask[i], sband->ht_cap.mcs.rx_mask, sizeof(sdata->rc_rateidx_mcs_mask[i])); cap = sband->vht_cap.vht_mcs.rx_mcs_map; vht_rate_mask = sdata->rc_rateidx_vht_mcs_mask[i]; ieee80211_get_vht_mask_from_cap(cap, vht_rate_mask); } else { memset(sdata->rc_rateidx_mcs_mask[i], 0, sizeof(sdata->rc_rateidx_mcs_mask[i])); memset(sdata->rc_rateidx_vht_mcs_mask[i], 0, sizeof(sdata->rc_rateidx_vht_mcs_mask[i])); } } ieee80211_set_default_queues(sdata); sdata->deflink.ap_power_level = IEEE80211_UNSET_POWER_LEVEL; sdata->deflink.user_power_level = local->user_power_level; /* setup type-dependent data */ ieee80211_setup_sdata(sdata, type); if (ndev) { ndev->ieee80211_ptr->use_4addr = params->use_4addr; if (type == NL80211_IFTYPE_STATION) sdata->u.mgd.use_4addr = params->use_4addr; ndev->features |= local->hw.netdev_features; ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; ndev->hw_features |= ndev->features & MAC80211_SUPPORTED_FEATURES_TX; sdata->vif.netdev_features = local->hw.netdev_features; netdev_set_default_ethtool_ops(ndev, &ieee80211_ethtool_ops); /* MTU range is normally 256 - 2304, where the upper limit is * the maximum MSDU size. Monitor interfaces send and receive * MPDU and A-MSDU frames which may be much larger so we do * not impose an upper limit in that case. 
*/ ndev->min_mtu = 256; if (type == NL80211_IFTYPE_MONITOR) ndev->max_mtu = 0; else ndev->max_mtu = local->hw.max_mtu; ret = cfg80211_register_netdevice(ndev); if (ret) { free_netdev(ndev); return ret; } } mutex_lock(&local->iflist_mtx); list_add_tail_rcu(&sdata->list, &local->interfaces); mutex_unlock(&local->iflist_mtx); if (new_wdev) *new_wdev = &sdata->wdev; return 0; } void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) { ASSERT_RTNL(); lockdep_assert_wiphy(sdata->local->hw.wiphy); mutex_lock(&sdata->local->iflist_mtx); list_del_rcu(&sdata->list); mutex_unlock(&sdata->local->iflist_mtx); if (sdata->vif.txq) ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq)); synchronize_rcu(); cfg80211_unregister_wdev(&sdata->wdev); if (!sdata->dev) { ieee80211_teardown_sdata(sdata); kfree(sdata); } } void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata) { if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state))) return; ieee80211_do_stop(sdata, true); } void ieee80211_remove_interfaces(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata, *tmp; LIST_HEAD(unreg_list); ASSERT_RTNL(); /* Before destroying the interfaces, make sure they're all stopped so * that the hardware is stopped. Otherwise, the driver might still be * iterating the interfaces during the shutdown, e.g. from a worker * or from RX processing or similar, and if it does so (using atomic * iteration) while we're manipulating the list, the iteration will * crash. * * After this, the hardware should be stopped and the driver should * have stopped all of its activities, so that we can do RCU-unaware * manipulations of the interface list below. */ cfg80211_shutdown_all_interfaces(local->hw.wiphy); wiphy_lock(local->hw.wiphy); WARN(local->open_count, "%s: open count remains %d\n", wiphy_name(local->hw.wiphy), local->open_count); mutex_lock(&local->iflist_mtx); list_splice_init(&local->interfaces, &unreg_list); mutex_unlock(&local->iflist_mtx); list_for_each_entry_safe(sdata, tmp, &unreg_list, list) { bool netdev = sdata->dev; /* * Remove IP addresses explicitly, since the notifier will * skip the callbacks if wdev->registered is false, since * we can't acquire the wiphy_lock() again there if already * inside this locked section. 
*/ sdata->vif.cfg.arp_addr_cnt = 0; if (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.associated) ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_ARP_FILTER); list_del(&sdata->list); cfg80211_unregister_wdev(&sdata->wdev); if (!netdev) kfree(sdata); } wiphy_unlock(local->hw.wiphy); } static int netdev_notify(struct notifier_block *nb, unsigned long state, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ieee80211_sub_if_data *sdata; if (state != NETDEV_CHANGENAME) return NOTIFY_DONE; if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy) return NOTIFY_DONE; if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) return NOTIFY_DONE; sdata = IEEE80211_DEV_TO_SUB_IF(dev); memcpy(sdata->name, dev->name, IFNAMSIZ); ieee80211_debugfs_rename_netdev(sdata); return NOTIFY_OK; } static struct notifier_block mac80211_netdev_notifier = { .notifier_call = netdev_notify, }; int ieee80211_iface_init(void) { return register_netdevice_notifier(&mac80211_netdev_notifier); } void ieee80211_iface_exit(void) { unregister_netdevice_notifier(&mac80211_netdev_notifier); } void ieee80211_vif_inc_num_mcast(struct ieee80211_sub_if_data *sdata) { if (sdata->vif.type == NL80211_IFTYPE_AP) atomic_inc(&sdata->u.ap.num_mcast_sta); else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) atomic_inc(&sdata->u.vlan.num_mcast_sta); } void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata) { if (sdata->vif.type == NL80211_IFTYPE_AP) atomic_dec(&sdata->u.ap.num_mcast_sta); else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) atomic_dec(&sdata->u.vlan.num_mcast_sta); } void ieee80211_vif_block_queues_csa(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; if (ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) return; ieee80211_stop_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); sdata->csa_blocked_queues = true; } void ieee80211_vif_unblock_queues_csa(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; if (sdata->csa_blocked_queues) { ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); sdata->csa_blocked_queues = false; } } |
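/*
 * Illustrative sketch (not part of the mac80211 sources above): a
 * read-only traversal of the interface list relying on RCU, as
 * permitted by the "Interface list locking" DOC comment in iface.c.
 * Writers hold the RTNL, the wiphy mutex and iflist_mtx and modify the
 * list with list_add_tail_rcu()/list_del_rcu(), so a reader only needs
 * an RCU-safe block. The helper name below is hypothetical.
 */
static void example_walk_running_interfaces(struct ieee80211_local *local)
{
	struct ieee80211_sub_if_data *sdata;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;
		/* sdata->name and sdata->vif.type are set up in ieee80211_if_add() */
		pr_debug("%s: vif type %d is running\n",
			 sdata->name, sdata->vif.type);
	}
	rcu_read_unlock();
}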
904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 | // SPDX-License-Identifier: GPL-2.0 /* * usb-serial driver for Quatech USB 2 devices * * Copyright (C) 2012 Bill Pemberton (wfp5p@virginia.edu) * * These devices all have only 1 bulk in and 1 bulk out that is shared * for all serial ports. * */ #include <linux/unaligned.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/serial.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial_reg.h> #include <linux/uaccess.h> /* default urb timeout for usb operations */ #define QT2_USB_TIMEOUT USB_CTRL_SET_TIMEOUT #define QT_OPEN_CLOSE_CHANNEL 0xca #define QT_SET_GET_DEVICE 0xc2 #define QT_SET_GET_REGISTER 0xc0 #define QT_GET_SET_PREBUF_TRIG_LVL 0xcc #define QT_SET_ATF 0xcd #define QT_TRANSFER_IN 0xc0 #define QT_HW_FLOW_CONTROL_MASK 0xc5 #define QT_SW_FLOW_CONTROL_MASK 0xc6 #define QT2_BREAK_CONTROL 0xc8 #define QT2_GET_SET_UART 0xc1 #define QT2_FLUSH_DEVICE 0xc4 #define QT2_GET_SET_QMCR 0xe1 #define QT2_QMCR_RS232 0x40 #define QT2_QMCR_RS422 0x10 #define SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS) #define SERIAL_EVEN_PARITY (UART_LCR_PARITY | UART_LCR_EPAR) /* status bytes for the device */ #define QT2_CONTROL_BYTE 0x1b #define QT2_LINE_STATUS 0x00 /* following 1 byte is line status */ #define QT2_MODEM_STATUS 0x01 /* following 1 byte is modem status */ #define QT2_XMIT_HOLD 0x02 /* following 2 bytes are ?? */ #define QT2_CHANGE_PORT 0x03 /* following 1 byte is port to change to */ #define QT2_REC_FLUSH 0x04 /* no following info */ #define QT2_XMIT_FLUSH 0x05 /* no following info */ #define QT2_CONTROL_ESCAPE 0xff /* pass through previous 2 control bytes */ #define MAX_BAUD_RATE 921600 #define DEFAULT_BAUD_RATE 9600 #define QT2_READ_BUFFER_SIZE 512 /* size of read buffer */ #define QT2_WRITE_BUFFER_SIZE 512 /* size of write buffer */ #define QT2_WRITE_CONTROL_SIZE 5 /* control bytes used for a write */ #define DRIVER_DESC "Quatech 2nd gen USB to Serial Driver" #define USB_VENDOR_ID_QUATECH 0x061d #define QUATECH_SSU2_100 0xC120 /* RS232 single port */ #define QUATECH_DSU2_100 0xC140 /* RS232 dual port */ #define QUATECH_DSU2_400 0xC150 /* RS232/422/485 dual port */ #define QUATECH_QSU2_100 0xC160 /* RS232 four port */ #define QUATECH_QSU2_400 0xC170 /* RS232/422/485 four port */ #define QUATECH_ESU2_100 0xC1A0 /* RS232 eight port */ #define QUATECH_ESU2_400 0xC180 /* RS232/422/485 eight port */ struct qt2_device_detail { int product_id; int num_ports; }; #define QT_DETAILS(prod, ports) \ .product_id = (prod), \ .num_ports = (ports) static const struct qt2_device_detail qt2_device_details[] = { {QT_DETAILS(QUATECH_SSU2_100, 1)}, {QT_DETAILS(QUATECH_DSU2_400, 2)}, {QT_DETAILS(QUATECH_DSU2_100, 2)}, {QT_DETAILS(QUATECH_QSU2_400, 4)}, {QT_DETAILS(QUATECH_QSU2_100, 4)}, {QT_DETAILS(QUATECH_ESU2_400, 8)}, {QT_DETAILS(QUATECH_ESU2_100, 8)}, {QT_DETAILS(0, 0)} /* Terminating entry */ }; static const struct usb_device_id id_table[] = { {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU2_100)}, {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU2_100)}, {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU2_400)}, {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU2_100)}, {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU2_400)}, {USB_DEVICE(USB_VENDOR_ID_QUATECH, 
QUATECH_ESU2_100)}, {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU2_400)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); struct qt2_serial_private { unsigned char current_port; /* current port for incoming data */ struct urb *read_urb; /* shared among all ports */ char *read_buffer; }; struct qt2_port_private { u8 device_port; spinlock_t urb_lock; bool urb_in_use; struct urb *write_urb; char *write_buffer; spinlock_t lock; u8 shadowLSR; u8 shadowMSR; struct usb_serial_port *port; }; static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch); static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch); static void qt2_write_bulk_callback(struct urb *urb); static void qt2_read_bulk_callback(struct urb *urb); static void qt2_release(struct usb_serial *serial) { struct qt2_serial_private *serial_priv; serial_priv = usb_get_serial_data(serial); usb_kill_urb(serial_priv->read_urb); usb_free_urb(serial_priv->read_urb); kfree(serial_priv->read_buffer); kfree(serial_priv); } static inline int calc_baud_divisor(int baudrate) { int divisor, rem; divisor = MAX_BAUD_RATE / baudrate; rem = MAX_BAUD_RATE % baudrate; /* Round to nearest divisor */ if (((rem * 2) >= baudrate) && (baudrate != 110)) divisor++; return divisor; } static inline int qt2_set_port_config(struct usb_device *dev, unsigned char port_number, u16 baudrate, u16 lcr) { int divisor = calc_baud_divisor(baudrate); u16 index = ((u16) (lcr << 8) | (u16) (port_number)); return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), QT2_GET_SET_UART, 0x40, divisor, index, NULL, 0, QT2_USB_TIMEOUT); } static inline int qt2_control_msg(struct usb_device *dev, u8 request, u16 data, u16 index) { return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request, 0x40, data, index, NULL, 0, QT2_USB_TIMEOUT); } static inline int qt2_getregister(struct usb_device *dev, u8 uart, u8 reg, u8 *data) { int ret; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), QT_SET_GET_REGISTER, 0xc0, reg, uart, data, sizeof(*data), QT2_USB_TIMEOUT); if (ret < (int)sizeof(*data)) { if (ret >= 0) ret = -EIO; } return ret; } static inline int qt2_setregister(struct usb_device *dev, u8 uart, u8 reg, u16 data) { u16 value = (data << 8) | reg; return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), QT_SET_GET_REGISTER, 0x40, value, uart, NULL, 0, QT2_USB_TIMEOUT); } static inline int update_mctrl(struct qt2_port_private *port_priv, unsigned int set, unsigned int clear) { struct usb_serial_port *port = port_priv->port; struct usb_device *dev = port->serial->dev; unsigned urb_value; int status; if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0) { dev_dbg(&port->dev, "update_mctrl - DTR|RTS not being set|cleared\n"); return 0; /* no change */ } clear &= ~set; /* 'set' takes precedence over 'clear' */ urb_value = 0; if (set & TIOCM_DTR) urb_value |= UART_MCR_DTR; if (set & TIOCM_RTS) urb_value |= UART_MCR_RTS; status = qt2_setregister(dev, port_priv->device_port, UART_MCR, urb_value); if (status < 0) dev_err(&port->dev, "update_mctrl - Error from MODEM_CTRL urb: %i\n", status); return status; } static int qt2_calc_num_ports(struct usb_serial *serial, struct usb_serial_endpoints *epds) { struct qt2_device_detail d; int i; for (i = 0; d = qt2_device_details[i], d.product_id != 0; i++) { if (d.product_id == le16_to_cpu(serial->dev->descriptor.idProduct)) return d.num_ports; } /* we didn't recognize the device */ dev_err(&serial->dev->dev, "don't know the number of ports, assuming 1\n"); return 1; } static void qt2_set_termios(struct tty_struct 
*tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct usb_device *dev = port->serial->dev; struct qt2_port_private *port_priv; struct ktermios *termios = &tty->termios; u16 baud; unsigned int cflag = termios->c_cflag; u16 new_lcr = 0; int status; port_priv = usb_get_serial_port_data(port); if (cflag & PARENB) { if (cflag & PARODD) new_lcr |= UART_LCR_PARITY; else new_lcr |= SERIAL_EVEN_PARITY; } new_lcr |= UART_LCR_WLEN(tty_get_char_size(cflag)); baud = tty_get_baud_rate(tty); if (!baud) baud = 9600; status = qt2_set_port_config(dev, port_priv->device_port, baud, new_lcr); if (status < 0) dev_err(&port->dev, "%s - qt2_set_port_config failed: %i\n", __func__, status); if (cflag & CRTSCTS) status = qt2_control_msg(dev, QT_HW_FLOW_CONTROL_MASK, SERIAL_CRTSCTS, port_priv->device_port); else status = qt2_control_msg(dev, QT_HW_FLOW_CONTROL_MASK, 0, port_priv->device_port); if (status < 0) dev_err(&port->dev, "%s - set HW flow control failed: %i\n", __func__, status); if (I_IXOFF(tty) || I_IXON(tty)) { u16 x = ((u16) (START_CHAR(tty) << 8) | (u16) (STOP_CHAR(tty))); status = qt2_control_msg(dev, QT_SW_FLOW_CONTROL_MASK, x, port_priv->device_port); } else status = qt2_control_msg(dev, QT_SW_FLOW_CONTROL_MASK, 0, port_priv->device_port); if (status < 0) dev_err(&port->dev, "%s - set SW flow control failed: %i\n", __func__, status); } static int qt2_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial; struct qt2_port_private *port_priv; u8 *data; u16 device_port; int status; unsigned long flags; device_port = port->port_number; serial = port->serial; port_priv = usb_get_serial_port_data(port); /* set the port to RS232 mode */ status = qt2_control_msg(serial->dev, QT2_GET_SET_QMCR, QT2_QMCR_RS232, device_port); if (status < 0) { dev_err(&port->dev, "%s failed to set RS232 mode for port %i error %i\n", __func__, device_port, status); return status; } data = kzalloc(2, GFP_KERNEL); if (!data) return -ENOMEM; /* open the port */ status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), QT_OPEN_CLOSE_CHANNEL, 0xc0, 0, device_port, data, 2, QT2_USB_TIMEOUT); if (status < 2) { dev_err(&port->dev, "%s - open port failed %i\n", __func__, status); if (status >= 0) status = -EIO; kfree(data); return status; } spin_lock_irqsave(&port_priv->lock, flags); port_priv->shadowLSR = data[0]; port_priv->shadowMSR = data[1]; spin_unlock_irqrestore(&port_priv->lock, flags); kfree(data); /* set to default speed and 8bit word size */ status = qt2_set_port_config(serial->dev, device_port, DEFAULT_BAUD_RATE, UART_LCR_WLEN8); if (status < 0) { dev_err(&port->dev, "%s - initial setup failed (%i)\n", __func__, device_port); return status; } port_priv->device_port = (u8) device_port; if (tty) qt2_set_termios(tty, port, &tty->termios); return 0; } static void qt2_close(struct usb_serial_port *port) { struct usb_serial *serial; struct qt2_port_private *port_priv; int i; serial = port->serial; port_priv = usb_get_serial_port_data(port); usb_kill_urb(port_priv->write_urb); /* flush the port transmit buffer */ i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), QT2_FLUSH_DEVICE, 0x40, 1, port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT); if (i < 0) dev_err(&port->dev, "%s - transmit buffer flush failed: %i\n", __func__, i); /* flush the port receive buffer */ i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), QT2_FLUSH_DEVICE, 0x40, 0, port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT); if (i < 0) dev_err(&port->dev, "%s - receive 
buffer flush failed: %i\n", __func__, i); /* close the port */ i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), QT_OPEN_CLOSE_CHANNEL, 0x40, 0, port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT); if (i < 0) dev_err(&port->dev, "%s - close port failed %i\n", __func__, i); } static void qt2_disconnect(struct usb_serial *serial) { struct qt2_serial_private *serial_priv = usb_get_serial_data(serial); usb_kill_urb(serial_priv->read_urb); } static void qt2_process_status(struct usb_serial_port *port, unsigned char *ch) { switch (*ch) { case QT2_LINE_STATUS: qt2_update_lsr(port, ch + 1); break; case QT2_MODEM_STATUS: qt2_update_msr(port, ch + 1); break; } } static void qt2_process_read_urb(struct urb *urb) { struct usb_serial *serial; struct qt2_serial_private *serial_priv; struct usb_serial_port *port; bool escapeflag; unsigned char *ch; int i; unsigned char newport; int len = urb->actual_length; if (!len) return; ch = urb->transfer_buffer; serial = urb->context; serial_priv = usb_get_serial_data(serial); port = serial->port[serial_priv->current_port]; for (i = 0; i < urb->actual_length; i++) { ch = (unsigned char *)urb->transfer_buffer + i; if ((i <= (len - 3)) && (*ch == QT2_CONTROL_BYTE) && (*(ch + 1) == QT2_CONTROL_BYTE)) { escapeflag = false; switch (*(ch + 2)) { case QT2_LINE_STATUS: case QT2_MODEM_STATUS: if (i > (len - 4)) { dev_warn(&port->dev, "%s - status message too short\n", __func__); break; } qt2_process_status(port, ch + 2); i += 3; escapeflag = true; break; case QT2_XMIT_HOLD: if (i > (len - 5)) { dev_warn(&port->dev, "%s - xmit_empty message too short\n", __func__); break; } /* bytes_written = (ch[1] << 4) + ch[0]; */ i += 4; escapeflag = true; break; case QT2_CHANGE_PORT: if (i > (len - 4)) { dev_warn(&port->dev, "%s - change_port message too short\n", __func__); break; } tty_flip_buffer_push(&port->port); newport = *(ch + 3); if (newport > serial->num_ports) { dev_err(&port->dev, "%s - port change to invalid port: %i\n", __func__, newport); break; } serial_priv->current_port = newport; port = serial->port[serial_priv->current_port]; i += 3; escapeflag = true; break; case QT2_REC_FLUSH: case QT2_XMIT_FLUSH: i += 2; escapeflag = true; break; case QT2_CONTROL_ESCAPE: tty_insert_flip_string(&port->port, ch, 2); i += 2; escapeflag = true; break; default: dev_warn(&port->dev, "%s - unsupported command %i\n", __func__, *(ch + 2)); break; } if (escapeflag) continue; } tty_insert_flip_char(&port->port, *ch, TTY_NORMAL); } tty_flip_buffer_push(&port->port); } static void qt2_write_bulk_callback(struct urb *urb) { struct usb_serial_port *port; struct qt2_port_private *port_priv; unsigned long flags; port = urb->context; port_priv = usb_get_serial_port_data(port); spin_lock_irqsave(&port_priv->urb_lock, flags); port_priv->urb_in_use = false; usb_serial_port_softint(port); spin_unlock_irqrestore(&port_priv->urb_lock, flags); } static void qt2_read_bulk_callback(struct urb *urb) { struct usb_serial *serial = urb->context; int status; if (urb->status) { dev_warn(&serial->dev->dev, "%s - non-zero urb status: %i\n", __func__, urb->status); return; } qt2_process_read_urb(urb); status = usb_submit_urb(urb, GFP_ATOMIC); if (status != 0) dev_err(&serial->dev->dev, "%s - resubmit read urb failed: %i\n", __func__, status); } static int qt2_setup_urbs(struct usb_serial *serial) { struct usb_serial_port *port0; struct qt2_serial_private *serial_priv; int status; port0 = serial->port[0]; serial_priv = usb_get_serial_data(serial); serial_priv->read_urb = usb_alloc_urb(0, GFP_KERNEL); if 
(!serial_priv->read_urb) return -ENOMEM; usb_fill_bulk_urb(serial_priv->read_urb, serial->dev, usb_rcvbulkpipe(serial->dev, port0->bulk_in_endpointAddress), serial_priv->read_buffer, QT2_READ_BUFFER_SIZE, qt2_read_bulk_callback, serial); status = usb_submit_urb(serial_priv->read_urb, GFP_KERNEL); if (status != 0) { dev_err(&serial->dev->dev, "%s - submit read urb failed %i\n", __func__, status); usb_free_urb(serial_priv->read_urb); return status; } return 0; } static int qt2_attach(struct usb_serial *serial) { struct qt2_serial_private *serial_priv; int status; /* power on unit */ status = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 0xc2, 0x40, 0x8000, 0, NULL, 0, QT2_USB_TIMEOUT); if (status < 0) { dev_err(&serial->dev->dev, "%s - failed to power on unit: %i\n", __func__, status); return status; } serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL); if (!serial_priv) return -ENOMEM; serial_priv->read_buffer = kmalloc(QT2_READ_BUFFER_SIZE, GFP_KERNEL); if (!serial_priv->read_buffer) { status = -ENOMEM; goto err_buf; } usb_set_serial_data(serial, serial_priv); status = qt2_setup_urbs(serial); if (status != 0) goto attach_failed; return 0; attach_failed: kfree(serial_priv->read_buffer); err_buf: kfree(serial_priv); return status; } static int qt2_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct qt2_port_private *port_priv; u8 bEndpointAddress; port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL); if (!port_priv) return -ENOMEM; spin_lock_init(&port_priv->lock); spin_lock_init(&port_priv->urb_lock); port_priv->port = port; port_priv->write_buffer = kmalloc(QT2_WRITE_BUFFER_SIZE, GFP_KERNEL); if (!port_priv->write_buffer) goto err_buf; port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL); if (!port_priv->write_urb) goto err_urb; bEndpointAddress = serial->port[0]->bulk_out_endpointAddress; usb_fill_bulk_urb(port_priv->write_urb, serial->dev, usb_sndbulkpipe(serial->dev, bEndpointAddress), port_priv->write_buffer, QT2_WRITE_BUFFER_SIZE, qt2_write_bulk_callback, port); usb_set_serial_port_data(port, port_priv); return 0; err_urb: kfree(port_priv->write_buffer); err_buf: kfree(port_priv); return -ENOMEM; } static void qt2_port_remove(struct usb_serial_port *port) { struct qt2_port_private *port_priv; port_priv = usb_get_serial_port_data(port); usb_free_urb(port_priv->write_urb); kfree(port_priv->write_buffer); kfree(port_priv); } static int qt2_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_device *dev = port->serial->dev; struct qt2_port_private *port_priv = usb_get_serial_port_data(port); u8 *d; int r; d = kzalloc(2, GFP_KERNEL); if (!d) return -ENOMEM; r = qt2_getregister(dev, port_priv->device_port, UART_MCR, d); if (r < 0) goto mget_out; r = qt2_getregister(dev, port_priv->device_port, UART_MSR, d + 1); if (r < 0) goto mget_out; r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) | (d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) | (d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) | (d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) | (d[1] & UART_MSR_RI ? TIOCM_RI : 0) | (d[1] & UART_MSR_DSR ? 
TIOCM_DSR : 0); mget_out: kfree(d); return r; } static int qt2_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct qt2_port_private *port_priv; port_priv = usb_get_serial_port_data(tty->driver_data); return update_mctrl(port_priv, set, clear); } static int qt2_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct qt2_port_private *port_priv; int status; u16 val; port_priv = usb_get_serial_port_data(port); val = (break_state == -1) ? 1 : 0; status = qt2_control_msg(port->serial->dev, QT2_BREAK_CONTROL, val, port_priv->device_port); if (status < 0) { dev_warn(&port->dev, "%s - failed to send control message: %i\n", __func__, status); return status; } return 0; } static void qt2_dtr_rts(struct usb_serial_port *port, int on) { struct usb_device *dev = port->serial->dev; struct qt2_port_private *port_priv = usb_get_serial_port_data(port); /* Disable flow control */ if (!on) { if (qt2_setregister(dev, port_priv->device_port, UART_MCR, 0) < 0) dev_warn(&port->dev, "error from flowcontrol urb\n"); } /* drop RTS and DTR */ if (on) update_mctrl(port_priv, TIOCM_DTR | TIOCM_RTS, 0); else update_mctrl(port_priv, 0, TIOCM_DTR | TIOCM_RTS); } static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch) { struct qt2_port_private *port_priv; u8 newMSR = (u8) *ch; unsigned long flags; /* May be called from qt2_process_read_urb() for an unbound port. */ port_priv = usb_get_serial_port_data(port); if (!port_priv) return; spin_lock_irqsave(&port_priv->lock, flags); port_priv->shadowMSR = newMSR; spin_unlock_irqrestore(&port_priv->lock, flags); if (newMSR & UART_MSR_ANY_DELTA) { /* update input line counters */ if (newMSR & UART_MSR_DCTS) port->icount.cts++; if (newMSR & UART_MSR_DDSR) port->icount.dsr++; if (newMSR & UART_MSR_DDCD) port->icount.dcd++; if (newMSR & UART_MSR_TERI) port->icount.rng++; wake_up_interruptible(&port->port.delta_msr_wait); } } static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch) { struct qt2_port_private *port_priv; struct async_icount *icount; unsigned long flags; u8 newLSR = (u8) *ch; /* May be called from qt2_process_read_urb() for an unbound port. 
*/ port_priv = usb_get_serial_port_data(port); if (!port_priv) return; if (newLSR & UART_LSR_BI) newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI); spin_lock_irqsave(&port_priv->lock, flags); port_priv->shadowLSR = newLSR; spin_unlock_irqrestore(&port_priv->lock, flags); icount = &port->icount; if (newLSR & UART_LSR_BRK_ERROR_BITS) { if (newLSR & UART_LSR_BI) icount->brk++; if (newLSR & UART_LSR_OE) icount->overrun++; if (newLSR & UART_LSR_PE) icount->parity++; if (newLSR & UART_LSR_FE) icount->frame++; } } static unsigned int qt2_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct qt2_port_private *port_priv; unsigned long flags; unsigned int r; port_priv = usb_get_serial_port_data(port); spin_lock_irqsave(&port_priv->urb_lock, flags); if (port_priv->urb_in_use) r = 0; else r = QT2_WRITE_BUFFER_SIZE - QT2_WRITE_CONTROL_SIZE; spin_unlock_irqrestore(&port_priv->urb_lock, flags); return r; } static int qt2_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { struct qt2_port_private *port_priv; struct urb *write_urb; unsigned char *data; unsigned long flags; int status; int bytes_out = 0; port_priv = usb_get_serial_port_data(port); if (port_priv->write_urb == NULL) { dev_err(&port->dev, "%s - no output urb\n", __func__); return 0; } write_urb = port_priv->write_urb; count = min(count, QT2_WRITE_BUFFER_SIZE - QT2_WRITE_CONTROL_SIZE); data = write_urb->transfer_buffer; spin_lock_irqsave(&port_priv->urb_lock, flags); if (port_priv->urb_in_use) { dev_err(&port->dev, "qt2_write - urb is in use\n"); goto write_out; } *data++ = QT2_CONTROL_BYTE; *data++ = QT2_CONTROL_BYTE; *data++ = port_priv->device_port; put_unaligned_le16(count, data); data += 2; memcpy(data, buf, count); write_urb->transfer_buffer_length = count + QT2_WRITE_CONTROL_SIZE; status = usb_submit_urb(write_urb, GFP_ATOMIC); if (status == 0) { port_priv->urb_in_use = true; bytes_out += count; } write_out: spin_unlock_irqrestore(&port_priv->urb_lock, flags); return bytes_out; } static struct usb_serial_driver qt2_device = { .driver = { .name = "quatech-serial", }, .description = DRIVER_DESC, .id_table = id_table, .open = qt2_open, .close = qt2_close, .write = qt2_write, .write_room = qt2_write_room, .calc_num_ports = qt2_calc_num_ports, .attach = qt2_attach, .release = qt2_release, .disconnect = qt2_disconnect, .port_probe = qt2_port_probe, .port_remove = qt2_port_remove, .dtr_rts = qt2_dtr_rts, .break_ctl = qt2_break_ctl, .tiocmget = qt2_tiocmget, .tiocmset = qt2_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .set_termios = qt2_set_termios, }; static struct usb_serial_driver *const serial_drivers[] = { &qt2_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL v2"); |
1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 | // SPDX-License-Identifier: GPL-2.0-only /* DVB USB compliant Linux driver for the TwinhanDTV StarBox USB2.0 DVB-S * receiver. * * Copyright (C) 2005 Ralph Metzler <rjkm@metzlerbros.de> * Metzler Brothers Systementwicklung GbR * * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de> * * Thanks to Twinhan who kindly provided hardware and information. * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #include "vp702x.h" #include <linux/mutex.h> /* debug */ int dvb_usb_vp702x_debug; module_param_named(debug,dvb_usb_vp702x_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); struct vp702x_adapter_state { int pid_filter_count; int pid_filter_can_bypass; u8 pid_filter_state; }; static int vp702x_usb_in_op_unlocked(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { int ret; ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), req, USB_TYPE_VENDOR | USB_DIR_IN, value, index, b, blen, 2000); if (ret < 0) { warn("usb in operation failed. (%d)", ret); ret = -EIO; } else ret = 0; deb_xfer("in: req. %02x, val: %04x, ind: %04x, buffer: ",req,value,index); debug_dump(b,blen,deb_xfer); return ret; } int vp702x_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { int ret; mutex_lock(&d->usb_mutex); ret = vp702x_usb_in_op_unlocked(d, req, value, index, b, blen); mutex_unlock(&d->usb_mutex); return ret; } static int vp702x_usb_out_op_unlocked(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { int ret; deb_xfer("out: req. 
%02x, val: %04x, ind: %04x, buffer: ",req,value,index); debug_dump(b,blen,deb_xfer); if ((ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev,0), req, USB_TYPE_VENDOR | USB_DIR_OUT, value,index,b,blen, 2000)) != blen) { warn("usb out operation failed. (%d)",ret); return -EIO; } else return 0; } static int vp702x_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { int ret; mutex_lock(&d->usb_mutex); ret = vp702x_usb_out_op_unlocked(d, req, value, index, b, blen); mutex_unlock(&d->usb_mutex); return ret; } int vp702x_usb_inout_op(struct dvb_usb_device *d, u8 *o, int olen, u8 *i, int ilen, int msec) { int ret; if ((ret = mutex_lock_interruptible(&d->usb_mutex))) return ret; ret = vp702x_usb_out_op_unlocked(d, REQUEST_OUT, 0, 0, o, olen); msleep(msec); ret = vp702x_usb_in_op_unlocked(d, REQUEST_IN, 0, 0, i, ilen); mutex_unlock(&d->usb_mutex); return ret; } static int vp702x_usb_inout_cmd(struct dvb_usb_device *d, u8 cmd, u8 *o, int olen, u8 *i, int ilen, int msec) { struct vp702x_device_state *st = d->priv; int ret = 0; u8 *buf; int buflen = max(olen + 2, ilen + 1); ret = mutex_lock_interruptible(&st->buf_mutex); if (ret < 0) return ret; if (buflen > st->buf_len) { buf = kmalloc(buflen, GFP_KERNEL); if (!buf) { mutex_unlock(&st->buf_mutex); return -ENOMEM; } info("successfully reallocated a bigger buffer"); kfree(st->buf); st->buf = buf; st->buf_len = buflen; } else { buf = st->buf; } buf[0] = 0x00; buf[1] = cmd; memcpy(&buf[2], o, olen); ret = vp702x_usb_inout_op(d, buf, olen+2, buf, ilen+1, msec); if (ret == 0) memcpy(i, &buf[1], ilen); mutex_unlock(&st->buf_mutex); return ret; } static int vp702x_set_pld_mode(struct dvb_usb_adapter *adap, u8 bypass) { int ret; struct vp702x_device_state *st = adap->dev->priv; u8 *buf; mutex_lock(&st->buf_mutex); buf = st->buf; memset(buf, 0, 16); ret = vp702x_usb_in_op(adap->dev, 0xe0, (bypass << 8) | 0x0e, 0, buf, 16); mutex_unlock(&st->buf_mutex); return ret; } static int vp702x_set_pld_state(struct dvb_usb_adapter *adap, u8 state) { int ret; struct vp702x_device_state *st = adap->dev->priv; u8 *buf; mutex_lock(&st->buf_mutex); buf = st->buf; memset(buf, 0, 16); ret = vp702x_usb_in_op(adap->dev, 0xe0, (state << 8) | 0x0f, 0, buf, 16); mutex_unlock(&st->buf_mutex); return ret; } static int vp702x_set_pid(struct dvb_usb_adapter *adap, u16 pid, u8 id, int onoff) { struct vp702x_adapter_state *st = adap->priv; struct vp702x_device_state *dst = adap->dev->priv; u8 *buf; if (onoff) st->pid_filter_state |= (1 << id); else { st->pid_filter_state &= ~(1 << id); pid = 0xffff; } id = 0x10 + id*2; vp702x_set_pld_state(adap, st->pid_filter_state); mutex_lock(&dst->buf_mutex); buf = dst->buf; memset(buf, 0, 16); vp702x_usb_in_op(adap->dev, 0xe0, (((pid >> 8) & 0xff) << 8) | (id), 0, buf, 16); vp702x_usb_in_op(adap->dev, 0xe0, (((pid ) & 0xff) << 8) | (id+1), 0, buf, 16); mutex_unlock(&dst->buf_mutex); return 0; } static int vp702x_init_pid_filter(struct dvb_usb_adapter *adap) { struct vp702x_adapter_state *st = adap->priv; struct vp702x_device_state *dst = adap->dev->priv; int i; u8 *b; st->pid_filter_count = 8; st->pid_filter_can_bypass = 1; st->pid_filter_state = 0x00; vp702x_set_pld_mode(adap, 1); /* bypass */ for (i = 0; i < st->pid_filter_count; i++) vp702x_set_pid(adap, 0xffff, i, 1); mutex_lock(&dst->buf_mutex); b = dst->buf; memset(b, 0, 10); vp702x_usb_in_op(adap->dev, 0xb5, 3, 0, b, 10); vp702x_usb_in_op(adap->dev, 0xb5, 0, 0, b, 10); vp702x_usb_in_op(adap->dev, 0xb5, 1, 0, b, 10); mutex_unlock(&dst->buf_mutex); 
/*vp702x_set_pld_mode(d, 0); // filter */ return 0; } static int vp702x_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { return 0; } /* keys for the enclosed remote control */ static struct rc_map_table rc_map_vp702x_table[] = { { 0x0001, KEY_1 }, { 0x0002, KEY_2 }, }; /* remote control stuff (does not work with my box) */ static int vp702x_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { /* remove the following return to enabled remote querying */ #if 0 u8 *key; int i; key = kmalloc(10, GFP_KERNEL); if (!key) return -ENOMEM; vp702x_usb_in_op(d,READ_REMOTE_REQ,0,0,key,10); deb_rc("remote query key: %x %d\n",key[1],key[1]); if (key[1] == 0x44) { *state = REMOTE_NO_KEY_PRESSED; kfree(key); return 0; } for (i = 0; i < ARRAY_SIZE(rc_map_vp702x_table); i++) if (rc5_custom(&rc_map_vp702x_table[i]) == key[1]) { *state = REMOTE_KEY_PRESSED; *event = rc_map_vp702x_table[i].keycode; break; } kfree(key); #endif return 0; } static int vp702x_read_mac_addr(struct dvb_usb_device *d,u8 mac[6]) { u8 i, *buf; int ret; struct vp702x_device_state *st = d->priv; mutex_lock(&st->buf_mutex); buf = st->buf; for (i = 6; i < 12; i++) { ret = vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1, &buf[i - 6], 1); if (ret < 0) goto err; } memcpy(mac, buf, 6); err: mutex_unlock(&st->buf_mutex); return ret; } static int vp702x_frontend_attach(struct dvb_usb_adapter *adap) { u8 buf[10] = { 0 }; vp702x_usb_out_op(adap->dev, SET_TUNER_POWER_REQ, 0, 7, NULL, 0); if (vp702x_usb_inout_cmd(adap->dev, GET_SYSTEM_STRING, NULL, 0, buf, 10, 10)) return -EIO; buf[9] = '\0'; info("system string: %s",&buf[1]); vp702x_init_pid_filter(adap); adap->fe_adap[0].fe = vp702x_fe_attach(adap->dev); vp702x_usb_out_op(adap->dev, SET_TUNER_POWER_REQ, 1, 7, NULL, 0); return 0; } static struct dvb_usb_device_properties vp702x_properties; static int vp702x_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct dvb_usb_device *d; struct vp702x_device_state *st; int ret; ret = dvb_usb_device_init(intf, &vp702x_properties, THIS_MODULE, &d, adapter_nr); if (ret) goto out; st = d->priv; st->buf_len = 16; st->buf = kmalloc(st->buf_len, GFP_KERNEL); if (!st->buf) { ret = -ENOMEM; dvb_usb_device_exit(intf); goto out; } mutex_init(&st->buf_mutex); out: return ret; } static void vp702x_usb_disconnect(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); struct vp702x_device_state *st = d->priv; mutex_lock(&st->buf_mutex); kfree(st->buf); mutex_unlock(&st->buf_mutex); dvb_usb_device_exit(intf); } enum { VISIONPLUS_VP7021_COLD, VISIONPLUS_VP7020_COLD, VISIONPLUS_VP7020_WARM, }; static struct usb_device_id vp702x_usb_table[] = { DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7021_COLD), // DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7020_COLD), // DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7020_WARM), { } }; MODULE_DEVICE_TABLE(usb, vp702x_usb_table); static struct dvb_usb_device_properties vp702x_properties = { .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-vp702x-02.fw", .no_reconnect = 1, .size_of_priv = sizeof(struct vp702x_device_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_RECEIVES_204_BYTE_TS, .streaming_ctrl = vp702x_streaming_ctrl, .frontend_attach = vp702x_frontend_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 10, .endpoint = 0x02, .u = { .bulk = { .buffersize = 4096, } } }, }}, .size_of_priv = sizeof(struct vp702x_adapter_state), } }, .read_mac_address = vp702x_read_mac_addr, .rc.legacy = { .rc_map_table = 
rc_map_vp702x_table, .rc_map_size = ARRAY_SIZE(rc_map_vp702x_table), .rc_interval = 400, .rc_query = vp702x_rc_query, }, .num_device_descs = 1, .devices = { { .name = "TwinhanDTV StarBox DVB-S USB2.0 (VP7021)", .cold_ids = { &vp702x_usb_table[VISIONPLUS_VP7021_COLD], NULL }, .warm_ids = { NULL }, }, /* { .name = "TwinhanDTV StarBox DVB-S USB2.0 (VP7020)", .cold_ids = { &vp702x_usb_table[VISIONPLUS_VP7020_COLD], NULL }, .warm_ids = { &vp702x_usb_table[VISIONPLUS_VP7020_WARM], NULL }, }, */ { NULL }, } }; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver vp702x_usb_driver = { .name = "dvb_usb_vp702x", .probe = vp702x_usb_probe, .disconnect = vp702x_usb_disconnect, .id_table = vp702x_usb_table, }; module_usb_driver(vp702x_usb_driver); MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for Twinhan StarBox DVB-S USB2.0 and clones"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); |
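/*
 * A small standalone sketch (not part of vp702x.c) of how vp702x_set_pid()
 * programs one hardware PID-filter slot: the PID is split into high and low
 * bytes and each byte is written through vendor request 0xe0 with
 * value = (byte << 8) | register, where the register pair for slot N is
 * 0x10 + 2*N and 0x11 + 2*N; disabled slots are parked on PID 0xffff. The
 * struct and function names below are hypothetical and serve only this
 * illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct pld_write { uint16_t value; uint16_t index; };

/* Compute the two 0xe0 control-message values for PID-filter slot `id`. */
static void vp702x_pid_slot_writes(uint16_t pid, uint8_t id, int onoff,
				   struct pld_write w[2])
{
	uint8_t reg = 0x10 + id * 2;

	if (!onoff)
		pid = 0xffff;	/* disabled slots are parked on 0xffff */

	w[0].value = (uint16_t)((((pid >> 8) & 0xff) << 8) | reg);
	w[0].index = 0;
	w[1].value = (uint16_t)(((pid & 0xff) << 8) | (reg + 1));
	w[1].index = 0;
}

int main(void)
{
	struct pld_write w[2];

	vp702x_pid_slot_writes(0x1011, 3, 1, w);	/* PID 0x1011 in slot 3 */
	printf("req 0xe0 value 0x%04x, then value 0x%04x\n",
	       w[0].value, w[1].value);			/* 0x1016, then 0x1117 */
	return 0;
}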
2 2 1 2 2 2 2 2 1 1 1 1 1 1 1 1 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Pixart PAC7311 library * Copyright (C) 2005 Thomas Kaiser thomas@kaiser-linux.li * * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> */ /* Some documentation about various registers as determined by trial and error. * * Register page 1: * * Address Description * 0x08 Unknown compressor related, must always be 8 except when not * in 640x480 resolution and page 4 reg 2 <= 3 then set it to 9 ! * 0x1b Auto white balance related, bit 0 is AWB enable (inverted) * bits 345 seem to toggle per color gains on/off (inverted) * 0x78 Global control, bit 6 controls the LED (inverted) * 0x80 Compression balance, interesting settings: * 0x01 Use this to allow the camera to switch to higher compr. * on the fly. 
Needed to stay within bandwidth @ 640x480@30 * 0x1c From usb captures under Windows for 640x480 * 0x2a Values >= this switch the camera to a lower compression, * using the same table for both luminance and chrominance. * This gives a sharper picture. Usable only at 640x480@ < * 15 fps or 320x240 / 160x120. Note currently the driver * does not use this as the quality gain is small and the * generated JPG-s are only understood by v4l-utils >= 0.8.9 * 0x3f From usb captures under Windows for 320x240 * 0x69 From usb captures under Windows for 160x120 * * Register page 4: * * Address Description * 0x02 Clock divider 2-63, fps =~ 60 / val. Must be a multiple of 3 on * the 7302, so one of 3, 6, 9, ..., except when between 6 and 12? * 0x0f Master gain 1-245, low value = high gain * 0x10 Another gain 0-15, limited influence (1-2x gain I guess) * 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused * Note setting vflip disabled leads to a much lower image quality, * so we always vflip, and tell userspace to flip it back * 0x27 Seems to toggle various gains on / off, Setting bit 7 seems to * completely disable the analog amplification block. Set to 0x68 * for max gain, 0x14 for minimal gain. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "pac7311" #include <linux/input.h> #include "gspca.h" /* Include pac common sof detection functions */ #include "pac_common.h" #define PAC7311_GAIN_DEFAULT 122 #define PAC7311_EXPOSURE_DEFAULT 3 /* 20 fps, avoid using high compr. */ MODULE_AUTHOR("Thomas Kaiser thomas@kaiser-linux.li"); MODULE_DESCRIPTION("Pixart PAC7311"); MODULE_LICENSE("GPL"); struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ struct v4l2_ctrl *contrast; struct v4l2_ctrl *hflip; u8 sof_read; u8 autogain_ignore_frames; atomic_t avg_lum; }; static const struct v4l2_pix_format vga_mode[] = { {160, 120, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 2}, {320, 240, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1}, {640, 480, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, }; #define LOAD_PAGE4 254 #define END_OF_SEQUENCE 0 static const __u8 init_7311[] = { 0xff, 0x01, 0x78, 0x40, /* Bit_0=start stream, Bit_6=LED */ 0x78, 0x40, /* Bit_0=start stream, Bit_6=LED */ 0x78, 0x44, /* Bit_0=start stream, Bit_6=LED */ 0xff, 0x04, 0x27, 0x80, 0x28, 0xca, 0x29, 0x53, 0x2a, 0x0e, 0xff, 0x01, 0x3e, 0x20, }; static const __u8 start_7311[] = { /* index, len, [value]* */ 0xff, 1, 0x01, /* page 1 */ 0x02, 43, 0x48, 0x0a, 0x40, 0x08, 0x00, 0x00, 0x08, 0x00, 0x06, 0xff, 0x11, 0xff, 0x5a, 0x30, 0x90, 0x4c, 0x00, 0x07, 0x00, 0x0a, 0x10, 0x00, 0xa0, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 42, 0x00, 0x00, 0x78, 0x52, 0x4a, 0x52, 0x78, 0x6e, 0x48, 0x46, 0x48, 0x6e, 0x5f, 0x49, 0x42, 0x49, 0x5f, 0x5f, 0x49, 0x42, 0x49, 0x5f, 0x6e, 0x48, 0x46, 0x48, 0x6e, 0x78, 0x52, 0x4a, 0x52, 0x78, 0x00, 0x00, 0x09, 0x1b, 0x34, 0x49, 0x5c, 0x9b, 0xd0, 0xff, 0x78, 6, 0x44, 0x00, 0xf2, 0x01, 0x01, 0x80, 0x7f, 18, 0x2a, 0x1c, 0x00, 0xc8, 0x02, 0x58, 0x03, 0x84, 0x12, 0x00, 0x1a, 0x04, 0x08, 0x0c, 0x10, 0x14, 0x18, 0x20, 0x96, 3, 0x01, 0x08, 0x04, 0xa0, 4, 0x44, 0x44, 0x44, 0x04, 0xf0, 13, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x20, 
0x00, 0x3f, 0x00, 0x0a, 0x01, 0x00, 0xff, 1, 0x04, /* page 4 */ 0, LOAD_PAGE4, /* load the page 4 */ 0x11, 1, 0x01, 0, END_OF_SEQUENCE /* end of sequence */ }; #define SKIP 0xaa /* page 4 - the value SKIP says skip the index - see reg_w_page() */ static const __u8 page4_7311[] = { SKIP, SKIP, 0x04, 0x54, 0x07, 0x2b, 0x09, 0x0f, 0x09, 0x00, SKIP, SKIP, 0x07, 0x00, 0x00, 0x62, 0x08, SKIP, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xa0, 0x01, 0xf4, SKIP, SKIP, 0x00, 0x08, SKIP, 0x03, SKIP, 0x00, 0x68, 0xca, 0x10, 0x06, 0x78, 0x00, 0x00, 0x00, 0x00, 0x23, 0x28, 0x04, 0x11, 0x00, 0x00 }; static void reg_w_buf(struct gspca_dev *gspca_dev, __u8 index, const u8 *buffer, int len) { int ret; if (gspca_dev->usb_err < 0) return; memcpy(gspca_dev->usb_buf, buffer, len); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, len, 500); if (ret < 0) { pr_err("reg_w_buf() failed index 0x%02x, error %d\n", index, ret); gspca_dev->usb_err = ret; } } static void reg_w(struct gspca_dev *gspca_dev, __u8 index, __u8 value) { int ret; if (gspca_dev->usb_err < 0) return; gspca_dev->usb_buf[0] = value; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, gspca_dev->usb_buf, 1, 500); if (ret < 0) { pr_err("reg_w() failed index 0x%02x, value 0x%02x, error %d\n", index, value, ret); gspca_dev->usb_err = ret; } } static void reg_w_seq(struct gspca_dev *gspca_dev, const __u8 *seq, int len) { while (--len >= 0) { reg_w(gspca_dev, seq[0], seq[1]); seq += 2; } } /* load the beginning of a page */ static void reg_w_page(struct gspca_dev *gspca_dev, const __u8 *page, int len) { int index; int ret = 0; if (gspca_dev->usb_err < 0) return; for (index = 0; index < len; index++) { if (page[index] == SKIP) /* skip this index */ continue; gspca_dev->usb_buf[0] = page[index]; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, gspca_dev->usb_buf, 1, 500); if (ret < 0) { pr_err("reg_w_page() failed index 0x%02x, value 0x%02x, error %d\n", index, page[index], ret); gspca_dev->usb_err = ret; break; } } } /* output a variable sequence */ static void reg_w_var(struct gspca_dev *gspca_dev, const __u8 *seq, const __u8 *page4, unsigned int page4_len) { int index, len; for (;;) { index = *seq++; len = *seq++; switch (len) { case END_OF_SEQUENCE: return; case LOAD_PAGE4: reg_w_page(gspca_dev, page4, page4_len); break; default: if (len > USB_BUF_SZ) { gspca_err(gspca_dev, "Incorrect variable sequence\n"); return; } while (len > 0) { if (len < 8) { reg_w_buf(gspca_dev, index, seq, len); seq += len; break; } reg_w_buf(gspca_dev, index, seq, 8); seq += 8; index += 8; len -= 8; } } } /* not reached */ } /* this function is called at probe time for pac7311 */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); cam->input_flags = V4L2_IN_ST_VFLIP; return 0; } static void setcontrast(struct gspca_dev *gspca_dev, s32 val) { reg_w(gspca_dev, 0xff, 0x04); reg_w(gspca_dev, 0x10, val); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } static void setgain(struct gspca_dev *gspca_dev, s32 val) { reg_w(gspca_dev, 0xff, 0x04); /* page 4 */ reg_w(gspca_dev, 0x0e, 
0x00); reg_w(gspca_dev, 0x0f, gspca_dev->gain->maximum - val + 1); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } static void setexposure(struct gspca_dev *gspca_dev, s32 val) { reg_w(gspca_dev, 0xff, 0x04); /* page 4 */ reg_w(gspca_dev, 0x02, val); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); /* * Page 1 register 8 must always be 0x08 except when not in * 640x480 mode and page 4 reg 2 <= 3 then it must be 9 */ reg_w(gspca_dev, 0xff, 0x01); if (gspca_dev->pixfmt.width != 640 && val <= 3) reg_w(gspca_dev, 0x08, 0x09); else reg_w(gspca_dev, 0x08, 0x08); /* * Page1 register 80 sets the compression balance, normally we * want / use 0x1c, but for 640x480@30fps we must allow the * camera to use higher compression or we may run out of * bandwidth. */ if (gspca_dev->pixfmt.width == 640 && val == 2) reg_w(gspca_dev, 0x80, 0x01); else reg_w(gspca_dev, 0x80, 0x1c); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } static void sethvflip(struct gspca_dev *gspca_dev, s32 hflip, s32 vflip) { __u8 data; reg_w(gspca_dev, 0xff, 0x04); /* page 4 */ data = (hflip ? 0x04 : 0x00) | (vflip ? 0x08 : 0x00); reg_w(gspca_dev, 0x21, data); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } /* this function is called at probe and resume time for pac7311 */ static int sd_init(struct gspca_dev *gspca_dev) { reg_w_seq(gspca_dev, init_7311, sizeof(init_7311)/2); return gspca_dev->usb_err; } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (ctrl->id == V4L2_CID_AUTOGAIN && ctrl->is_new && ctrl->val) { /* when switching to autogain set defaults to make sure we are on a valid point of the autogain gain / exposure knee graph, and give this change time to take effect before doing autogain. 
*/ gspca_dev->exposure->val = PAC7311_EXPOSURE_DEFAULT; gspca_dev->gain->val = PAC7311_GAIN_DEFAULT; sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES; } if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_CONTRAST: setcontrast(gspca_dev, ctrl->val); break; case V4L2_CID_AUTOGAIN: if (gspca_dev->exposure->is_new || (ctrl->is_new && ctrl->val)) setexposure(gspca_dev, gspca_dev->exposure->val); if (gspca_dev->gain->is_new || (ctrl->is_new && ctrl->val)) setgain(gspca_dev, gspca_dev->gain->val); break; case V4L2_CID_HFLIP: sethvflip(gspca_dev, sd->hflip->val, 1); break; default: return -EINVAL; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; /* this function is called at probe time */ static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 5); sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 15, 1, 7); gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 2, 63, 1, PAC7311_EXPOSURE_DEFAULT); gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 244, 1, PAC7311_GAIN_DEFAULT); sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false); return 0; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; sd->sof_read = 0; reg_w_var(gspca_dev, start_7311, page4_7311, sizeof(page4_7311)); setcontrast(gspca_dev, v4l2_ctrl_g_ctrl(sd->contrast)); setgain(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->gain)); setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure)); sethvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip), 1); /* set correct resolution */ switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) { case 2: /* 160x120 */ reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x17, 0x20); reg_w(gspca_dev, 0x87, 0x10); break; case 1: /* 320x240 */ reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x17, 0x30); reg_w(gspca_dev, 0x87, 0x11); break; case 0: /* 640x480 */ reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x17, 0x00); reg_w(gspca_dev, 0x87, 0x12); break; } sd->sof_read = 0; sd->autogain_ignore_frames = 0; atomic_set(&sd->avg_lum, -1); /* start stream */ reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x78, 0x05); return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { reg_w(gspca_dev, 0xff, 0x04); reg_w(gspca_dev, 0x27, 0x80); reg_w(gspca_dev, 0x28, 0xca); reg_w(gspca_dev, 0x29, 0x53); reg_w(gspca_dev, 0x2a, 0x0e); reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x3e, 0x20); reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */ reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */ reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */ } static void do_autogain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int avg_lum = atomic_read(&sd->avg_lum); int desired_lum, deadzone; if (avg_lum == -1) return; desired_lum = 170; deadzone = 20; if (sd->autogain_ignore_frames > 0) sd->autogain_ignore_frames--; else if (gspca_coarse_grained_expo_autogain(gspca_dev, avg_lum, desired_lum, deadzone)) 
sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES; } /* JPEG header, part 1 */ static const unsigned char pac_jpeg_header1[] = { 0xff, 0xd8, /* SOI: Start of Image */ 0xff, 0xc0, /* SOF0: Start of Frame (Baseline DCT) */ 0x00, 0x11, /* length = 17 bytes (including this length field) */ 0x08 /* Precision: 8 */ /* 2 bytes is placed here: number of image lines */ /* 2 bytes is placed here: samples per line */ }; /* JPEG header, continued */ static const unsigned char pac_jpeg_header2[] = { 0x03, /* Number of image components: 3 */ 0x01, 0x21, 0x00, /* ID=1, Subsampling 1x1, Quantization table: 0 */ 0x02, 0x11, 0x01, /* ID=2, Subsampling 2x1, Quantization table: 1 */ 0x03, 0x11, 0x01, /* ID=3, Subsampling 2x1, Quantization table: 1 */ 0xff, 0xda, /* SOS: Start Of Scan */ 0x00, 0x0c, /* length = 12 bytes (including this length field) */ 0x03, /* number of components: 3 */ 0x01, 0x00, /* selector 1, table 0x00 */ 0x02, 0x11, /* selector 2, table 0x11 */ 0x03, 0x11, /* selector 3, table 0x11 */ 0x00, 0x3f, /* Spectral selection: 0 .. 63 */ 0x00 /* Successive approximation: 0 */ }; static void pac_start_frame(struct gspca_dev *gspca_dev, __u16 lines, __u16 samples_per_line) { unsigned char tmpbuf[4]; gspca_frame_add(gspca_dev, FIRST_PACKET, pac_jpeg_header1, sizeof(pac_jpeg_header1)); tmpbuf[0] = lines >> 8; tmpbuf[1] = lines & 0xff; tmpbuf[2] = samples_per_line >> 8; tmpbuf[3] = samples_per_line & 0xff; gspca_frame_add(gspca_dev, INTER_PACKET, tmpbuf, sizeof(tmpbuf)); gspca_frame_add(gspca_dev, INTER_PACKET, pac_jpeg_header2, sizeof(pac_jpeg_header2)); } /* this function is run at interrupt level */ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; u8 *image; unsigned char *sof; sof = pac_find_sof(gspca_dev, &sd->sof_read, data, len); if (sof) { int n, lum_offset, footer_length; /* * 6 bytes after the FF D9 EOF marker a number of lumination * bytes are send corresponding to different parts of the * image, the 14th and 15th byte after the EOF seem to * correspond to the center of the image. 
*/ lum_offset = 24 + sizeof pac_sof_marker; footer_length = 26; /* Finish decoding current frame */ n = (sof - data) - (footer_length + sizeof pac_sof_marker); if (n < 0) { gspca_dev->image_len += n; n = 0; } else { gspca_frame_add(gspca_dev, INTER_PACKET, data, n); } image = gspca_dev->image; if (image != NULL && image[gspca_dev->image_len - 2] == 0xff && image[gspca_dev->image_len - 1] == 0xd9) gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); n = sof - data; len -= n; data = sof; /* Get average lumination */ if (gspca_dev->last_packet_type == LAST_PACKET && n >= lum_offset) atomic_set(&sd->avg_lum, data[-lum_offset] + data[-lum_offset + 1]); else atomic_set(&sd->avg_lum, -1); /* Start the new frame with the jpeg header */ pac_start_frame(gspca_dev, gspca_dev->pixfmt.height, gspca_dev->pixfmt.width); } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ int len) /* interrupt packet length */ { int ret = -EINVAL; u8 data0, data1; if (len == 2) { data0 = data[0]; data1 = data[1]; if ((data0 == 0x00 && data1 == 0x11) || (data0 == 0x22 && data1 == 0x33) || (data0 == 0x44 && data1 == 0x55) || (data0 == 0x66 && data1 == 0x77) || (data0 == 0x88 && data1 == 0x99) || (data0 == 0xaa && data1 == 0xbb) || (data0 == 0xcc && data1 == 0xdd) || (data0 == 0xee && data1 == 0xff)) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); input_sync(gspca_dev->input_dev); input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); ret = 0; } } return ret; } #endif static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = do_autogain, #if IS_ENABLED(CONFIG_INPUT) .int_pkt_scan = sd_int_pkt_scan, #endif }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x093a, 0x2600)}, {USB_DEVICE(0x093a, 0x2601)}, {USB_DEVICE(0x093a, 0x2603)}, {USB_DEVICE(0x093a, 0x2608)}, {USB_DEVICE(0x093a, 0x260e)}, {USB_DEVICE(0x093a, 0x260f)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); |
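/*
 * A standalone sketch (not part of pac7311.c) of the exposure bookkeeping done
 * in setexposure(): page-4 register 0x02 is a clock divider (frame rate is
 * roughly 60 / value), page-1 register 0x08 must be 9 instead of 8 when not in
 * 640x480 and the divider is <= 3, and the page-1 compression balance
 * (register 0x80) is relaxed to 0x01 only for 640x480 at divider 2 (~30 fps)
 * to stay within USB bandwidth. The struct and function names below are
 * hypothetical and exist only for this illustration.
 */
#include <stdio.h>

struct pac7311_exposure_regs {
	unsigned char clock_div;	/* page 4, reg 0x02 */
	unsigned char reg08;		/* page 1, reg 0x08 */
	unsigned char comp_balance;	/* page 1, reg 0x80 */
};

static struct pac7311_exposure_regs pac7311_exposure(int width, int val)
{
	struct pac7311_exposure_regs r;

	r.clock_div = (unsigned char)val;
	r.reg08 = (width != 640 && val <= 3) ? 0x09 : 0x08;
	r.comp_balance = (width == 640 && val == 2) ? 0x01 : 0x1c;
	return r;
}

int main(void)
{
	struct pac7311_exposure_regs r = pac7311_exposure(640, 2);

	printf("divider %u (~%d fps), reg08 0x%02x, balance 0x%02x\n",
	       (unsigned int)r.clock_div, 60 / r.clock_div,
	       r.reg08, r.comp_balance);
	return 0;
}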
12 12 415 414 413 415 26 208 208 206 166 233 156 10 946 946 953 595 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_UACCESS_H__ #define __LINUX_UACCESS_H__ #include <linux/fault-inject-usercopy.h> #include <linux/instrumented.h> #include <linux/minmax.h> #include <linux/nospec.h> #include <linux/sched.h> #include <linux/thread_info.h> #include <asm/uaccess.h> /* * Architectures that support memory tagging (assigning tags to memory regions, * embedding these tags into addresses that point to these memory regions, and * checking that the memory and the pointer tags match on memory accesses) * redefine this macro to strip tags from pointers. * * Passing down mm_struct allows to define untagging rules on per-process * basis. * * It's defined as noop for architectures that don't support memory tagging. */ #ifndef untagged_addr #define untagged_addr(addr) (addr) #endif #ifndef untagged_addr_remote #define untagged_addr_remote(mm, addr) ({ \ mmap_assert_locked(mm); \ untagged_addr(addr); \ }) #endif #ifdef masked_user_access_begin #define can_do_masked_user_access() 1 #else #define can_do_masked_user_access() 0 #define masked_user_access_begin(src) NULL #endif /* * Architectures should provide two primitives (raw_copy_{to,from}_user()) * and get rid of their private instances of copy_{to,from}_user() and * __copy_{to,from}_user{,_inatomic}(). * * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and * return the amount left to copy. They should assume that access_ok() has * already been checked (and succeeded); they should *not* zero-pad anything. * No KASAN or object size checks either - those belong here. * * Both of these functions should attempt to copy size bytes starting at from * into the area starting at to. 
They must not fetch or store anything * outside of those areas. Return value must be between 0 (everything * copied successfully) and size (nothing copied). * * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting * at to must become equal to the bytes fetched from the corresponding area * starting at from. All data past to + size - N must be left unmodified. * * If copying succeeds, the return value must be 0. If some data cannot be * fetched, it is permitted to copy less than had been fetched; the only * hard requirement is that not storing anything at all (i.e. returning size) * should happen only when nothing could be copied. In other words, you don't * have to squeeze as much as possible - it is allowed, but not necessary. * * For raw_copy_from_user() to always points to kernel memory and no faults * on store should happen. Interpretation of from is affected by set_fs(). * For raw_copy_to_user() it's the other way round. * * Both can be inlined - it's up to architectures whether it wants to bother * with that. They should not be used directly; they are used to implement * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user_inatomic()) * that are used instead. Out of those, __... ones are inlined. Plain * copy_{to,from}_user() might or might not be inlined. If you want them * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER. * * NOTE: only copy_from_user() zero-pads the destination in case of short copy. * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything * at all; their callers absolutely must check the return value. * * Biarch ones should also provide raw_copy_in_user() - similar to the above, * but both source and destination are __user pointers (affected by set_fs() * as usual) and both source and destination can trigger faults. */ static __always_inline __must_check unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { unsigned long res; instrument_copy_from_user_before(to, from, n); check_object_size(to, n, false); res = raw_copy_from_user(to, from, n); instrument_copy_from_user_after(to, from, n, res); return res; } static __always_inline __must_check unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long res; might_fault(); instrument_copy_from_user_before(to, from, n); if (should_fail_usercopy()) return n; check_object_size(to, n, false); res = raw_copy_from_user(to, from, n); instrument_copy_from_user_after(to, from, n, res); return res; } /** * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. * @to: Destination address, in user space. * @from: Source address, in kernel space. * @n: Number of bytes to copy. * * Context: User context only. * * Copy data from kernel space to user space. Caller must check * the specified block with access_ok() before calling this function. * The caller should also make sure he pins the user space address * so that we don't result in page fault and sleep. 
*/ static __always_inline __must_check unsigned long __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { if (should_fail_usercopy()) return n; instrument_copy_to_user(to, from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); } static __always_inline __must_check unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); if (should_fail_usercopy()) return n; instrument_copy_to_user(to, from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); } /* * Architectures that #define INLINE_COPY_TO_USER use this function * directly in the normal copy_to/from_user(), the other ones go * through an extern _copy_to/from_user(), which expands the same code * here. * * Rust code always uses the extern definition. */ static inline __must_check unsigned long _inline_copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long res = n; might_fault(); if (!should_fail_usercopy() && likely(access_ok(from, n))) { /* * Ensure that bad access_ok() speculation will not * lead to nasty side effects *after* the copy is * finished: */ barrier_nospec(); instrument_copy_from_user_before(to, from, n); res = raw_copy_from_user(to, from, n); instrument_copy_from_user_after(to, from, n, res); } if (unlikely(res)) memset(to + (n - res), 0, res); return res; } extern __must_check unsigned long _copy_from_user(void *, const void __user *, unsigned long); static inline __must_check unsigned long _inline_copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); if (should_fail_usercopy()) return n; if (access_ok(to, n)) { instrument_copy_to_user(to, from, n); n = raw_copy_to_user(to, from, n); } return n; } extern __must_check unsigned long _copy_to_user(void __user *, const void *, unsigned long); static __always_inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { if (!check_copy_size(to, n, false)) return n; #ifdef INLINE_COPY_FROM_USER return _inline_copy_from_user(to, from, n); #else return _copy_from_user(to, from, n); #endif } static __always_inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { if (!check_copy_size(from, n, true)) return n; #ifdef INLINE_COPY_TO_USER return _inline_copy_to_user(to, from, n); #else return _copy_to_user(to, from, n); #endif } #ifndef copy_mc_to_kernel /* * Without arch opt-in this generic copy_mc_to_kernel() will not handle * #MC (or arch equivalent) during source read. */ static inline unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, size_t cnt) { memcpy(dst, src, cnt); return 0; } #endif static __always_inline void pagefault_disabled_inc(void) { current->pagefault_disabled++; } static __always_inline void pagefault_disabled_dec(void) { current->pagefault_disabled--; } /* * These routines enable/disable the pagefault handler. If disabled, it will * not take any locks and go straight to the fixup table. * * User access methods will not sleep when called from a pagefault_disabled() * environment. */ static inline void pagefault_disable(void) { pagefault_disabled_inc(); /* * make sure to have issued the store before a pagefault * can hit. */ barrier(); } static inline void pagefault_enable(void) { /* * make sure to issue those last loads/stores before enabling * the pagefault handler again. */ barrier(); pagefault_disabled_dec(); } /* * Is the pagefault handler disabled? 
If so, user access methods will not sleep. */ static inline bool pagefault_disabled(void) { return current->pagefault_disabled != 0; } /* * The pagefault handler is in general disabled by pagefault_disable() or * when in irq context (via in_atomic()). * * This function should only be used by the fault handlers. Other users should * stick to pagefault_disabled(). * Please NEVER use preempt_disable() to disable the fault handler. With * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled. * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT. */ #define faulthandler_disabled() (pagefault_disabled() || in_atomic()) #ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS /** * probe_subpage_writeable: probe the user range for write faults at sub-page * granularity (e.g. arm64 MTE) * @uaddr: start of address range * @size: size of address range * * Returns 0 on success, the number of bytes not probed on fault. * * It is expected that the caller checked for the write permission of each * page in the range either by put_user() or GUP. The architecture port can * implement a more efficient get_user() probing if the same sub-page faults * are triggered by either a read or a write. */ static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size) { return 0; } #endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */ #ifndef ARCH_HAS_NOCACHE_UACCESS static inline __must_check unsigned long __copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) { return __copy_from_user_inatomic(to, from, n); } #endif /* ARCH_HAS_NOCACHE_UACCESS */ extern __must_check int check_zeroed_user(const void __user *from, size_t size); /** * copy_struct_from_user: copy a struct from userspace * @dst: Destination address, in kernel space. This buffer must be @ksize * bytes long. * @ksize: Size of @dst struct. * @src: Source address, in userspace. * @usize: (Alleged) size of @src struct. * * Copies a struct from userspace to kernel space, in a way that guarantees * backwards-compatibility for struct syscall arguments (as long as future * struct extensions are made such that all new fields are *appended* to the * old struct, and zeroed-out new fields have the same meaning as the old * struct). * * @ksize is just sizeof(*dst), and @usize should've been passed by userspace. * The recommended usage is something like the following: * * SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize) * { * int err; * struct foo karg = {}; * * if (usize > PAGE_SIZE) * return -E2BIG; * if (usize < FOO_SIZE_VER0) * return -EINVAL; * * err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize); * if (err) * return err; * * // ... * } * * There are three cases to consider: * * If @usize == @ksize, then it's copied verbatim. * * If @usize < @ksize, then the userspace has passed an old struct to a * newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize) * are to be zero-filled. * * If @usize > @ksize, then the userspace has passed a new struct to an * older kernel. The trailing bytes unknown to the kernel (@usize - @ksize) * are checked to ensure they are zeroed, otherwise -E2BIG is returned. * * Returns (in all cases, some data may have been copied): * * -E2BIG: (@usize > @ksize) and there are non-zero trailing bytes in @src. * * -EFAULT: access to userspace failed. 
*/ static __always_inline __must_check int copy_struct_from_user(void *dst, size_t ksize, const void __user *src, size_t usize) { size_t size = min(ksize, usize); size_t rest = max(ksize, usize) - size; /* Double check if ksize is larger than a known object size. */ if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1))) return -E2BIG; /* Deal with trailing bytes. */ if (usize < ksize) { memset(dst + size, 0, rest); } else if (usize > ksize) { int ret = check_zeroed_user(src + size, rest); if (ret <= 0) return ret ?: -E2BIG; } /* Copy the interoperable parts of the struct. */ if (copy_from_user(dst, src, size)) return -EFAULT; return 0; } bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size); long copy_from_kernel_nofault(void *dst, const void *src, size_t size); long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size); long copy_from_user_nofault(void *dst, const void __user *src, size_t size); long notrace copy_to_user_nofault(void __user *dst, const void *src, size_t size); long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count); long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, long count); long strnlen_user_nofault(const void __user *unsafe_addr, long count); #ifndef __get_kernel_nofault #define __get_kernel_nofault(dst, src, type, label) \ do { \ type __user *p = (type __force __user *)(src); \ type data; \ if (__get_user(data, p)) \ goto label; \ *(type *)dst = data; \ } while (0) #define __put_kernel_nofault(dst, src, type, label) \ do { \ type __user *p = (type __force __user *)(dst); \ type data = *(type *)src; \ if (__put_user(data, p)) \ goto label; \ } while (0) #endif /** * get_kernel_nofault(): safely attempt to read from a location * @val: read into this variable * @ptr: address to read from * * Returns 0 on success, or -EFAULT. */ #define get_kernel_nofault(val, ptr) ({ \ const typeof(val) *__gk_ptr = (ptr); \ copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\ }) #ifndef user_access_begin #define user_access_begin(ptr,len) access_ok(ptr, len) #define user_access_end() do { } while (0) #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0) #define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e) #define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e) #define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e) #define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e) static inline unsigned long user_access_save(void) { return 0UL; } static inline void user_access_restore(unsigned long flags) { } #endif #ifndef user_write_access_begin #define user_write_access_begin user_access_begin #define user_write_access_end user_access_end #endif #ifndef user_read_access_begin #define user_read_access_begin user_access_begin #define user_read_access_end user_access_end #endif #ifdef CONFIG_HARDENED_USERCOPY void __noreturn usercopy_abort(const char *name, const char *detail, bool to_user, unsigned long offset, unsigned long len); #endif #endif /* __LINUX_UACCESS_H__ */ |
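For reference, the return-value convention spelled out in the comments above (the copy helpers return the number of bytes that could not be copied, and only copy_from_user() zero-pads the destination on a short copy) is typically used as in the following minimal sketch; the struct layout and function name are hypothetical and are not part of this header:

/*
 * Hypothetical usage sketch (not part of <linux/uaccess.h>): a typical
 * kernel-side handler copying a small structure in from and back out to
 * user space.
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_params {		/* hypothetical layout */
	__u32 flags;
	__u64 addr;
};

static long demo_set_params(void __user *uarg)
{
	struct demo_params p;

	/* A non-zero return means some bytes were not copied -> -EFAULT. */
	if (copy_from_user(&p, uarg, sizeof(p)))
		return -EFAULT;

	/* ... validate and apply p here ... */

	/* Same convention when copying results back to user space. */
	if (copy_to_user(uarg, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}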
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
#include "blk-ioprio.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split); EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug); EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert); static DEFINE_IDA(blk_queue_ida); /* * For queue allocation */ static struct kmem_cache *blk_requestq_cachep; /* * Controlling structure to kblockd */ static struct workqueue_struct *kblockd_workqueue; /** * blk_queue_flag_set - atomically set a queue flag * @flag: flag to be set * @q: request queue */ void blk_queue_flag_set(unsigned int flag, struct request_queue *q) { set_bit(flag, &q->queue_flags); } EXPORT_SYMBOL(blk_queue_flag_set); /** * blk_queue_flag_clear - atomically clear a queue flag * @flag: flag to be cleared * @q: request queue */ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q) { clear_bit(flag, &q->queue_flags); } EXPORT_SYMBOL(blk_queue_flag_clear); #define REQ_OP_NAME(name) [REQ_OP_##name] = #name static const char *const blk_op_name[] = { REQ_OP_NAME(READ), REQ_OP_NAME(WRITE), REQ_OP_NAME(FLUSH), REQ_OP_NAME(DISCARD), REQ_OP_NAME(SECURE_ERASE), REQ_OP_NAME(ZONE_RESET), REQ_OP_NAME(ZONE_RESET_ALL), REQ_OP_NAME(ZONE_OPEN), REQ_OP_NAME(ZONE_CLOSE), REQ_OP_NAME(ZONE_FINISH), REQ_OP_NAME(ZONE_APPEND), REQ_OP_NAME(WRITE_ZEROES), REQ_OP_NAME(DRV_IN), REQ_OP_NAME(DRV_OUT), }; #undef REQ_OP_NAME /** * blk_op_str - Return string XXX in the REQ_OP_XXX. * @op: REQ_OP_XXX. * * Description: Centralize block layer function to convert REQ_OP_XXX into * string format. Useful in the debugging and tracing bio or request. For * invalid REQ_OP_XXX it returns string "UNKNOWN". */ inline const char *blk_op_str(enum req_op op) { const char *op_str = "UNKNOWN"; if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op]) op_str = blk_op_name[op]; return op_str; } EXPORT_SYMBOL_GPL(blk_op_str); static const struct { int errno; const char *name; } blk_errors[] = { [BLK_STS_OK] = { 0, "" }, [BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" }, [BLK_STS_TIMEOUT] = { -ETIMEDOUT, "timeout" }, [BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" }, [BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" }, [BLK_STS_TARGET] = { -EREMOTEIO, "critical target" }, [BLK_STS_RESV_CONFLICT] = { -EBADE, "reservation conflict" }, [BLK_STS_MEDIUM] = { -ENODATA, "critical medium" }, [BLK_STS_PROTECTION] = { -EILSEQ, "protection" }, [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" }, [BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" }, [BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" }, [BLK_STS_OFFLINE] = { -ENODEV, "device offline" }, /* device mapper special case, should not leak out: */ [BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" }, /* zone device specific errors */ [BLK_STS_ZONE_OPEN_RESOURCE] = { -ETOOMANYREFS, "open zones exceeded" }, [BLK_STS_ZONE_ACTIVE_RESOURCE] = { -EOVERFLOW, "active zones exceeded" }, /* Command duration limit device-side timeout */ [BLK_STS_DURATION_LIMIT] = { -ETIME, "duration limit exceeded" }, [BLK_STS_INVAL] = { -EINVAL, "invalid" }, /* everything else not covered above: */ [BLK_STS_IOERR] = { -EIO, "I/O" }, }; blk_status_t errno_to_blk_status(int errno) { int i; for (i = 0; i < ARRAY_SIZE(blk_errors); i++) { if (blk_errors[i].errno == errno) return (__force blk_status_t)i; } return BLK_STS_IOERR; } EXPORT_SYMBOL_GPL(errno_to_blk_status); int blk_status_to_errno(blk_status_t status) { int idx = (__force int)status; if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors))) return -EIO; return blk_errors[idx].errno; } EXPORT_SYMBOL_GPL(blk_status_to_errno); const char *blk_status_to_str(blk_status_t status) { int 
idx = (__force int)status; if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors))) return "<null>"; return blk_errors[idx].name; } EXPORT_SYMBOL_GPL(blk_status_to_str); /** * blk_sync_queue - cancel any pending callbacks on a queue * @q: the queue * * Description: * The block layer may perform asynchronous callback activity * on a queue, such as calling the unplug function after a timeout. * A block device may call blk_sync_queue to ensure that any * such activity is cancelled, thus allowing it to release resources * that the callbacks might use. The caller must already have made sure * that its ->submit_bio will not re-add plugging prior to calling * this function. * * This function does not cancel any asynchronous activity arising * out of elevator or throttling code. That would require elevator_exit() * and blkcg_exit_queue() to be called with queue lock initialized. * */ void blk_sync_queue(struct request_queue *q) { del_timer_sync(&q->timeout); cancel_work_sync(&q->timeout_work); } EXPORT_SYMBOL(blk_sync_queue); /** * blk_set_pm_only - increment pm_only counter * @q: request queue pointer */ void blk_set_pm_only(struct request_queue *q) { atomic_inc(&q->pm_only); } EXPORT_SYMBOL_GPL(blk_set_pm_only); void blk_clear_pm_only(struct request_queue *q) { int pm_only; pm_only = atomic_dec_return(&q->pm_only); WARN_ON_ONCE(pm_only < 0); if (pm_only == 0) wake_up_all(&q->mq_freeze_wq); } EXPORT_SYMBOL_GPL(blk_clear_pm_only); static void blk_free_queue_rcu(struct rcu_head *rcu_head) { struct request_queue *q = container_of(rcu_head, struct request_queue, rcu_head); percpu_ref_exit(&q->q_usage_counter); kmem_cache_free(blk_requestq_cachep, q); } static void blk_free_queue(struct request_queue *q) { blk_free_queue_stats(q->stats); if (queue_is_mq(q)) blk_mq_release(q); ida_free(&blk_queue_ida, q->id); call_rcu(&q->rcu_head, blk_free_queue_rcu); } /** * blk_put_queue - decrement the request_queue refcount * @q: the request_queue structure to decrement the refcount for * * Decrements the refcount of the request_queue and free it when the refcount * reaches 0. */ void blk_put_queue(struct request_queue *q) { if (refcount_dec_and_test(&q->refs)) blk_free_queue(q); } EXPORT_SYMBOL(blk_put_queue); void blk_queue_start_drain(struct request_queue *q) { /* * When queue DYING flag is set, we need to block new req * entering queue, so we call blk_freeze_queue_start() to * prevent I/O from crossing blk_queue_enter(). */ blk_freeze_queue_start(q); if (queue_is_mq(q)) blk_mq_wake_waiters(q); /* Make blk_queue_enter() reexamine the DYING flag. */ wake_up_all(&q->mq_freeze_wq); } /** * blk_queue_enter() - try to increase q->q_usage_counter * @q: request queue pointer * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM */ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) { const bool pm = flags & BLK_MQ_REQ_PM; while (!blk_try_enter_queue(q, pm)) { if (flags & BLK_MQ_REQ_NOWAIT) return -EAGAIN; /* * read pair of barrier in blk_freeze_queue_start(), we need to * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and * reading .mq_freeze_depth or queue dying flag, otherwise the * following wait may never return if the two reads are * reordered. 
*/ smp_rmb(); wait_event(q->mq_freeze_wq, (!q->mq_freeze_depth && blk_pm_resume_queue(pm, q)) || blk_queue_dying(q)); if (blk_queue_dying(q)) return -ENODEV; } return 0; } int __bio_queue_enter(struct request_queue *q, struct bio *bio) { while (!blk_try_enter_queue(q, false)) { struct gendisk *disk = bio->bi_bdev->bd_disk; if (bio->bi_opf & REQ_NOWAIT) { if (test_bit(GD_DEAD, &disk->state)) goto dead; bio_wouldblock_error(bio); return -EAGAIN; } /* * read pair of barrier in blk_freeze_queue_start(), we need to * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and * reading .mq_freeze_depth or queue dying flag, otherwise the * following wait may never return if the two reads are * reordered. */ smp_rmb(); wait_event(q->mq_freeze_wq, (!q->mq_freeze_depth && blk_pm_resume_queue(false, q)) || test_bit(GD_DEAD, &disk->state)); if (test_bit(GD_DEAD, &disk->state)) goto dead; } return 0; dead: bio_io_error(bio); return -ENODEV; } void blk_queue_exit(struct request_queue *q) { percpu_ref_put(&q->q_usage_counter); } static void blk_queue_usage_counter_release(struct percpu_ref *ref) { struct request_queue *q = container_of(ref, struct request_queue, q_usage_counter); wake_up_all(&q->mq_freeze_wq); } static void blk_rq_timed_out_timer(struct timer_list *t) { struct request_queue *q = from_timer(q, t, timeout); kblockd_schedule_work(&q->timeout_work); } static void blk_timeout_work(struct work_struct *work) { } struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id) { struct request_queue *q; int error; q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO, node_id); if (!q) return ERR_PTR(-ENOMEM); q->last_merge = NULL; q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL); if (q->id < 0) { error = q->id; goto fail_q; } q->stats = blk_alloc_queue_stats(); if (!q->stats) { error = -ENOMEM; goto fail_id; } error = blk_set_default_limits(lim); if (error) goto fail_stats; q->limits = *lim; q->node = node_id; atomic_set(&q->nr_active_requests_shared_tags, 0); timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); INIT_WORK(&q->timeout_work, blk_timeout_work); INIT_LIST_HEAD(&q->icq_list); refcount_set(&q->refs, 1); mutex_init(&q->debugfs_mutex); mutex_init(&q->sysfs_lock); mutex_init(&q->sysfs_dir_lock); mutex_init(&q->limits_lock); mutex_init(&q->rq_qos_mutex); spin_lock_init(&q->queue_lock); init_waitqueue_head(&q->mq_freeze_wq); mutex_init(&q->mq_freeze_lock); blkg_init_queue(q); /* * Init percpu_ref in atomic mode so that it's faster to shutdown. * See blk_register_queue() for details. */ error = percpu_ref_init(&q->q_usage_counter, blk_queue_usage_counter_release, PERCPU_REF_INIT_ATOMIC, GFP_KERNEL); if (error) goto fail_stats; q->nr_requests = BLKDEV_DEFAULT_RQ; return q; fail_stats: blk_free_queue_stats(q->stats); fail_id: ida_free(&blk_queue_ida, q->id); fail_q: kmem_cache_free(blk_requestq_cachep, q); return ERR_PTR(error); } /** * blk_get_queue - increment the request_queue refcount * @q: the request_queue structure to increment the refcount for * * Increment the refcount of the request_queue kobject. * * Context: Any context. 
*/ bool blk_get_queue(struct request_queue *q) { if (unlikely(blk_queue_dying(q))) return false; refcount_inc(&q->refs); return true; } EXPORT_SYMBOL(blk_get_queue); #ifdef CONFIG_FAIL_MAKE_REQUEST static DECLARE_FAULT_ATTR(fail_make_request); static int __init setup_fail_make_request(char *str) { return setup_fault_attr(&fail_make_request, str); } __setup("fail_make_request=", setup_fail_make_request); bool should_fail_request(struct block_device *part, unsigned int bytes) { return bdev_test_flag(part, BD_MAKE_IT_FAIL) && should_fail(&fail_make_request, bytes); } static int __init fail_make_request_debugfs(void) { struct dentry *dir = fault_create_debugfs_attr("fail_make_request", NULL, &fail_make_request); return PTR_ERR_OR_ZERO(dir); } late_initcall(fail_make_request_debugfs); #endif /* CONFIG_FAIL_MAKE_REQUEST */ static inline void bio_check_ro(struct bio *bio) { if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) { if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) return; if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED)) return; bdev_set_flag(bio->bi_bdev, BD_RO_WARNED); /* * Use ioctl to set underlying disk of raid/dm to read-only * will trigger this. */ pr_warn("Trying to write to read-only block-device %pg\n", bio->bi_bdev); } } static noinline int should_fail_bio(struct bio *bio) { if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size)) return -EIO; return 0; } ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO); /* * Check whether this bio extends beyond the end of the device or partition. * This may well happen - the kernel calls bread() without checking the size of * the device, e.g., when mounting a file system. */ static inline int bio_check_eod(struct bio *bio) { sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); unsigned int nr_sectors = bio_sectors(bio); if (nr_sectors && (nr_sectors > maxsector || bio->bi_iter.bi_sector > maxsector - nr_sectors)) { pr_info_ratelimited("%s: attempt to access beyond end of device\n" "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n", current->comm, bio->bi_bdev, bio->bi_opf, bio->bi_iter.bi_sector, nr_sectors, maxsector); return -EIO; } return 0; } /* * Remap block n of partition p to block n+start(p) of the disk. */ static int blk_partition_remap(struct bio *bio) { struct block_device *p = bio->bi_bdev; if (unlikely(should_fail_request(p, bio->bi_iter.bi_size))) return -EIO; if (bio_sectors(bio)) { bio->bi_iter.bi_sector += p->bd_start_sect; trace_block_bio_remap(bio, p->bd_dev, bio->bi_iter.bi_sector - p->bd_start_sect); } bio_set_flag(bio, BIO_REMAPPED); return 0; } /* * Check write append to a zoned block device. */ static inline blk_status_t blk_check_zone_append(struct request_queue *q, struct bio *bio) { int nr_sectors = bio_sectors(bio); /* Only applicable to zoned block devices */ if (!bdev_is_zoned(bio->bi_bdev)) return BLK_STS_NOTSUPP; /* The bio sector must point to the start of a sequential zone */ if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector)) return BLK_STS_IOERR; /* * Not allowed to cross zone boundaries. Otherwise, the BIO will be * split and could result in non-contiguous sectors being written in * different zones. */ if (nr_sectors > q->limits.chunk_sectors) return BLK_STS_IOERR; /* Make sure the BIO is small enough and will not get split */ if (nr_sectors > queue_max_zone_append_sectors(q)) return BLK_STS_IOERR; bio->bi_opf |= REQ_NOMERGE; return BLK_STS_OK; } static void __submit_bio(struct bio *bio) { /* If plug is not used, add new plug here to cache nsecs time. 
*/ struct blk_plug plug; if (unlikely(!blk_crypto_bio_prep(&bio))) return; blk_start_plug(&plug); if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) { blk_mq_submit_bio(bio); } else if (likely(bio_queue_enter(bio) == 0)) { struct gendisk *disk = bio->bi_bdev->bd_disk; disk->fops->submit_bio(bio); blk_queue_exit(disk->queue); } blk_finish_plug(&plug); } /* * The loop in this function may be a bit non-obvious, and so deserves some * explanation: * * - Before entering the loop, bio->bi_next is NULL (as all callers ensure * that), so we have a list with a single bio. * - We pretend that we have just taken it off a longer list, so we assign * bio_list to a pointer to the bio_list_on_stack, thus initialising the * bio_list of new bios to be added. ->submit_bio() may indeed add some more * bios through a recursive call to submit_bio_noacct. If it did, we find a * non-NULL value in bio_list and re-enter the loop from the top. * - In this case we really did just take the bio of the top of the list (no * pretending) and so remove it from bio_list, and call into ->submit_bio() * again. * * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio. * bio_list_on_stack[1] contains bios that were submitted before the current * ->submit_bio, but that haven't been processed yet. */ static void __submit_bio_noacct(struct bio *bio) { struct bio_list bio_list_on_stack[2]; BUG_ON(bio->bi_next); bio_list_init(&bio_list_on_stack[0]); current->bio_list = bio_list_on_stack; do { struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct bio_list lower, same; /* * Create a fresh bio_list for all subordinate requests. */ bio_list_on_stack[1] = bio_list_on_stack[0]; bio_list_init(&bio_list_on_stack[0]); __submit_bio(bio); /* * Sort new bios into those for a lower level and those for the * same level. */ bio_list_init(&lower); bio_list_init(&same); while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) if (q == bdev_get_queue(bio->bi_bdev)) bio_list_add(&same, bio); else bio_list_add(&lower, bio); /* * Now assemble so we handle the lowest level first. */ bio_list_merge(&bio_list_on_stack[0], &lower); bio_list_merge(&bio_list_on_stack[0], &same); bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]); } while ((bio = bio_list_pop(&bio_list_on_stack[0]))); current->bio_list = NULL; } static void __submit_bio_noacct_mq(struct bio *bio) { struct bio_list bio_list[2] = { }; current->bio_list = bio_list; do { __submit_bio(bio); } while ((bio = bio_list_pop(&bio_list[0]))); current->bio_list = NULL; } void submit_bio_noacct_nocheck(struct bio *bio) { blk_cgroup_bio_start(bio); blkcg_bio_issue_init(bio); if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) { trace_block_bio_queue(bio); /* * Now that enqueuing has been traced, we need to trace * completion as well. */ bio_set_flag(bio, BIO_TRACE_COMPLETION); } /* * We only want one ->submit_bio to be active at a time, else stack * usage with stacked devices could be a problem. Use current->bio_list * to collect a list of requests submited by a ->submit_bio method while * it is active, and then process them after it returned. 
*/ if (current->bio_list) bio_list_add(¤t->bio_list[0], bio); else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) __submit_bio_noacct_mq(bio); else __submit_bio_noacct(bio); } static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q, struct bio *bio) { if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q)) return BLK_STS_INVAL; if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q)) return BLK_STS_INVAL; return BLK_STS_OK; } /** * submit_bio_noacct - re-submit a bio to the block device layer for I/O * @bio: The bio describing the location in memory and on the device. * * This is a version of submit_bio() that shall only be used for I/O that is * resubmitted to lower level drivers by stacking block drivers. All file * systems and other upper level users of the block layer should use * submit_bio() instead. */ void submit_bio_noacct(struct bio *bio) { struct block_device *bdev = bio->bi_bdev; struct request_queue *q = bdev_get_queue(bdev); blk_status_t status = BLK_STS_IOERR; might_sleep(); /* * For a REQ_NOWAIT based request, return -EOPNOTSUPP * if queue does not support NOWAIT. */ if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev)) goto not_supported; if (should_fail_bio(bio)) goto end_io; bio_check_ro(bio); if (!bio_flagged(bio, BIO_REMAPPED)) { if (unlikely(bio_check_eod(bio))) goto end_io; if (bdev_is_partition(bdev) && unlikely(blk_partition_remap(bio))) goto end_io; } /* * Filter flush bio's early so that bio based drivers without flush * support don't have to worry about them. */ if (op_is_flush(bio->bi_opf)) { if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE && bio_op(bio) != REQ_OP_ZONE_APPEND)) goto end_io; if (!bdev_write_cache(bdev)) { bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); if (!bio_sectors(bio)) { status = BLK_STS_OK; goto end_io; } } } if (!(q->limits.features & BLK_FEAT_POLL) && (bio->bi_opf & REQ_POLLED)) { bio_clear_polled(bio); goto not_supported; } switch (bio_op(bio)) { case REQ_OP_READ: break; case REQ_OP_WRITE: if (bio->bi_opf & REQ_ATOMIC) { status = blk_validate_atomic_write_op_size(q, bio); if (status != BLK_STS_OK) goto end_io; } break; case REQ_OP_FLUSH: /* * REQ_OP_FLUSH can't be submitted through bios, it is only * synthetized in struct request by the flush state machine. */ goto not_supported; case REQ_OP_DISCARD: if (!bdev_max_discard_sectors(bdev)) goto not_supported; break; case REQ_OP_SECURE_ERASE: if (!bdev_max_secure_erase_sectors(bdev)) goto not_supported; break; case REQ_OP_ZONE_APPEND: status = blk_check_zone_append(q, bio); if (status != BLK_STS_OK) goto end_io; break; case REQ_OP_WRITE_ZEROES: if (!q->limits.max_write_zeroes_sectors) goto not_supported; break; case REQ_OP_ZONE_RESET: case REQ_OP_ZONE_OPEN: case REQ_OP_ZONE_CLOSE: case REQ_OP_ZONE_FINISH: case REQ_OP_ZONE_RESET_ALL: if (!bdev_is_zoned(bio->bi_bdev)) goto not_supported; break; case REQ_OP_DRV_IN: case REQ_OP_DRV_OUT: /* * Driver private operations are only used with passthrough * requests. */ fallthrough; default: goto not_supported; } if (blk_throtl_bio(bio)) return; submit_bio_noacct_nocheck(bio); return; not_supported: status = BLK_STS_NOTSUPP; end_io: bio->bi_status = status; bio_endio(bio); } EXPORT_SYMBOL(submit_bio_noacct); static void bio_set_ioprio(struct bio *bio) { /* Nobody set ioprio so far? 
Initialize it based on task's nice value */ if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE) bio->bi_ioprio = get_current_ioprio(); blkcg_set_ioprio(bio); } /** * submit_bio - submit a bio to the block device layer for I/O * @bio: The &struct bio which describes the I/O * * submit_bio() is used to submit I/O requests to block devices. It is passed a * fully set up &struct bio that describes the I/O that needs to be done. The * bio will be send to the device described by the bi_bdev field. * * The success/failure status of the request, along with notification of * completion, is delivered asynchronously through the ->bi_end_io() callback * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has * been called. */ void submit_bio(struct bio *bio) { if (bio_op(bio) == REQ_OP_READ) { task_io_account_read(bio->bi_iter.bi_size); count_vm_events(PGPGIN, bio_sectors(bio)); } else if (bio_op(bio) == REQ_OP_WRITE) { count_vm_events(PGPGOUT, bio_sectors(bio)); } bio_set_ioprio(bio); submit_bio_noacct(bio); } EXPORT_SYMBOL(submit_bio); /** * bio_poll - poll for BIO completions * @bio: bio to poll for * @iob: batches of IO * @flags: BLK_POLL_* flags that control the behavior * * Poll for completions on queue associated with the bio. Returns number of * completed entries found. * * Note: the caller must either be the context that submitted @bio, or * be in a RCU critical section to prevent freeing of @bio. */ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags) { blk_qc_t cookie = READ_ONCE(bio->bi_cookie); struct block_device *bdev; struct request_queue *q; int ret = 0; bdev = READ_ONCE(bio->bi_bdev); if (!bdev) return 0; q = bdev_get_queue(bdev); if (cookie == BLK_QC_T_NONE || !(q->limits.features & BLK_FEAT_POLL)) return 0; blk_flush_plug(current->plug, false); /* * We need to be able to enter a frozen queue, similar to how * timeouts also need to do that. If that is blocked, then we can * have pending IO when a queue freeze is started, and then the * wait for the freeze to finish will wait for polled requests to * timeout as the poller is preventer from entering the queue and * completing them. As long as we prevent new IO from being queued, * that should be all that matters. */ if (!percpu_ref_tryget(&q->q_usage_counter)) return 0; if (queue_is_mq(q)) { ret = blk_mq_poll(q, cookie, iob, flags); } else { struct gendisk *disk = q->disk; if (disk && disk->fops->poll_bio) ret = disk->fops->poll_bio(bio, iob, flags); } blk_queue_exit(q); return ret; } EXPORT_SYMBOL_GPL(bio_poll); /* * Helper to implement file_operations.iopoll. Requires the bio to be stored * in iocb->private, and cleared before freeing the bio. */ int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob, unsigned int flags) { struct bio *bio; int ret = 0; /* * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can * point to a freshly allocated bio at this point. If that happens * we have a few cases to consider: * * 1) the bio is beeing initialized and bi_bdev is NULL. We can just * simply nothing in this case * 2) the bio points to a not poll enabled device. bio_poll will catch * this and return 0 * 3) the bio points to a poll capable device, including but not * limited to the one that the original bio pointed to. In this * case we will call into the actual poll method and poll for I/O, * even if we don't need to, but it won't cause harm either. * * For cases 2) and 3) above the RCU grace period ensures that bi_bdev * is still allocated. 
Because partitions hold a reference to the whole * device bdev and thus disk, the disk is also still valid. Grabbing * a reference to the queue in bio_poll() ensures the hctxs and requests * are still valid as well. */ rcu_read_lock(); bio = READ_ONCE(kiocb->private); if (bio) ret = bio_poll(bio, iob, flags); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(iocb_bio_iopoll); void update_io_ticks(struct block_device *part, unsigned long now, bool end) { unsigned long stamp; again: stamp = READ_ONCE(part->bd_stamp); if (unlikely(time_after(now, stamp)) && likely(try_cmpxchg(&part->bd_stamp, &stamp, now)) && (end || part_in_flight(part))) __part_stat_add(part, io_ticks, now - stamp); if (bdev_is_partition(part)) { part = bdev_whole(part); goto again; } } unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op, unsigned long start_time) { part_stat_lock(); update_io_ticks(bdev, start_time, false); part_stat_local_inc(bdev, in_flight[op_is_write(op)]); part_stat_unlock(); return start_time; } EXPORT_SYMBOL(bdev_start_io_acct); /** * bio_start_io_acct - start I/O accounting for bio based drivers * @bio: bio to start account for * * Returns the start time that should be passed back to bio_end_io_acct(). */ unsigned long bio_start_io_acct(struct bio *bio) { return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies); } EXPORT_SYMBOL_GPL(bio_start_io_acct); void bdev_end_io_acct(struct block_device *bdev, enum req_op op, unsigned int sectors, unsigned long start_time) { const int sgrp = op_stat_group(op); unsigned long now = READ_ONCE(jiffies); unsigned long duration = now - start_time; part_stat_lock(); update_io_ticks(bdev, now, true); part_stat_inc(bdev, ios[sgrp]); part_stat_add(bdev, sectors[sgrp], sectors); part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration)); part_stat_local_dec(bdev, in_flight[op_is_write(op)]); part_stat_unlock(); } EXPORT_SYMBOL(bdev_end_io_acct); void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time, struct block_device *orig_bdev) { bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time); } EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped); /** * blk_lld_busy - Check if underlying low-level drivers of a device are busy * @q : the queue of the device being checked * * Description: * Check if underlying low-level drivers of a device are busy. * If the drivers want to export their busy state, they must set own * exporting function using blk_queue_lld_busy() first. * * Basically, this function is used only by request stacking drivers * to stop dispatching requests to underlying devices when underlying * devices are busy. This behavior helps more I/O merging on the queue * of the request stacking driver and prevents I/O throughput regression * on burst I/O load. 
* * Return: * 0 - Not busy (The request stacking driver should dispatch request) * 1 - Busy (The request stacking driver should stop dispatching request) */ int blk_lld_busy(struct request_queue *q) { if (queue_is_mq(q) && q->mq_ops->busy) return q->mq_ops->busy(q); return 0; } EXPORT_SYMBOL_GPL(blk_lld_busy); int kblockd_schedule_work(struct work_struct *work) { return queue_work(kblockd_workqueue, work); } EXPORT_SYMBOL(kblockd_schedule_work); int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay) { return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); } EXPORT_SYMBOL(kblockd_mod_delayed_work_on); void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios) { struct task_struct *tsk = current; /* * If this is a nested plug, don't actually assign it. */ if (tsk->plug) return; plug->cur_ktime = 0; plug->mq_list = NULL; plug->cached_rq = NULL; plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT); plug->rq_count = 0; plug->multiple_queues = false; plug->has_elevator = false; INIT_LIST_HEAD(&plug->cb_list); /* * Store ordering should not be needed here, since a potential * preempt will imply a full memory barrier */ tsk->plug = plug; } /** * blk_start_plug - initialize blk_plug and track it inside the task_struct * @plug: The &struct blk_plug that needs to be initialized * * Description: * blk_start_plug() indicates to the block layer an intent by the caller * to submit multiple I/O requests in a batch. The block layer may use * this hint to defer submitting I/Os from the caller until blk_finish_plug() * is called. However, the block layer may choose to submit requests * before a call to blk_finish_plug() if the number of queued I/Os * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if * the task schedules (see below). * * Tracking blk_plug inside the task_struct will help with auto-flushing the * pending I/O should the task end up blocking between blk_start_plug() and * blk_finish_plug(). This is important from a performance perspective, but * also ensures that we don't deadlock. For instance, if the task is blocking * for a memory allocation, memory reclaim could end up wanting to free a * page belonging to that request that is currently residing in our private * plug. By flushing the pending I/O when the process goes to sleep, we avoid * this kind of deadlock. 
*/ void blk_start_plug(struct blk_plug *plug) { blk_start_plug_nr_ios(plug, 1); } EXPORT_SYMBOL(blk_start_plug); static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) { LIST_HEAD(callbacks); while (!list_empty(&plug->cb_list)) { list_splice_init(&plug->cb_list, &callbacks); while (!list_empty(&callbacks)) { struct blk_plug_cb *cb = list_first_entry(&callbacks, struct blk_plug_cb, list); list_del(&cb->list); cb->callback(cb, from_schedule); } } } struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, int size) { struct blk_plug *plug = current->plug; struct blk_plug_cb *cb; if (!plug) return NULL; list_for_each_entry(cb, &plug->cb_list, list) if (cb->callback == unplug && cb->data == data) return cb; /* Not currently on the callback list */ BUG_ON(size < sizeof(*cb)); cb = kzalloc(size, GFP_ATOMIC); if (cb) { cb->data = data; cb->callback = unplug; list_add(&cb->list, &plug->cb_list); } return cb; } EXPORT_SYMBOL(blk_check_plugged); void __blk_flush_plug(struct blk_plug *plug, bool from_schedule) { if (!list_empty(&plug->cb_list)) flush_plug_callbacks(plug, from_schedule); blk_mq_flush_plug_list(plug, from_schedule); /* * Unconditionally flush out cached requests, even if the unplug * event came from schedule. Since we know hold references to the * queue for cached requests, we don't want a blocked task holding * up a queue freeze/quiesce event. */ if (unlikely(!rq_list_empty(plug->cached_rq))) blk_mq_free_plug_rqs(plug); plug->cur_ktime = 0; current->flags &= ~PF_BLOCK_TS; } /** * blk_finish_plug - mark the end of a batch of submitted I/O * @plug: The &struct blk_plug passed to blk_start_plug() * * Description: * Indicate that a batch of I/O submissions is complete. This function * must be paired with an initial call to blk_start_plug(). The intent * is to allow the block layer to optimize I/O submission. See the * documentation for blk_start_plug() for more information. */ void blk_finish_plug(struct blk_plug *plug) { if (plug == current->plug) { __blk_flush_plug(plug, false); current->plug = NULL; } } EXPORT_SYMBOL(blk_finish_plug); void blk_io_schedule(void) { /* Prevent hang_check timer from firing at us during very long I/O */ unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2; if (timeout) io_schedule_timeout(timeout); else io_schedule(); } EXPORT_SYMBOL_GPL(blk_io_schedule); int __init blk_dev_init(void) { BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS)); BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * sizeof_field(struct request, cmd_flags)); BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * sizeof_field(struct bio, bi_opf)); /* used for unplugging and affects IO latency/throughput - HIGHPRI */ kblockd_workqueue = alloc_workqueue("kblockd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); if (!kblockd_workqueue) panic("Failed to create kblockd\n"); blk_requestq_cachep = KMEM_CACHE(request_queue, SLAB_PANIC); blk_debugfs_root = debugfs_create_dir("block", NULL); return 0; } |
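As a usage note for the plugging interface documented above, a submitter that issues several bios back to back typically brackets them with blk_start_plug()/blk_finish_plug(); the sketch below is a hypothetical illustration (the helper name and bio array are assumptions for the example), not code from this file:

/*
 * Hypothetical illustration of the plugging pattern described in the
 * blk_start_plug()/blk_finish_plug() documentation above.
 */
#include <linux/blkdev.h>
#include <linux/bio.h>

static void demo_submit_batch(struct bio **bios, unsigned int nr)
{
	struct blk_plug plug;
	unsigned int i;

	/* Tell the block layer that a batch of submissions follows. */
	blk_start_plug(&plug);

	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);

	/* Flush whatever is still held back in the plug. */
	blk_finish_plug(&plug);
}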
/* SPDX-License-Identifier: GPL-2.0-only */
/* Driver for Realtek RTS5139 USB card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
* * Author: * Roger Tseng <rogerable@realtek.com> */ #ifndef __RTSX_USB_H #define __RTSX_USB_H #include <linux/usb.h> /* related module names */ #define RTSX_USB_SD_CARD 0 #define RTSX_USB_MS_CARD 1 /* endpoint numbers */ #define EP_BULK_OUT 1 #define EP_BULK_IN 2 #define EP_INTR_IN 3 /* USB vendor requests */ #define RTSX_USB_REQ_REG_OP 0x00 #define RTSX_USB_REQ_POLL 0x02 /* miscellaneous parameters */ #define MIN_DIV_N 60 #define MAX_DIV_N 120 #define MAX_PHASE 15 #define RX_TUNING_CNT 3 #define QFN24 0 #define LQFP48 1 #define CHECK_PKG(ucr, pkg) ((ucr)->package == (pkg)) /* data structures */ struct rtsx_ucr { u16 vendor_id; u16 product_id; int package; u8 ic_version; bool is_rts5179; unsigned int cur_clk; u8 *cmd_buf; unsigned int cmd_idx; u8 *rsp_buf; struct usb_device *pusb_dev; struct usb_interface *pusb_intf; struct usb_sg_request current_sg; struct timer_list sg_timer; struct mutex dev_mutex; }; /* buffer size */ #define IOBUF_SIZE 1024 /* prototypes of exported functions */ extern int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status); extern int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data); extern int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, u8 data); extern int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, u8 data); extern int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data); extern void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type, u16 reg_addr, u8 mask, u8 data); extern int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout); extern int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout); extern int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe, void *buf, unsigned int len, int use_sg, unsigned int *act_len, int timeout); extern int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len); extern int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len); extern int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock, u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); extern int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card); /* card status */ #define SD_CD 0x01 #define MS_CD 0x02 #define XD_CD 0x04 #define CD_MASK (SD_CD | MS_CD | XD_CD) #define SD_WP 0x08 /* reader command field offset & parameters */ #define READ_REG_CMD 0 #define WRITE_REG_CMD 1 #define CHECK_REG_CMD 2 #define PACKET_TYPE 4 #define CNT_H 5 #define CNT_L 6 #define STAGE_FLAG 7 #define CMD_OFFSET 8 #define SEQ_WRITE_DATA_OFFSET 12 #define BATCH_CMD 0 #define SEQ_READ 1 #define SEQ_WRITE 2 #define STAGE_R 0x01 #define STAGE_DI 0x02 #define STAGE_DO 0x04 #define STAGE_MS_STATUS 0x08 #define STAGE_XD_STATUS 0x10 #define MODE_C 0x00 #define MODE_CR (STAGE_R) #define MODE_CDIR (STAGE_R | STAGE_DI) #define MODE_CDOR (STAGE_R | STAGE_DO) #define EP0_OP_SHIFT 14 #define EP0_READ_REG_CMD 2 #define EP0_WRITE_REG_CMD 3 #define rtsx_usb_cmd_hdr_tag(ucr) \ do { \ ucr->cmd_buf[0] = 'R'; \ ucr->cmd_buf[1] = 'T'; \ ucr->cmd_buf[2] = 'C'; \ ucr->cmd_buf[3] = 'R'; \ } while (0) static inline void rtsx_usb_init_cmd(struct rtsx_ucr *ucr) { rtsx_usb_cmd_hdr_tag(ucr); ucr->cmd_idx = 0; ucr->cmd_buf[PACKET_TYPE] = BATCH_CMD; } /* internal register address */ #define FPDCTL 0xFC00 #define SSC_DIV_N_0 0xFC07 #define SSC_CTL1 0xFC09 #define SSC_CTL2 0xFC0A #define CFG_MODE 0xFC0E #define CFG_MODE_1 0xFC0F #define RCCTL 0xFC14 #define SOF_WDOG 0xFC28 #define SYS_DUMMY0 0xFC30 #define MS_BLKEND 0xFD30 #define MS_READ_START 0xFD31 
#define MS_READ_COUNT 0xFD32 #define MS_WRITE_START 0xFD33 #define MS_WRITE_COUNT 0xFD34 #define MS_COMMAND 0xFD35 #define MS_OLD_BLOCK_0 0xFD36 #define MS_OLD_BLOCK_1 0xFD37 #define MS_NEW_BLOCK_0 0xFD38 #define MS_NEW_BLOCK_1 0xFD39 #define MS_LOG_BLOCK_0 0xFD3A #define MS_LOG_BLOCK_1 0xFD3B #define MS_BUS_WIDTH 0xFD3C #define MS_PAGE_START 0xFD3D #define MS_PAGE_LENGTH 0xFD3E #define MS_CFG 0xFD40 #define MS_TPC 0xFD41 #define MS_TRANS_CFG 0xFD42 #define MS_TRANSFER 0xFD43 #define MS_INT_REG 0xFD44 #define MS_BYTE_CNT 0xFD45 #define MS_SECTOR_CNT_L 0xFD46 #define MS_SECTOR_CNT_H 0xFD47 #define MS_DBUS_H 0xFD48 #define CARD_DMA1_CTL 0xFD5C #define CARD_PULL_CTL1 0xFD60 #define CARD_PULL_CTL2 0xFD61 #define CARD_PULL_CTL3 0xFD62 #define CARD_PULL_CTL4 0xFD63 #define CARD_PULL_CTL5 0xFD64 #define CARD_PULL_CTL6 0xFD65 #define CARD_EXIST 0xFD6F #define CARD_INT_PEND 0xFD71 #define LDO_POWER_CFG 0xFD7B #define SD_CFG1 0xFDA0 #define SD_CFG2 0xFDA1 #define SD_CFG3 0xFDA2 #define SD_STAT1 0xFDA3 #define SD_STAT2 0xFDA4 #define SD_BUS_STAT 0xFDA5 #define SD_PAD_CTL 0xFDA6 #define SD_SAMPLE_POINT_CTL 0xFDA7 #define SD_PUSH_POINT_CTL 0xFDA8 #define SD_CMD0 0xFDA9 #define SD_CMD1 0xFDAA #define SD_CMD2 0xFDAB #define SD_CMD3 0xFDAC #define SD_CMD4 0xFDAD #define SD_CMD5 0xFDAE #define SD_BYTE_CNT_L 0xFDAF #define SD_BYTE_CNT_H 0xFDB0 #define SD_BLOCK_CNT_L 0xFDB1 #define SD_BLOCK_CNT_H 0xFDB2 #define SD_TRANSFER 0xFDB3 #define SD_CMD_STATE 0xFDB5 #define SD_DATA_STATE 0xFDB6 #define SD_VPCLK0_CTL 0xFC2A #define SD_VPCLK1_CTL 0xFC2B #define SD_DCMPS0_CTL 0xFC2C #define SD_DCMPS1_CTL 0xFC2D #define CARD_DMA1_CTL 0xFD5C #define HW_VERSION 0xFC01 #define SSC_CLK_FPGA_SEL 0xFC02 #define CLK_DIV 0xFC03 #define SFSM_ED 0xFC04 #define CD_DEGLITCH_WIDTH 0xFC20 #define CD_DEGLITCH_EN 0xFC21 #define AUTO_DELINK_EN 0xFC23 #define FPGA_PULL_CTL 0xFC1D #define CARD_CLK_SOURCE 0xFC2E #define CARD_SHARE_MODE 0xFD51 #define CARD_DRIVE_SEL 0xFD52 #define CARD_STOP 0xFD53 #define CARD_OE 0xFD54 #define CARD_AUTO_BLINK 0xFD55 #define CARD_GPIO 0xFD56 #define SD30_DRIVE_SEL 0xFD57 #define CARD_DATA_SOURCE 0xFD5D #define CARD_SELECT 0xFD5E #define CARD_CLK_EN 0xFD79 #define CARD_PWR_CTL 0xFD7A #define OCPCTL 0xFD80 #define OCPPARA1 0xFD81 #define OCPPARA2 0xFD82 #define OCPSTAT 0xFD83 #define HS_USB_STAT 0xFE01 #define HS_VCONTROL 0xFE26 #define HS_VSTAIN 0xFE27 #define HS_VLOADM 0xFE28 #define HS_VSTAOUT 0xFE29 #define MC_IRQ 0xFF00 #define MC_IRQEN 0xFF01 #define MC_FIFO_CTL 0xFF02 #define MC_FIFO_BC0 0xFF03 #define MC_FIFO_BC1 0xFF04 #define MC_FIFO_STAT 0xFF05 #define MC_FIFO_MODE 0xFF06 #define MC_FIFO_RD_PTR0 0xFF07 #define MC_FIFO_RD_PTR1 0xFF08 #define MC_DMA_CTL 0xFF10 #define MC_DMA_TC0 0xFF11 #define MC_DMA_TC1 0xFF12 #define MC_DMA_TC2 0xFF13 #define MC_DMA_TC3 0xFF14 #define MC_DMA_RST 0xFF15 #define RBUF_SIZE_MASK 0xFBFF #define RBUF_BASE 0xF000 #define PPBUF_BASE1 0xF800 #define PPBUF_BASE2 0xFA00 /* internal register value macros */ #define POWER_OFF 0x03 #define PARTIAL_POWER_ON 0x02 #define POWER_ON 0x00 #define POWER_MASK 0x03 #define LDO3318_PWR_MASK 0x0C #define LDO_ON 0x00 #define LDO_SUSPEND 0x08 #define LDO_OFF 0x0C #define DV3318_AUTO_PWR_OFF 0x10 #define FORCE_LDO_POWERB 0x60 /* LDO_POWER_CFG */ #define TUNE_SD18_MASK 0x1C #define TUNE_SD18_1V7 0x00 #define TUNE_SD18_1V8 (0x01 << 2) #define TUNE_SD18_1V9 (0x02 << 2) #define TUNE_SD18_2V0 (0x03 << 2) #define TUNE_SD18_2V7 (0x04 << 2) #define TUNE_SD18_2V8 (0x05 << 2) #define TUNE_SD18_2V9 (0x06 << 2) #define TUNE_SD18_3V3 (0x07 << 2) /* CLK_DIV 
*/ #define CLK_CHANGE 0x80 #define CLK_DIV_1 0x00 #define CLK_DIV_2 0x01 #define CLK_DIV_4 0x02 #define CLK_DIV_8 0x03 #define SSC_POWER_MASK 0x01 #define SSC_POWER_DOWN 0x01 #define SSC_POWER_ON 0x00 #define FPGA_VER 0x80 #define HW_VER_MASK 0x0F #define EXTEND_DMA1_ASYNC_SIGNAL 0x02 /* CFG_MODE*/ #define XTAL_FREE 0x80 #define CLK_MODE_MASK 0x03 #define CLK_MODE_12M_XTAL 0x00 #define CLK_MODE_NON_XTAL 0x01 #define CLK_MODE_24M_OSC 0x02 #define CLK_MODE_48M_OSC 0x03 /* CFG_MODE_1*/ #define RTS5179 0x02 #define NYET_EN 0x01 #define NYET_MSAK 0x01 #define SD30_DRIVE_MASK 0x07 #define SD20_DRIVE_MASK 0x03 #define DISABLE_SD_CD 0x08 #define DISABLE_MS_CD 0x10 #define DISABLE_XD_CD 0x20 #define SD_CD_DEGLITCH_EN 0x01 #define MS_CD_DEGLITCH_EN 0x02 #define XD_CD_DEGLITCH_EN 0x04 #define CARD_SHARE_LQFP48 0x04 #define CARD_SHARE_QFN24 0x00 #define CARD_SHARE_LQFP_SEL 0x04 #define CARD_SHARE_XD 0x00 #define CARD_SHARE_SD 0x01 #define CARD_SHARE_MS 0x02 #define CARD_SHARE_MASK 0x03 /* SD30_DRIVE_SEL */ #define DRIVER_TYPE_A 0x05 #define DRIVER_TYPE_B 0x03 #define DRIVER_TYPE_C 0x02 #define DRIVER_TYPE_D 0x01 /* SD_BUS_STAT */ #define SD_CLK_TOGGLE_EN 0x80 #define SD_CLK_FORCE_STOP 0x40 #define SD_DAT3_STATUS 0x10 #define SD_DAT2_STATUS 0x08 #define SD_DAT1_STATUS 0x04 #define SD_DAT0_STATUS 0x02 #define SD_CMD_STATUS 0x01 /* SD_PAD_CTL */ #define SD_IO_USING_1V8 0x80 #define SD_IO_USING_3V3 0x7F #define TYPE_A_DRIVING 0x00 #define TYPE_B_DRIVING 0x01 #define TYPE_C_DRIVING 0x02 #define TYPE_D_DRIVING 0x03 /* CARD_CLK_EN */ #define SD_CLK_EN 0x04 #define MS_CLK_EN 0x08 /* CARD_SELECT */ #define SD_MOD_SEL 2 #define MS_MOD_SEL 3 /* CARD_SHARE_MODE */ #define CARD_SHARE_LQFP48 0x04 #define CARD_SHARE_QFN24 0x00 #define CARD_SHARE_LQFP_SEL 0x04 #define CARD_SHARE_XD 0x00 #define CARD_SHARE_SD 0x01 #define CARD_SHARE_MS 0x02 #define CARD_SHARE_MASK 0x03 /* SSC_CTL1 */ #define SSC_RSTB 0x80 #define SSC_8X_EN 0x40 #define SSC_FIX_FRAC 0x20 #define SSC_SEL_1M 0x00 #define SSC_SEL_2M 0x08 #define SSC_SEL_4M 0x10 #define SSC_SEL_8M 0x18 /* SSC_CTL2 */ #define SSC_DEPTH_MASK 0x03 #define SSC_DEPTH_DISALBE 0x00 #define SSC_DEPTH_2M 0x01 #define SSC_DEPTH_1M 0x02 #define SSC_DEPTH_512K 0x03 /* SD_VPCLK0_CTL */ #define PHASE_CHANGE 0x80 #define PHASE_NOT_RESET 0x40 /* SD_TRANSFER */ #define SD_TRANSFER_START 0x80 #define SD_TRANSFER_END 0x40 #define SD_STAT_IDLE 0x20 #define SD_TRANSFER_ERR 0x10 #define SD_TM_NORMAL_WRITE 0x00 #define SD_TM_AUTO_WRITE_3 0x01 #define SD_TM_AUTO_WRITE_4 0x02 #define SD_TM_AUTO_READ_3 0x05 #define SD_TM_AUTO_READ_4 0x06 #define SD_TM_CMD_RSP 0x08 #define SD_TM_AUTO_WRITE_1 0x09 #define SD_TM_AUTO_WRITE_2 0x0A #define SD_TM_NORMAL_READ 0x0C #define SD_TM_AUTO_READ_1 0x0D #define SD_TM_AUTO_READ_2 0x0E #define SD_TM_AUTO_TUNING 0x0F /* SD_CFG1 */ #define SD_CLK_DIVIDE_0 0x00 #define SD_CLK_DIVIDE_256 0xC0 #define SD_CLK_DIVIDE_128 0x80 #define SD_CLK_DIVIDE_MASK 0xC0 #define SD_BUS_WIDTH_1BIT 0x00 #define SD_BUS_WIDTH_4BIT 0x01 #define SD_BUS_WIDTH_8BIT 0x02 #define SD_ASYNC_FIFO_RST 0x10 #define SD_20_MODE 0x00 #define SD_DDR_MODE 0x04 #define SD_30_MODE 0x08 /* SD_CFG2 */ #define SD_CALCULATE_CRC7 0x00 #define SD_NO_CALCULATE_CRC7 0x80 #define SD_CHECK_CRC16 0x00 #define SD_NO_CHECK_CRC16 0x40 #define SD_WAIT_CRC_TO_EN 0x20 #define SD_WAIT_BUSY_END 0x08 #define SD_NO_WAIT_BUSY_END 0x00 #define SD_CHECK_CRC7 0x00 #define SD_NO_CHECK_CRC7 0x04 #define SD_RSP_LEN_0 0x00 #define SD_RSP_LEN_6 0x01 #define SD_RSP_LEN_17 0x02 #define SD_RSP_TYPE_R0 0x04 #define SD_RSP_TYPE_R1 0x01 
#define SD_RSP_TYPE_R1b 0x09 #define SD_RSP_TYPE_R2 0x02 #define SD_RSP_TYPE_R3 0x05 #define SD_RSP_TYPE_R4 0x05 #define SD_RSP_TYPE_R5 0x01 #define SD_RSP_TYPE_R6 0x01 #define SD_RSP_TYPE_R7 0x01 /* SD_STAT1 */ #define SD_CRC7_ERR 0x80 #define SD_CRC16_ERR 0x40 #define SD_CRC_WRITE_ERR 0x20 #define SD_CRC_WRITE_ERR_MASK 0x1C #define GET_CRC_TIME_OUT 0x02 #define SD_TUNING_COMPARE_ERR 0x01 /* SD_DATA_STATE */ #define SD_DATA_IDLE 0x80 /* CARD_DATA_SOURCE */ #define PINGPONG_BUFFER 0x01 #define RING_BUFFER 0x00 /* CARD_OE */ #define SD_OUTPUT_EN 0x04 #define MS_OUTPUT_EN 0x08 /* CARD_STOP */ #define SD_STOP 0x04 #define MS_STOP 0x08 #define SD_CLR_ERR 0x40 #define MS_CLR_ERR 0x80 /* CARD_CLK_SOURCE */ #define CRC_FIX_CLK (0x00 << 0) #define CRC_VAR_CLK0 (0x01 << 0) #define CRC_VAR_CLK1 (0x02 << 0) #define SD30_FIX_CLK (0x00 << 2) #define SD30_VAR_CLK0 (0x01 << 2) #define SD30_VAR_CLK1 (0x02 << 2) #define SAMPLE_FIX_CLK (0x00 << 4) #define SAMPLE_VAR_CLK0 (0x01 << 4) #define SAMPLE_VAR_CLK1 (0x02 << 4) /* SD_SAMPLE_POINT_CTL */ #define DDR_FIX_RX_DAT 0x00 #define DDR_VAR_RX_DAT 0x80 #define DDR_FIX_RX_DAT_EDGE 0x00 #define DDR_FIX_RX_DAT_14_DELAY 0x40 #define DDR_FIX_RX_CMD 0x00 #define DDR_VAR_RX_CMD 0x20 #define DDR_FIX_RX_CMD_POS_EDGE 0x00 #define DDR_FIX_RX_CMD_14_DELAY 0x10 #define SD20_RX_POS_EDGE 0x00 #define SD20_RX_14_DELAY 0x08 #define SD20_RX_SEL_MASK 0x08 /* SD_PUSH_POINT_CTL */ #define DDR_FIX_TX_CMD_DAT 0x00 #define DDR_VAR_TX_CMD_DAT 0x80 #define DDR_FIX_TX_DAT_14_TSU 0x00 #define DDR_FIX_TX_DAT_12_TSU 0x40 #define DDR_FIX_TX_CMD_NEG_EDGE 0x00 #define DDR_FIX_TX_CMD_14_AHEAD 0x20 #define SD20_TX_NEG_EDGE 0x00 #define SD20_TX_14_AHEAD 0x10 #define SD20_TX_SEL_MASK 0x10 #define DDR_VAR_SDCLK_POL_SWAP 0x01 /* MS_CFG */ #define SAMPLE_TIME_RISING 0x00 #define SAMPLE_TIME_FALLING 0x80 #define PUSH_TIME_DEFAULT 0x00 #define PUSH_TIME_ODD 0x40 #define NO_EXTEND_TOGGLE 0x00 #define EXTEND_TOGGLE_CHK 0x20 #define MS_BUS_WIDTH_1 0x00 #define MS_BUS_WIDTH_4 0x10 #define MS_BUS_WIDTH_8 0x18 #define MS_2K_SECTOR_MODE 0x04 #define MS_512_SECTOR_MODE 0x00 #define MS_TOGGLE_TIMEOUT_EN 0x00 #define MS_TOGGLE_TIMEOUT_DISEN 0x01 #define MS_NO_CHECK_INT 0x02 /* MS_TRANS_CFG */ #define WAIT_INT 0x80 #define NO_WAIT_INT 0x00 #define NO_AUTO_READ_INT_REG 0x00 #define AUTO_READ_INT_REG 0x40 #define MS_CRC16_ERR 0x20 #define MS_RDY_TIMEOUT 0x10 #define MS_INT_CMDNK 0x08 #define MS_INT_BREQ 0x04 #define MS_INT_ERR 0x02 #define MS_INT_CED 0x01 /* MS_TRANSFER */ #define MS_TRANSFER_START 0x80 #define MS_TRANSFER_END 0x40 #define MS_TRANSFER_ERR 0x20 #define MS_BS_STATE 0x10 #define MS_TM_READ_BYTES 0x00 #define MS_TM_NORMAL_READ 0x01 #define MS_TM_WRITE_BYTES 0x04 #define MS_TM_NORMAL_WRITE 0x05 #define MS_TM_AUTO_READ 0x08 #define MS_TM_AUTO_WRITE 0x0C #define MS_TM_SET_CMD 0x06 #define MS_TM_COPY_PAGE 0x07 #define MS_TM_MULTI_READ 0x02 #define MS_TM_MULTI_WRITE 0x03 /* MC_FIFO_CTL */ #define FIFO_FLUSH 0x01 /* MC_DMA_RST */ #define DMA_RESET 0x01 /* MC_DMA_CTL */ #define DMA_TC_EQ_0 0x80 #define DMA_DIR_TO_CARD 0x00 #define DMA_DIR_FROM_CARD 0x02 #define DMA_EN 0x01 #define DMA_128 (0 << 2) #define DMA_256 (1 << 2) #define DMA_512 (2 << 2) #define DMA_1024 (3 << 2) #define DMA_PACK_SIZE_MASK 0x0C /* CARD_INT_PEND */ #define XD_INT 0x10 #define MS_INT 0x08 #define SD_INT 0x04 /* LED operations*/ static inline int rtsx_usb_turn_on_led(struct rtsx_ucr *ucr) { return rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x02); } static inline int rtsx_usb_turn_off_led(struct rtsx_ucr *ucr) { return 
rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x03); } /* HW error clearing */ static inline void rtsx_usb_clear_fsm_err(struct rtsx_ucr *ucr) { rtsx_usb_ep0_write_register(ucr, SFSM_ED, 0xf8, 0xf8); } static inline void rtsx_usb_clear_dma_err(struct rtsx_ucr *ucr) { rtsx_usb_ep0_write_register(ucr, MC_FIFO_CTL, FIFO_FLUSH, FIFO_FLUSH); rtsx_usb_ep0_write_register(ucr, MC_DMA_RST, DMA_RESET, DMA_RESET); } #endif /* __RTSX_USB_H */
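A minimal usage sketch of the batched command interface declared above: register operations are queued with rtsx_usb_init_cmd()/rtsx_usb_add_cmd(), the batch is issued with rtsx_usb_send_cmd(), and the reply is collected with rtsx_usb_get_rsp(). The helper name and the 100 ms timeouts are illustrative only, and it is assumed (from the struct layout above) that response bytes are returned in ucr->rsp_buf.

/* Illustrative sketch only: read the card-detect register with one batched command. */
static int example_read_card_exist(struct rtsx_ucr *ucr, u8 *val)
{
	int ret;

	rtsx_usb_init_cmd(ucr);					/* write header tag, reset command index */
	rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_EXIST, 0, 0);	/* queue one register read */

	ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100);		/* send the batch, expect a response stage */
	if (ret)
		return ret;

	ret = rtsx_usb_get_rsp(ucr, 1, 100);			/* one byte expected back */
	if (ret)
		return ret;

	*val = ucr->rsp_buf[0];					/* assumed: response lands in rsp_buf */
	return 0;
}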
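What follows is the hrtimer core itself. For orientation, here is a minimal consumer-side sketch of the API it implements, assuming the classic hrtimer_init()/hrtimer_start() interface; the demo_* names and the 100 ms period are hypothetical.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;	/* hypothetical periodic timer */

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	/* push the expiry forward by one period and keep the timer armed */
	hrtimer_forward_now(t, ms_to_ktime(100));
	return HRTIMER_RESTART;
}

static void demo_timer_start(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}

static void demo_timer_stop(void)
{
	hrtimer_cancel(&demo_timer);	/* waits for a running callback to finish */
}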
// SPDX-License-Identifier: GPL-2.0 /* * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner * * High-resolution kernel timers * * In contrast to the low-resolution timeout API, aka timer wheel, * hrtimers provide finer resolution and accuracy depending on system * configuration and capabilities. * * Started by: Thomas Gleixner and Ingo Molnar * * Credits: * Based on the original timer wheel code * * Help, testing, suggestions, bugfixes, improvements were * provided by: * * George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel * et. al. */ #include <linux/cpu.h> #include <linux/export.h> #include <linux/percpu.h> #include <linux/hrtimer.h> #include <linux/notifier.h> #include <linux/syscalls.h> #include <linux/interrupt.h> #include <linux/tick.h> #include <linux/err.h> #include <linux/debugobjects.h> #include <linux/sched/signal.h> #include <linux/sched/sysctl.h> #include <linux/sched/rt.h> #include <linux/sched/deadline.h> #include <linux/sched/nohz.h> #include <linux/sched/debug.h> #include <linux/sched/isolation.h> #include <linux/timer.h> #include <linux/freezer.h> #include <linux/compat.h> #include <linux/uaccess.h> #include <trace/events/timer.h> #include "tick-internal.h" /* * Masks for selecting the soft and hard context timers from * cpu_base->active */ #define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT) #define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1) #define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT) #define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD) /* * The timer bases: * * There are more clockids than hrtimer bases. Thus, we index * into the timer bases by the hrtimer_base_type enum. When trying * to reach a base using a clockid, hrtimer_clockid_to_base() * is used to convert from clockid to the proper hrtimer_base_type. */ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = { .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock), .clock_base = { { .index = HRTIMER_BASE_MONOTONIC, .clockid = CLOCK_MONOTONIC, .get_time = &ktime_get, }, { .index = HRTIMER_BASE_REALTIME, .clockid = CLOCK_REALTIME, .get_time = &ktime_get_real, }, { .index = HRTIMER_BASE_BOOTTIME, .clockid = CLOCK_BOOTTIME, .get_time = &ktime_get_boottime, }, { .index = HRTIMER_BASE_TAI, .clockid = CLOCK_TAI, .get_time = &ktime_get_clocktai, }, { .index = HRTIMER_BASE_MONOTONIC_SOFT, .clockid = CLOCK_MONOTONIC, .get_time = &ktime_get, }, { .index = HRTIMER_BASE_REALTIME_SOFT, .clockid = CLOCK_REALTIME, .get_time = &ktime_get_real, }, { .index = HRTIMER_BASE_BOOTTIME_SOFT, .clockid = CLOCK_BOOTTIME, .get_time = &ktime_get_boottime, }, { .index = HRTIMER_BASE_TAI_SOFT, .clockid = CLOCK_TAI, .get_time = &ktime_get_clocktai, }, } }; static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = { /* Make sure we catch unsupported clockids */ [0 ...
MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES, [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME, [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC, [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME, [CLOCK_TAI] = HRTIMER_BASE_TAI, }; /* * Functions and macros which are different for UP/SMP systems are kept in a * single place */ #ifdef CONFIG_SMP /* * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base() * such that hrtimer_callback_running() can unconditionally dereference * timer->base->cpu_base */ static struct hrtimer_cpu_base migration_cpu_base = { .clock_base = { { .cpu_base = &migration_cpu_base, .seq = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq, &migration_cpu_base.lock), }, }, }; #define migration_base migration_cpu_base.clock_base[0] static inline bool is_migration_base(struct hrtimer_clock_base *base) { return base == &migration_base; } /* * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock * means that all timers which are tied to this base via timer->base are * locked, and the base itself is locked too. * * So __run_timers/migrate_timers can safely modify all timers which could * be found on the lists/queues. * * When the timer's base is locked, and the timer removed from list, it is * possible to set timer->base = &migration_base and drop the lock: the timer * remains locked. */ static struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) __acquires(&timer->base->lock) { struct hrtimer_clock_base *base; for (;;) { base = READ_ONCE(timer->base); if (likely(base != &migration_base)) { raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); if (likely(base == timer->base)) return base; /* The timer has migrated to another CPU: */ raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags); } cpu_relax(); } } /* * We do not migrate the timer when it is expiring before the next * event on the target cpu. When high resolution is enabled, we cannot * reprogram the target cpu hardware and we would cause it to fire * late. To keep it simple, we handle the high resolution enabled and * disabled case similar. * * Called with cpu_base->lock of target cpu held. */ static int hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base) { ktime_t expires; expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); return expires < new_base->cpu_base->expires_next; } static inline struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, int pinned) { #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) if (static_branch_likely(&timers_migration_enabled) && !pinned) return &per_cpu(hrtimer_bases, get_nohz_timer_target()); #endif return base; } /* * We switch the timer base to a power-optimized selected CPU target, * if: * - NO_HZ_COMMON is enabled * - timer migration is enabled * - the timer callback is not running * - the timer is not the first expiring timer on the new target * * If one of the above requirements is not fulfilled we move the timer * to the current CPU or leave it on the previously assigned CPU if * the timer callback is currently running. 
*/ static inline struct hrtimer_clock_base * switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, int pinned) { struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base; struct hrtimer_clock_base *new_base; int basenum = base->index; this_cpu_base = this_cpu_ptr(&hrtimer_bases); new_cpu_base = get_target_base(this_cpu_base, pinned); again: new_base = &new_cpu_base->clock_base[basenum]; if (base != new_base) { /* * We are trying to move timer to new_base. * However we can't change timer's base while it is running, * so we keep it on the same CPU. No hassle vs. reprogramming * the event source in the high resolution case. The softirq * code will take care of this when the timer function has * completed. There is no conflict as we hold the lock until * the timer is enqueued. */ if (unlikely(hrtimer_callback_running(timer))) return base; /* See the comment in lock_hrtimer_base() */ WRITE_ONCE(timer->base, &migration_base); raw_spin_unlock(&base->cpu_base->lock); raw_spin_lock(&new_base->cpu_base->lock); if (new_cpu_base != this_cpu_base && hrtimer_check_target(timer, new_base)) { raw_spin_unlock(&new_base->cpu_base->lock); raw_spin_lock(&base->cpu_base->lock); new_cpu_base = this_cpu_base; WRITE_ONCE(timer->base, base); goto again; } WRITE_ONCE(timer->base, new_base); } else { if (new_cpu_base != this_cpu_base && hrtimer_check_target(timer, new_base)) { new_cpu_base = this_cpu_base; goto again; } } return new_base; } #else /* CONFIG_SMP */ static inline bool is_migration_base(struct hrtimer_clock_base *base) { return false; } static inline struct hrtimer_clock_base * lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) __acquires(&timer->base->cpu_base->lock) { struct hrtimer_clock_base *base = timer->base; raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); return base; } # define switch_hrtimer_base(t, b, p) (b) #endif /* !CONFIG_SMP */ /* * Functions for the union type storage format of ktime_t which are * too large for inlining: */ #if BITS_PER_LONG < 64 /* * Divide a ktime value by a nanosecond value */ s64 __ktime_divns(const ktime_t kt, s64 div) { int sft = 0; s64 dclc; u64 tmp; dclc = ktime_to_ns(kt); tmp = dclc < 0 ? -dclc : dclc; /* Make sure the divisor is less than 2^32: */ while (div >> 32) { sft++; div >>= 1; } tmp >>= sft; do_div(tmp, (u32) div); return dclc < 0 ? 
-tmp : tmp; } EXPORT_SYMBOL_GPL(__ktime_divns); #endif /* BITS_PER_LONG >= 64 */ /* * Add two ktime values and do a safety check for overflow: */ ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs) { ktime_t res = ktime_add_unsafe(lhs, rhs); /* * We use KTIME_SEC_MAX here, the maximum timeout which we can * return to user space in a timespec: */ if (res < 0 || res < lhs || res < rhs) res = ktime_set(KTIME_SEC_MAX, 0); return res; } EXPORT_SYMBOL_GPL(ktime_add_safe); #ifdef CONFIG_DEBUG_OBJECTS_TIMERS static const struct debug_obj_descr hrtimer_debug_descr; static void *hrtimer_debug_hint(void *addr) { return ((struct hrtimer *) addr)->function; } /* * fixup_init is called when: * - an active object is initialized */ static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state) { struct hrtimer *timer = addr; switch (state) { case ODEBUG_STATE_ACTIVE: hrtimer_cancel(timer); debug_object_init(timer, &hrtimer_debug_descr); return true; default: return false; } } /* * fixup_activate is called when: * - an active object is activated * - an unknown non-static object is activated */ static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state) { switch (state) { case ODEBUG_STATE_ACTIVE: WARN_ON(1); fallthrough; default: return false; } } /* * fixup_free is called when: * - an active object is freed */ static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state) { struct hrtimer *timer = addr; switch (state) { case ODEBUG_STATE_ACTIVE: hrtimer_cancel(timer); debug_object_free(timer, &hrtimer_debug_descr); return true; default: return false; } } static const struct debug_obj_descr hrtimer_debug_descr = { .name = "hrtimer", .debug_hint = hrtimer_debug_hint, .fixup_init = hrtimer_fixup_init, .fixup_activate = hrtimer_fixup_activate, .fixup_free = hrtimer_fixup_free, }; static inline void debug_hrtimer_init(struct hrtimer *timer) { debug_object_init(timer, &hrtimer_debug_descr); } static inline void debug_hrtimer_activate(struct hrtimer *timer, enum hrtimer_mode mode) { debug_object_activate(timer, &hrtimer_debug_descr); } static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { debug_object_deactivate(timer, &hrtimer_debug_descr); } static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, enum hrtimer_mode mode); void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id, enum hrtimer_mode mode) { debug_object_init_on_stack(timer, &hrtimer_debug_descr); __hrtimer_init(timer, clock_id, mode); } EXPORT_SYMBOL_GPL(hrtimer_init_on_stack); static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, enum hrtimer_mode mode); void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, clockid_t clock_id, enum hrtimer_mode mode) { debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr); __hrtimer_init_sleeper(sl, clock_id, mode); } EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack); void destroy_hrtimer_on_stack(struct hrtimer *timer) { debug_object_free(timer, &hrtimer_debug_descr); } EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack); #else static inline void debug_hrtimer_init(struct hrtimer *timer) { } static inline void debug_hrtimer_activate(struct hrtimer *timer, enum hrtimer_mode mode) { } static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } #endif static inline void debug_init(struct hrtimer *timer, clockid_t clockid, enum hrtimer_mode mode) { debug_hrtimer_init(timer); trace_hrtimer_init(timer, clockid, mode); } static inline void debug_activate(struct hrtimer *timer, enum 
hrtimer_mode mode) { debug_hrtimer_activate(timer, mode); trace_hrtimer_start(timer, mode); } static inline void debug_deactivate(struct hrtimer *timer) { debug_hrtimer_deactivate(timer); trace_hrtimer_cancel(timer); } static struct hrtimer_clock_base * __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active) { unsigned int idx; if (!*active) return NULL; idx = __ffs(*active); *active &= ~(1U << idx); return &cpu_base->clock_base[idx]; } #define for_each_active_base(base, cpu_base, active) \ while ((base = __next_base((cpu_base), &(active)))) static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base, const struct hrtimer *exclude, unsigned int active, ktime_t expires_next) { struct hrtimer_clock_base *base; ktime_t expires; for_each_active_base(base, cpu_base, active) { struct timerqueue_node *next; struct hrtimer *timer; next = timerqueue_getnext(&base->active); timer = container_of(next, struct hrtimer, node); if (timer == exclude) { /* Get to the next timer in the queue. */ next = timerqueue_iterate_next(next); if (!next) continue; timer = container_of(next, struct hrtimer, node); } expires = ktime_sub(hrtimer_get_expires(timer), base->offset); if (expires < expires_next) { expires_next = expires; /* Skip cpu_base update if a timer is being excluded. */ if (exclude) continue; if (timer->is_soft) cpu_base->softirq_next_timer = timer; else cpu_base->next_timer = timer; } } /* * clock_was_set() might have changed base->offset of any of * the clock bases so the result might be negative. Fix it up * to prevent a false positive in clockevents_program_event(). */ if (expires_next < 0) expires_next = 0; return expires_next; } /* * Recomputes cpu_base::*next_timer and returns the earliest expires_next * but does not set cpu_base::*expires_next, that is done by * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating * cpu_base::*expires_next right away, reprogramming logic would no longer * work. * * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases, * those timers will get run whenever the softirq gets handled, at the end of * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases. * * Therefore softirq values are those from the HRTIMER_ACTIVE_SOFT clock bases. * The !softirq values are the minima across HRTIMER_ACTIVE_ALL, unless an actual * softirq is pending, in which case they're the minima of HRTIMER_ACTIVE_HARD. * * @active_mask must be one of: * - HRTIMER_ACTIVE_ALL, * - HRTIMER_ACTIVE_SOFT, or * - HRTIMER_ACTIVE_HARD. */ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask) { unsigned int active; struct hrtimer *next_timer = NULL; ktime_t expires_next = KTIME_MAX; if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) { active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT; cpu_base->softirq_next_timer = NULL; expires_next = __hrtimer_next_event_base(cpu_base, NULL, active, KTIME_MAX); next_timer = cpu_base->softirq_next_timer; } if (active_mask & HRTIMER_ACTIVE_HARD) { active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD; cpu_base->next_timer = next_timer; expires_next = __hrtimer_next_event_base(cpu_base, NULL, active, expires_next); } return expires_next; } static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base) { ktime_t expires_next, soft = KTIME_MAX; /* * If the soft interrupt has already been activated, ignore the * soft bases. They will be handled in the already raised soft * interrupt. 
*/ if (!cpu_base->softirq_activated) { soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT); /* * Update the soft expiry time. clock_settime() might have * affected it. */ cpu_base->softirq_expires_next = soft; } expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD); /* * If a softirq timer is expiring first, update cpu_base->next_timer * and program the hardware with the soft expiry time. */ if (expires_next > soft) { cpu_base->next_timer = cpu_base->softirq_next_timer; expires_next = soft; } return expires_next; } static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) { ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset; ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset; ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq, offs_real, offs_boot, offs_tai); base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real; base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot; base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai; return now; } /* * Is the high resolution mode active ? */ static inline int hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base) { return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ? cpu_base->hres_active : 0; } static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base, struct hrtimer *next_timer, ktime_t expires_next) { cpu_base->expires_next = expires_next; /* * If hres is not active, hardware does not have to be * reprogrammed yet. * * If a hang was detected in the last timer interrupt then we * leave the hang delay active in the hardware. We want the * system to make progress. That also prevents the following * scenario: * T1 expires 50ms from now * T2 expires 5s from now * * T1 is removed, so this code is called and would reprogram * the hardware to 5s from now. Any hrtimer_start after that * will not reprogram the hardware due to hang_detected being * set. So we'd effectively block all timers until the T2 event * fires. */ if (!hrtimer_hres_active(cpu_base) || cpu_base->hang_detected) return; tick_program_event(expires_next, 1); } /* * Reprogram the event source with checking both queues for the * next event * Called with interrupts disabled and base->lock held */ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) { ktime_t expires_next; expires_next = hrtimer_update_next_event(cpu_base); if (skip_equal && expires_next == cpu_base->expires_next) return; __hrtimer_reprogram(cpu_base, cpu_base->next_timer, expires_next); } /* High resolution timer related functions */ #ifdef CONFIG_HIGH_RES_TIMERS /* * High resolution timer enabled ? 
*/ static bool hrtimer_hres_enabled __read_mostly = true; unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC; EXPORT_SYMBOL_GPL(hrtimer_resolution); /* * Enable / Disable high resolution mode */ static int __init setup_hrtimer_hres(char *str) { return (kstrtobool(str, &hrtimer_hres_enabled) == 0); } __setup("highres=", setup_hrtimer_hres); /* * hrtimer_high_res_enabled - query, if the highres mode is enabled */ static inline int hrtimer_is_hres_enabled(void) { return hrtimer_hres_enabled; } static void retrigger_next_event(void *arg); /* * Switch to high resolution mode */ static void hrtimer_switch_to_hres(void) { struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases); if (tick_init_highres()) { pr_warn("Could not switch to high resolution mode on CPU %u\n", base->cpu); return; } base->hres_active = 1; hrtimer_resolution = HIGH_RES_NSEC; tick_setup_sched_timer(true); /* "Retrigger" the interrupt to get things going */ retrigger_next_event(NULL); } #else static inline int hrtimer_is_hres_enabled(void) { return 0; } static inline void hrtimer_switch_to_hres(void) { } #endif /* CONFIG_HIGH_RES_TIMERS */ /* * Retrigger next event is called after clock was set with interrupts * disabled through an SMP function call or directly from low level * resume code. * * This is only invoked when: * - CONFIG_HIGH_RES_TIMERS is enabled. * - CONFIG_NOHZ_COMMON is enabled * * For the other cases this function is empty and because the call sites * are optimized out it vanishes as well, i.e. no need for lots of * #ifdeffery. */ static void retrigger_next_event(void *arg) { struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases); /* * When high resolution mode or nohz is active, then the offsets of * CLOCK_REALTIME/TAI/BOOTTIME have to be updated. Otherwise the * next tick will take care of that. * * If high resolution mode is active then the next expiring timer * must be reevaluated and the clock event device reprogrammed if * necessary. * * In the NOHZ case the update of the offset and the reevaluation * of the next expiring timer is enough. The return from the SMP * function call will take care of the reprogramming in case the * CPU was in a NOHZ idle sleep. */ if (!hrtimer_hres_active(base) && !tick_nohz_active) return; raw_spin_lock(&base->lock); hrtimer_update_base(base); if (hrtimer_hres_active(base)) hrtimer_force_reprogram(base, 0); else hrtimer_update_next_event(base); raw_spin_unlock(&base->lock); } /* * When a timer is enqueued and expires earlier than the already enqueued * timers, we have to check, whether it expires earlier than the timer for * which the clock event device was armed. * * Called with interrupts disabled and base->cpu_base.lock held */ static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram) { struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); struct hrtimer_clock_base *base = timer->base; ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset); WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0); /* * CLOCK_REALTIME timer might be requested with an absolute * expiry time which is less than base->offset. Set it to 0. */ if (expires < 0) expires = 0; if (timer->is_soft) { /* * soft hrtimer could be started on a remote CPU. In this * case softirq_expires_next needs to be updated on the * remote CPU. The soft hrtimer will not expire before the * first hard hrtimer on the remote CPU - * hrtimer_check_target() prevents this case. 
*/ struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base; if (timer_cpu_base->softirq_activated) return; if (!ktime_before(expires, timer_cpu_base->softirq_expires_next)) return; timer_cpu_base->softirq_next_timer = timer; timer_cpu_base->softirq_expires_next = expires; if (!ktime_before(expires, timer_cpu_base->expires_next) || !reprogram) return; } /* * If the timer is not on the current cpu, we cannot reprogram * the other cpus clock event device. */ if (base->cpu_base != cpu_base) return; if (expires >= cpu_base->expires_next) return; /* * If the hrtimer interrupt is running, then it will reevaluate the * clock bases and reprogram the clock event device. */ if (cpu_base->in_hrtirq) return; cpu_base->next_timer = timer; __hrtimer_reprogram(cpu_base, timer, expires); } static bool update_needs_ipi(struct hrtimer_cpu_base *cpu_base, unsigned int active) { struct hrtimer_clock_base *base; unsigned int seq; ktime_t expires; /* * Update the base offsets unconditionally so the following * checks whether the SMP function call is required works. * * The update is safe even when the remote CPU is in the hrtimer * interrupt or the hrtimer soft interrupt and expiring affected * bases. Either it will see the update before handling a base or * it will see it when it finishes the processing and reevaluates * the next expiring timer. */ seq = cpu_base->clock_was_set_seq; hrtimer_update_base(cpu_base); /* * If the sequence did not change over the update then the * remote CPU already handled it. */ if (seq == cpu_base->clock_was_set_seq) return false; /* * If the remote CPU is currently handling an hrtimer interrupt, it * will reevaluate the first expiring timer of all clock bases * before reprogramming. Nothing to do here. */ if (cpu_base->in_hrtirq) return false; /* * Walk the affected clock bases and check whether the first expiring * timer in a clock base is moving ahead of the first expiring timer of * @cpu_base. If so, the IPI must be invoked because per CPU clock * event devices cannot be remotely reprogrammed. */ active &= cpu_base->active_bases; for_each_active_base(base, cpu_base, active) { struct timerqueue_node *next; next = timerqueue_getnext(&base->active); expires = ktime_sub(next->expires, base->offset); if (expires < cpu_base->expires_next) return true; /* Extra check for softirq clock bases */ if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT) continue; if (cpu_base->softirq_activated) continue; if (expires < cpu_base->softirq_expires_next) return true; } return false; } /* * Clock was set. This might affect CLOCK_REALTIME, CLOCK_TAI and * CLOCK_BOOTTIME (for late sleep time injection). * * This requires to update the offsets for these clocks * vs. CLOCK_MONOTONIC. When high resolution timers are enabled, then this * also requires to eventually reprogram the per CPU clock event devices * when the change moves an affected timer ahead of the first expiring * timer on that CPU. Obviously remote per CPU clock event devices cannot * be reprogrammed. The other reason why an IPI has to be sent is when the * system is in !HIGH_RES and NOHZ mode. The NOHZ mode updates the offsets * in the tick, which obviously might be stopped, so this has to bring out * the remote CPU which might sleep in idle to get this sorted. 
*/ void clock_was_set(unsigned int bases) { struct hrtimer_cpu_base *cpu_base = raw_cpu_ptr(&hrtimer_bases); cpumask_var_t mask; int cpu; if (!hrtimer_hres_active(cpu_base) && !tick_nohz_active) goto out_timerfd; if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { on_each_cpu(retrigger_next_event, NULL, 1); goto out_timerfd; } /* Avoid interrupting CPUs if possible */ cpus_read_lock(); for_each_online_cpu(cpu) { unsigned long flags; cpu_base = &per_cpu(hrtimer_bases, cpu); raw_spin_lock_irqsave(&cpu_base->lock, flags); if (update_needs_ipi(cpu_base, bases)) cpumask_set_cpu(cpu, mask); raw_spin_unlock_irqrestore(&cpu_base->lock, flags); } preempt_disable(); smp_call_function_many(mask, retrigger_next_event, NULL, 1); preempt_enable(); cpus_read_unlock(); free_cpumask_var(mask); out_timerfd: timerfd_clock_was_set(); } static void clock_was_set_work(struct work_struct *work) { clock_was_set(CLOCK_SET_WALL); } static DECLARE_WORK(hrtimer_work, clock_was_set_work); /* * Called from timekeeping code to reprogram the hrtimer interrupt device * on all cpus and to notify timerfd. */ void clock_was_set_delayed(void) { schedule_work(&hrtimer_work); } /* * Called during resume either directly from via timekeeping_resume() * or in the case of s2idle from tick_unfreeze() to ensure that the * hrtimers are up to date. */ void hrtimers_resume_local(void) { lockdep_assert_irqs_disabled(); /* Retrigger on the local CPU */ retrigger_next_event(NULL); } /* * Counterpart to lock_hrtimer_base above: */ static inline void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) __releases(&timer->base->cpu_base->lock) { raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); } /** * hrtimer_forward() - forward the timer expiry * @timer: hrtimer to forward * @now: forward past this time * @interval: the interval to forward * * Forward the timer expiry so it will expire in the future. * * .. note:: * This only updates the timer expiry value and does not requeue the timer. * * There is also a variant of the function hrtimer_forward_now(). * * Context: Can be safely called from the callback function of @timer. If called * from other contexts @timer must neither be enqueued nor running the * callback and the caller needs to take care of serialization. * * Return: The number of overruns are returned. */ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) { u64 orun = 1; ktime_t delta; delta = ktime_sub(now, hrtimer_get_expires(timer)); if (delta < 0) return 0; if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED)) return 0; if (interval < hrtimer_resolution) interval = hrtimer_resolution; if (unlikely(delta >= interval)) { s64 incr = ktime_to_ns(interval); orun = ktime_divns(delta, incr); hrtimer_add_expires_ns(timer, incr * orun); if (hrtimer_get_expires_tv64(timer) > now) return orun; /* * This (and the ktime_add() below) is the * correction for exact: */ orun++; } hrtimer_add_expires(timer, interval); return orun; } EXPORT_SYMBOL_GPL(hrtimer_forward); /* * enqueue_hrtimer - internal function to (re)start a timer * * The timer is inserted in expiry order. Insertion into the * red black tree is O(log(n)). Must hold the base lock. * * Returns 1 when the new timer is the leftmost timer in the tree. 
*/ static int enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, enum hrtimer_mode mode) { debug_activate(timer, mode); WARN_ON_ONCE(!base->cpu_base->online); base->cpu_base->active_bases |= 1 << base->index; /* Pairs with the lockless read in hrtimer_is_queued() */ WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED); return timerqueue_add(&base->active, &timer->node); } /* * __remove_hrtimer - internal function to remove a timer * * Caller must hold the base lock. * * High resolution timer mode reprograms the clock event device when the * timer is the one which expires next. The caller can disable this by setting * reprogram to zero. This is useful, when the context does a reprogramming * anyway (e.g. timer interrupt) */ static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, u8 newstate, int reprogram) { struct hrtimer_cpu_base *cpu_base = base->cpu_base; u8 state = timer->state; /* Pairs with the lockless read in hrtimer_is_queued() */ WRITE_ONCE(timer->state, newstate); if (!(state & HRTIMER_STATE_ENQUEUED)) return; if (!timerqueue_del(&base->active, &timer->node)) cpu_base->active_bases &= ~(1 << base->index); /* * Note: If reprogram is false we do not update * cpu_base->next_timer. This happens when we remove the first * timer on a remote cpu. No harm as we never dereference * cpu_base->next_timer. So the worst thing what can happen is * an superfluous call to hrtimer_force_reprogram() on the * remote cpu later on if the same timer gets enqueued again. */ if (reprogram && timer == cpu_base->next_timer) hrtimer_force_reprogram(cpu_base, 1); } /* * remove hrtimer, called with base lock held */ static inline int remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart, bool keep_local) { u8 state = timer->state; if (state & HRTIMER_STATE_ENQUEUED) { bool reprogram; /* * Remove the timer and force reprogramming when high * resolution mode is active and the timer is on the current * CPU. If we remove a timer on another CPU, reprogramming is * skipped. The interrupt event on this CPU is fired and * reprogramming happens in the interrupt handler. This is a * rare case and less expensive than a smp call. */ debug_deactivate(timer); reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases); /* * If the timer is not restarted then reprogramming is * required if the timer is local. If it is local and about * to be restarted, avoid programming it twice (on removal * and a moment later when it's requeued). */ if (!restart) state = HRTIMER_STATE_INACTIVE; else reprogram &= !keep_local; __remove_hrtimer(timer, base, state, reprogram); return 1; } return 0; } static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) { #ifdef CONFIG_TIME_LOW_RES /* * CONFIG_TIME_LOW_RES indicates that the system has no way to return * granular time values. For relative timers we add hrtimer_resolution * (i.e. one jiffy) to prevent short timeouts. */ timer->is_rel = mode & HRTIMER_MODE_REL; if (timer->is_rel) tim = ktime_add_safe(tim, hrtimer_resolution); #endif return tim; } static void hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram) { ktime_t expires; /* * Find the next SOFT expiration. */ expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT); /* * reprogramming needs to be triggered, even if the next soft * hrtimer expires at the same time than the next hard * hrtimer. cpu_base->softirq_expires_next needs to be updated! 
*/ if (expires == KTIME_MAX) return; /* * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event() * cpu_base->*expires_next is only set by hrtimer_reprogram() */ hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram); } static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, u64 delta_ns, const enum hrtimer_mode mode, struct hrtimer_clock_base *base) { struct hrtimer_clock_base *new_base; bool force_local, first; /* * If the timer is on the local cpu base and is the first expiring * timer then this might end up reprogramming the hardware twice * (on removal and on enqueue). To avoid that by prevent the * reprogram on removal, keep the timer local to the current CPU * and enforce reprogramming after it is queued no matter whether * it is the new first expiring timer again or not. */ force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases); force_local &= base->cpu_base->next_timer == timer; /* * Remove an active timer from the queue. In case it is not queued * on the current CPU, make sure that remove_hrtimer() updates the * remote data correctly. * * If it's on the current CPU and the first expiring timer, then * skip reprogramming, keep the timer local and enforce * reprogramming later if it was the first expiring timer. This * avoids programming the underlying clock event twice (once at * removal and once after enqueue). */ remove_hrtimer(timer, base, true, force_local); if (mode & HRTIMER_MODE_REL) tim = ktime_add_safe(tim, base->get_time()); tim = hrtimer_update_lowres(timer, tim, mode); hrtimer_set_expires_range_ns(timer, tim, delta_ns); /* Switch the timer base, if necessary: */ if (!force_local) { new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); } else { new_base = base; } first = enqueue_hrtimer(timer, new_base, mode); if (!force_local) return first; /* * Timer was forced to stay on the current CPU to avoid * reprogramming on removal and enqueue. Force reprogram the * hardware by evaluating the new first expiring timer. */ hrtimer_force_reprogram(new_base->cpu_base, 1); return 0; } /** * hrtimer_start_range_ns - (re)start an hrtimer * @timer: the timer to be added * @tim: expiry time * @delta_ns: "slack" range for the timer * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED); * softirq based mode is considered for debug purpose only! */ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, u64 delta_ns, const enum hrtimer_mode mode) { struct hrtimer_clock_base *base; unsigned long flags; if (WARN_ON_ONCE(!timer->function)) return; /* * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard * expiry mode because unmarked timers are moved to softirq expiry. 
*/ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft); else WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard); base = lock_hrtimer_base(timer, &flags); if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base)) hrtimer_reprogram(timer, true); unlock_hrtimer_base(timer, &flags); } EXPORT_SYMBOL_GPL(hrtimer_start_range_ns); /** * hrtimer_try_to_cancel - try to deactivate a timer * @timer: hrtimer to stop * * Returns: * * * 0 when the timer was not active * * 1 when the timer was active * * -1 when the timer is currently executing the callback function and * cannot be stopped */ int hrtimer_try_to_cancel(struct hrtimer *timer) { struct hrtimer_clock_base *base; unsigned long flags; int ret = -1; /* * Check lockless first. If the timer is not active (neither * enqueued nor running the callback, nothing to do here. The * base lock does not serialize against a concurrent enqueue, * so we can avoid taking it. */ if (!hrtimer_active(timer)) return 0; base = lock_hrtimer_base(timer, &flags); if (!hrtimer_callback_running(timer)) ret = remove_hrtimer(timer, base, false, false); unlock_hrtimer_base(timer, &flags); return ret; } EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel); #ifdef CONFIG_PREEMPT_RT static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { spin_lock_init(&base->softirq_expiry_lock); } static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) __acquires(&base->softirq_expiry_lock) { spin_lock(&base->softirq_expiry_lock); } static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) __releases(&base->softirq_expiry_lock) { spin_unlock(&base->softirq_expiry_lock); } /* * The counterpart to hrtimer_cancel_wait_running(). * * If there is a waiter for cpu_base->expiry_lock, then it was waiting for * the timer callback to finish. Drop expiry_lock and reacquire it. That * allows the waiter to acquire the lock and make progress. */ static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base, unsigned long flags) { if (atomic_read(&cpu_base->timer_waiters)) { raw_spin_unlock_irqrestore(&cpu_base->lock, flags); spin_unlock(&cpu_base->softirq_expiry_lock); spin_lock(&cpu_base->softirq_expiry_lock); raw_spin_lock_irq(&cpu_base->lock); } } /* * This function is called on PREEMPT_RT kernels when the fast path * deletion of a timer failed because the timer callback function was * running. * * This prevents priority inversion: if the soft irq thread is preempted * in the middle of a timer callback, then calling del_timer_sync() can * lead to two issues: * * - If the caller is on a remote CPU then it has to spin wait for the timer * handler to complete. This can result in unbound priority inversion. * * - If the caller originates from the task which preempted the timer * handler on the same CPU, then spin waiting for the timer handler to * complete is never going to end. */ void hrtimer_cancel_wait_running(const struct hrtimer *timer) { /* Lockless read. Prevent the compiler from reloading it below */ struct hrtimer_clock_base *base = READ_ONCE(timer->base); /* * Just relax if the timer expires in hard interrupt context or if * it is currently on the migration base. */ if (!timer->is_soft || is_migration_base(base)) { cpu_relax(); return; } /* * Mark the base as contended and grab the expiry lock, which is * held by the softirq across the timer callback. Drop the lock * immediately so the softirq can expire the next timer. 
In theory * the timer could already be running again, but that's more than * unlikely and just causes another wait loop. */ atomic_inc(&base->cpu_base->timer_waiters); spin_lock_bh(&base->cpu_base->softirq_expiry_lock); atomic_dec(&base->cpu_base->timer_waiters); spin_unlock_bh(&base->cpu_base->softirq_expiry_lock); } #else static inline void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { } static inline void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { } static inline void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { } static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base, unsigned long flags) { } #endif /** * hrtimer_cancel - cancel a timer and wait for the handler to finish. * @timer: the timer to be cancelled * * Returns: * 0 when the timer was not active * 1 when the timer was active */ int hrtimer_cancel(struct hrtimer *timer) { int ret; do { ret = hrtimer_try_to_cancel(timer); if (ret < 0) hrtimer_cancel_wait_running(timer); } while (ret < 0); return ret; } EXPORT_SYMBOL_GPL(hrtimer_cancel); /** * __hrtimer_get_remaining - get remaining time for the timer * @timer: the timer to read * @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y */ ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust) { unsigned long flags; ktime_t rem; lock_hrtimer_base(timer, &flags); if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust) rem = hrtimer_expires_remaining_adjusted(timer); else rem = hrtimer_expires_remaining(timer); unlock_hrtimer_base(timer, &flags); return rem; } EXPORT_SYMBOL_GPL(__hrtimer_get_remaining); #ifdef CONFIG_NO_HZ_COMMON /** * hrtimer_get_next_event - get the time until next expiry event * * Returns the next expiry time or KTIME_MAX if no timer is pending. */ u64 hrtimer_get_next_event(void) { struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); u64 expires = KTIME_MAX; unsigned long flags; raw_spin_lock_irqsave(&cpu_base->lock, flags); if (!hrtimer_hres_active(cpu_base)) expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL); raw_spin_unlock_irqrestore(&cpu_base->lock, flags); return expires; } /** * hrtimer_next_event_without - time until next expiry event w/o one timer * @exclude: timer to exclude * * Returns the next expiry time over all timers except for the @exclude one or * KTIME_MAX if none of them is pending. */ u64 hrtimer_next_event_without(const struct hrtimer *exclude) { struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); u64 expires = KTIME_MAX; unsigned long flags; raw_spin_lock_irqsave(&cpu_base->lock, flags); if (hrtimer_hres_active(cpu_base)) { unsigned int active; if (!cpu_base->softirq_activated) { active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT; expires = __hrtimer_next_event_base(cpu_base, exclude, active, KTIME_MAX); } active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD; expires = __hrtimer_next_event_base(cpu_base, exclude, active, expires); } raw_spin_unlock_irqrestore(&cpu_base->lock, flags); return expires; } #endif static inline int hrtimer_clockid_to_base(clockid_t clock_id) { if (likely(clock_id < MAX_CLOCKS)) { int base = hrtimer_clock_to_base_table[clock_id]; if (likely(base != HRTIMER_MAX_CLOCK_BASES)) return base; } WARN(1, "Invalid clockid %d. 
Using MONOTONIC\n", clock_id); return HRTIMER_BASE_MONOTONIC; } static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, enum hrtimer_mode mode) { bool softtimer = !!(mode & HRTIMER_MODE_SOFT); struct hrtimer_cpu_base *cpu_base; int base; /* * On PREEMPT_RT enabled kernels hrtimers which are not explicitly * marked for hard interrupt expiry mode are moved into soft * interrupt context for latency reasons and because the callbacks * can invoke functions which might sleep on RT, e.g. spin_lock(). */ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD)) softtimer = true; memset(timer, 0, sizeof(struct hrtimer)); cpu_base = raw_cpu_ptr(&hrtimer_bases); /* * POSIX magic: Relative CLOCK_REALTIME timers are not affected by * clock modifications, so they need to become CLOCK_MONOTONIC to * ensure POSIX compliance. */ if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL) clock_id = CLOCK_MONOTONIC; base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0; base += hrtimer_clockid_to_base(clock_id); timer->is_soft = softtimer; timer->is_hard = !!(mode & HRTIMER_MODE_HARD); timer->base = &cpu_base->clock_base[base]; timerqueue_init(&timer->node); } /** * hrtimer_init - initialize a timer to the given clock * @timer: the timer to be initialized * @clock_id: the clock to be used * @mode: The modes which are relevant for initialization: * HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT, * HRTIMER_MODE_REL_SOFT * * The PINNED variants of the above can be handed in, * but the PINNED bit is ignored as pinning happens * when the hrtimer is started */ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, enum hrtimer_mode mode) { debug_init(timer, clock_id, mode); __hrtimer_init(timer, clock_id, mode); } EXPORT_SYMBOL_GPL(hrtimer_init); /* * A timer is active when it is enqueued into the rbtree or the * callback function is running or it's in the state of being migrated * to another cpu. * * It is important for this function to not return a false negative. */ bool hrtimer_active(const struct hrtimer *timer) { struct hrtimer_clock_base *base; unsigned int seq; do { base = READ_ONCE(timer->base); seq = raw_read_seqcount_begin(&base->seq); if (timer->state != HRTIMER_STATE_INACTIVE || base->running == timer) return true; } while (read_seqcount_retry(&base->seq, seq) || base != READ_ONCE(timer->base)); return false; } EXPORT_SYMBOL_GPL(hrtimer_active); /* * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3 * distinct sections: * * - queued: the timer is queued * - callback: the timer is being run * - post: the timer is inactive or (re)queued * * On the read side we ensure we observe timer->state and cpu_base->running * from the same section, if anything changed while we looked at it, we retry. * This includes timer->base changing because sequence numbers alone are * insufficient for that. * * The sequence numbers are required because otherwise we could still observe * a false negative if the read side got smeared over multiple consecutive * __run_hrtimer() invocations. */ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base, struct hrtimer_clock_base *base, struct hrtimer *timer, ktime_t *now, unsigned long flags) __must_hold(&cpu_base->lock) { enum hrtimer_restart (*fn)(struct hrtimer *); bool expires_in_hardirq; int restart; lockdep_assert_held(&cpu_base->lock); debug_deactivate(timer); base->running = timer; /* * Separate the ->running assignment from the ->state assignment.
* * As with a regular write barrier, this ensures the read side in * hrtimer_active() cannot observe base->running == NULL && * timer->state == INACTIVE. */ raw_write_seqcount_barrier(&base->seq); __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0); fn = timer->function; /* * Clear the 'is relative' flag for the TIME_LOW_RES case. If the * timer is restarted with a period then it becomes an absolute * timer. If its not restarted it does not matter. */ if (IS_ENABLED(CONFIG_TIME_LOW_RES)) timer->is_rel = false; /* * The timer is marked as running in the CPU base, so it is * protected against migration to a different CPU even if the lock * is dropped. */ raw_spin_unlock_irqrestore(&cpu_base->lock, flags); trace_hrtimer_expire_entry(timer, now); expires_in_hardirq = lockdep_hrtimer_enter(timer); restart = fn(timer); lockdep_hrtimer_exit(expires_in_hardirq); trace_hrtimer_expire_exit(timer); raw_spin_lock_irq(&cpu_base->lock); /* * Note: We clear the running state after enqueue_hrtimer and * we do not reprogram the event hardware. Happens either in * hrtimer_start_range_ns() or in hrtimer_interrupt() * * Note: Because we dropped the cpu_base->lock above, * hrtimer_start_range_ns() can have popped in and enqueued the timer * for us already. */ if (restart != HRTIMER_NORESTART && !(timer->state & HRTIMER_STATE_ENQUEUED)) enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS); /* * Separate the ->running assignment from the ->state assignment. * * As with a regular write barrier, this ensures the read side in * hrtimer_active() cannot observe base->running.timer == NULL && * timer->state == INACTIVE. */ raw_write_seqcount_barrier(&base->seq); WARN_ON_ONCE(base->running != timer); base->running = NULL; } static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now, unsigned long flags, unsigned int active_mask) { struct hrtimer_clock_base *base; unsigned int active = cpu_base->active_bases & active_mask; for_each_active_base(base, cpu_base, active) { struct timerqueue_node *node; ktime_t basenow; basenow = ktime_add(now, base->offset); while ((node = timerqueue_getnext(&base->active))) { struct hrtimer *timer; timer = container_of(node, struct hrtimer, node); /* * The immediate goal for using the softexpires is * minimizing wakeups, not running timers at the * earliest interrupt after their soft expiration. * This allows us to avoid using a Priority Search * Tree, which can answer a stabbing query for * overlapping intervals and instead use the simple * BST we already have. * We don't add extra wakeups by delaying timers that * are right-of a not yet expired timer, because that * timer will have to trigger a wakeup anyway. 
*/ if (basenow < hrtimer_get_softexpires_tv64(timer)) break; __run_hrtimer(cpu_base, base, timer, &basenow, flags); if (active_mask == HRTIMER_ACTIVE_SOFT) hrtimer_sync_wait_running(cpu_base, flags); } } } static __latent_entropy void hrtimer_run_softirq(void) { struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); unsigned long flags; ktime_t now; hrtimer_cpu_base_lock_expiry(cpu_base); raw_spin_lock_irqsave(&cpu_base->lock, flags); now = hrtimer_update_base(cpu_base); __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT); cpu_base->softirq_activated = 0; hrtimer_update_softirq_timer(cpu_base, true); raw_spin_unlock_irqrestore(&cpu_base->lock, flags); hrtimer_cpu_base_unlock_expiry(cpu_base); } #ifdef CONFIG_HIGH_RES_TIMERS /* * High resolution timer interrupt * Called with interrupts disabled */ void hrtimer_interrupt(struct clock_event_device *dev) { struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); ktime_t expires_next, now, entry_time, delta; unsigned long flags; int retries = 0; BUG_ON(!cpu_base->hres_active); cpu_base->nr_events++; dev->next_event = KTIME_MAX; raw_spin_lock_irqsave(&cpu_base->lock, flags); entry_time = now = hrtimer_update_base(cpu_base); retry: cpu_base->in_hrtirq = 1; /* * We set expires_next to KTIME_MAX here with cpu_base->lock * held to prevent that a timer is enqueued in our queue via * the migration code. This does not affect enqueueing of * timers which run their callback and need to be requeued on * this CPU. */ cpu_base->expires_next = KTIME_MAX; if (!ktime_before(now, cpu_base->softirq_expires_next)) { cpu_base->softirq_expires_next = KTIME_MAX; cpu_base->softirq_activated = 1; raise_softirq_irqoff(HRTIMER_SOFTIRQ); } __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); /* Reevaluate the clock bases for the [soft] next expiry */ expires_next = hrtimer_update_next_event(cpu_base); /* * Store the new expiry value so the migration code can verify * against it. */ cpu_base->expires_next = expires_next; cpu_base->in_hrtirq = 0; raw_spin_unlock_irqrestore(&cpu_base->lock, flags); /* Reprogramming necessary ? */ if (!tick_program_event(expires_next, 0)) { cpu_base->hang_detected = 0; return; } /* * The next timer was already expired due to: * - tracing * - long lasting callbacks * - being scheduled away when running in a VM * * We need to prevent that we loop forever in the hrtimer * interrupt routine. We give it 3 attempts to avoid * overreacting on some spurious event. * * Acquire base lock for updating the offsets and retrieving * the current time. */ raw_spin_lock_irqsave(&cpu_base->lock, flags); now = hrtimer_update_base(cpu_base); cpu_base->nr_retries++; if (++retries < 3) goto retry; /* * Give the system a chance to do something else than looping * here. We stored the entry time, so we know exactly how long * we spent here. We schedule the next event this amount of * time away. */ cpu_base->nr_hangs++; cpu_base->hang_detected = 1; raw_spin_unlock_irqrestore(&cpu_base->lock, flags); delta = ktime_sub(now, entry_time); if ((unsigned int)delta > cpu_base->max_hang_time) cpu_base->max_hang_time = (unsigned int) delta; /* * Limit it to a sensible value as we enforce a longer * delay. Give the CPU at least 100ms to catch up. 
*/ if (delta > 100 * NSEC_PER_MSEC) expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC); else expires_next = ktime_add(now, delta); tick_program_event(expires_next, 1); pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta)); } #endif /* !CONFIG_HIGH_RES_TIMERS */ /* * Called from run_local_timers in hardirq context every jiffy */ void hrtimer_run_queues(void) { struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); unsigned long flags; ktime_t now; if (hrtimer_hres_active(cpu_base)) return; /* * This _is_ ugly: We have to check periodically, whether we * can switch to highres and / or nohz mode. The clocksource * switch happens with xtime_lock held. Notification from * there only sets the check bit in the tick_oneshot code, * otherwise we might deadlock vs. xtime_lock. */ if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) { hrtimer_switch_to_hres(); return; } raw_spin_lock_irqsave(&cpu_base->lock, flags); now = hrtimer_update_base(cpu_base); if (!ktime_before(now, cpu_base->softirq_expires_next)) { cpu_base->softirq_expires_next = KTIME_MAX; cpu_base->softirq_activated = 1; raise_softirq_irqoff(HRTIMER_SOFTIRQ); } __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); raw_spin_unlock_irqrestore(&cpu_base->lock, flags); } /* * Sleep related functions: */ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) { struct hrtimer_sleeper *t = container_of(timer, struct hrtimer_sleeper, timer); struct task_struct *task = t->task; t->task = NULL; if (task) wake_up_process(task); return HRTIMER_NORESTART; } /** * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer * @sl: sleeper to be started * @mode: timer mode abs/rel * * Wrapper around hrtimer_start_expires() for hrtimer_sleeper based timers * to allow PREEMPT_RT to tweak the delivery mode (soft/hardirq context) */ void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl, enum hrtimer_mode mode) { /* * Make the enqueue delivery mode check work on RT. If the sleeper * was initialized for hard interrupt delivery, force the mode bit. * This is a special case for hrtimer_sleepers because * hrtimer_init_sleeper() determines the delivery mode on RT so the * fiddling with this decision is avoided at the call sites. */ if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard) mode |= HRTIMER_MODE_HARD; hrtimer_start_expires(&sl->timer, mode); } EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires); static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, enum hrtimer_mode mode) { /* * On PREEMPT_RT enabled kernels hrtimers which are not explicitly * marked for hard interrupt expiry mode are moved into soft * interrupt context either for latency reasons or because the * hrtimer callback takes regular spinlocks or invokes other * functions which are not suitable for hard interrupt context on * PREEMPT_RT. * * The hrtimer_sleeper callback is RT compatible in hard interrupt * context, but there is a latency concern: Untrusted userspace can * spawn many threads which arm timers for the same expiry time on * the same CPU. That causes a latency spike due to the wakeup of * a gazillion threads. * * OTOH, privileged real-time user space applications rely on the * low latency of hard interrupt wakeups. If the current task is in * a real-time scheduling class, mark the mode for hard interrupt * expiry. 
*/ if (IS_ENABLED(CONFIG_PREEMPT_RT)) { if (rt_or_dl_task_policy(current) && !(mode & HRTIMER_MODE_SOFT)) mode |= HRTIMER_MODE_HARD; } __hrtimer_init(&sl->timer, clock_id, mode); sl->timer.function = hrtimer_wakeup; sl->task = current; } /** * hrtimer_init_sleeper - initialize sleeper to the given clock * @sl: sleeper to be initialized * @clock_id: the clock to be used * @mode: timer mode abs/rel */ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, enum hrtimer_mode mode) { debug_init(&sl->timer, clock_id, mode); __hrtimer_init_sleeper(sl, clock_id, mode); } EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts) { switch (restart->nanosleep.type) { #ifdef CONFIG_COMPAT_32BIT_TIME case TT_COMPAT: if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp)) return -EFAULT; break; #endif case TT_NATIVE: if (put_timespec64(ts, restart->nanosleep.rmtp)) return -EFAULT; break; default: BUG(); } return -ERESTART_RESTARTBLOCK; } static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) { struct restart_block *restart; do { set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); hrtimer_sleeper_start_expires(t, mode); if (likely(t->task)) schedule(); hrtimer_cancel(&t->timer); mode = HRTIMER_MODE_ABS; } while (t->task && !signal_pending(current)); __set_current_state(TASK_RUNNING); if (!t->task) return 0; restart = &current->restart_block; if (restart->nanosleep.type != TT_NONE) { ktime_t rem = hrtimer_expires_remaining(&t->timer); struct timespec64 rmt; if (rem <= 0) return 0; rmt = ktime_to_timespec64(rem); return nanosleep_copyout(restart, &rmt); } return -ERESTART_RESTARTBLOCK; } static long __sched hrtimer_nanosleep_restart(struct restart_block *restart) { struct hrtimer_sleeper t; int ret; hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid, HRTIMER_MODE_ABS); hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); ret = do_nanosleep(&t, HRTIMER_MODE_ABS); destroy_hrtimer_on_stack(&t.timer); return ret; } long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode, const clockid_t clockid) { struct restart_block *restart; struct hrtimer_sleeper t; int ret = 0; hrtimer_init_sleeper_on_stack(&t, clockid, mode); hrtimer_set_expires_range_ns(&t.timer, rqtp, current->timer_slack_ns); ret = do_nanosleep(&t, mode); if (ret != -ERESTART_RESTARTBLOCK) goto out; /* Absolute timers do not update the rmtp value and restart: */ if (mode == HRTIMER_MODE_ABS) { ret = -ERESTARTNOHAND; goto out; } restart = &current->restart_block; restart->nanosleep.clockid = t.timer.base->clockid; restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); set_restart_fn(restart, hrtimer_nanosleep_restart); out: destroy_hrtimer_on_stack(&t.timer); return ret; } #ifdef CONFIG_64BIT SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp, struct __kernel_timespec __user *, rmtp) { struct timespec64 tu; if (get_timespec64(&tu, rqtp)) return -EFAULT; if (!timespec64_valid(&tu)) return -EINVAL; current->restart_block.fn = do_no_restart_syscall; current->restart_block.nanosleep.type = rmtp ?
TT_NATIVE : TT_NONE; current->restart_block.nanosleep.rmtp = rmtp; return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL, CLOCK_MONOTONIC); } #endif #ifdef CONFIG_COMPAT_32BIT_TIME SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp, struct old_timespec32 __user *, rmtp) { struct timespec64 tu; if (get_old_timespec32(&tu, rqtp)) return -EFAULT; if (!timespec64_valid(&tu)) return -EINVAL; current->restart_block.fn = do_no_restart_syscall; current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; current->restart_block.nanosleep.compat_rmtp = rmtp; return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL, CLOCK_MONOTONIC); } #endif /* * Functions related to boot-time initialization: */ int hrtimers_prepare_cpu(unsigned int cpu) { struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); int i; for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i]; clock_b->cpu_base = cpu_base; seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock); timerqueue_init_head(&clock_b->active); } cpu_base->cpu = cpu; cpu_base->active_bases = 0; cpu_base->hres_active = 0; cpu_base->hang_detected = 0; cpu_base->next_timer = NULL; cpu_base->softirq_next_timer = NULL; cpu_base->expires_next = KTIME_MAX; cpu_base->softirq_expires_next = KTIME_MAX; cpu_base->online = 1; hrtimer_cpu_base_init_expiry_lock(cpu_base); return 0; } #ifdef CONFIG_HOTPLUG_CPU static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, struct hrtimer_clock_base *new_base) { struct hrtimer *timer; struct timerqueue_node *node; while ((node = timerqueue_getnext(&old_base->active))) { timer = container_of(node, struct hrtimer, node); BUG_ON(hrtimer_callback_running(timer)); debug_deactivate(timer); /* * Mark it as ENQUEUED not INACTIVE otherwise the * timer could be seen as !active and just vanish away * under us on another CPU */ __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0); timer->base = new_base; /* * Enqueue the timers on the new cpu. This does not * reprogram the event device in case the timer * expires before the earliest on this CPU, but we run * hrtimer_interrupt after we migrated everything to * sort out already expired timers and reprogram the * event device. */ enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS); } } int hrtimers_cpu_dying(unsigned int dying_cpu) { int i, ncpu = cpumask_any_and(cpu_active_mask, housekeeping_cpumask(HK_TYPE_TIMER)); struct hrtimer_cpu_base *old_base, *new_base; old_base = this_cpu_ptr(&hrtimer_bases); new_base = &per_cpu(hrtimer_bases, ncpu); /* * The caller is globally serialized and nobody else * takes two locks at once, deadlock is not possible. */ raw_spin_lock(&old_base->lock); raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING); for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { migrate_hrtimer_list(&old_base->clock_base[i], &new_base->clock_base[i]); } /* * The migration might have changed the first expiring softirq * timer on this CPU. Update it. 
*/ __hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT); /* Tell the other CPU to retrigger the next event */ smp_call_function_single(ncpu, retrigger_next_event, NULL, 0); raw_spin_unlock(&new_base->lock); old_base->online = 0; raw_spin_unlock(&old_base->lock); return 0; } #endif /* CONFIG_HOTPLUG_CPU */ void __init hrtimers_init(void) { hrtimers_prepare_cpu(smp_processor_id()); open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq); } /** * schedule_hrtimeout_range_clock - sleep until timeout * @expires: timeout value (ktime_t) * @delta: slack in expires timeout (ktime_t) * @mode: timer mode * @clock_id: timer clock to be used */ int __sched schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, const enum hrtimer_mode mode, clockid_t clock_id) { struct hrtimer_sleeper t; /* * Optimize when a zero timeout value is given. It does not * matter whether this is an absolute or a relative time. */ if (expires && *expires == 0) { __set_current_state(TASK_RUNNING); return 0; } /* * A NULL parameter means "infinite" */ if (!expires) { schedule(); return -EINTR; } hrtimer_init_sleeper_on_stack(&t, clock_id, mode); hrtimer_set_expires_range_ns(&t.timer, *expires, delta); hrtimer_sleeper_start_expires(&t, mode); if (likely(t.task)) schedule(); hrtimer_cancel(&t.timer); destroy_hrtimer_on_stack(&t.timer); __set_current_state(TASK_RUNNING); return !t.task ? 0 : -EINTR; } EXPORT_SYMBOL_GPL(schedule_hrtimeout_range_clock); /** * schedule_hrtimeout_range - sleep until timeout * @expires: timeout value (ktime_t) * @delta: slack in expires timeout (ktime_t) * @mode: timer mode * * Make the current task sleep until the given expiry time has * elapsed. The routine will return immediately unless * the current task state has been set (see set_current_state()). * * The @delta argument gives the kernel the freedom to schedule the * actual wakeup to a time that is both power and performance friendly * for regular (non RT/DL) tasks. * The kernel gives the normal best effort behavior for "@expires+@delta", * but may decide to fire the timer earlier, but no earlier than @expires. * * You can set the task state as follows - * * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to * pass before the routine returns unless the current task is explicitly * woken up (e.g. by wake_up_process()). * * %TASK_INTERRUPTIBLE - the routine may return early if a signal is * delivered to the current task or the current task is explicitly woken * up. * * The current task state is guaranteed to be TASK_RUNNING when this * routine returns. * * Returns 0 when the timer has expired. If the task was woken before the * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or * by an explicit wakeup, it returns -EINTR. */ int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta, const enum hrtimer_mode mode) { return schedule_hrtimeout_range_clock(expires, delta, mode, CLOCK_MONOTONIC); } EXPORT_SYMBOL_GPL(schedule_hrtimeout_range); /** * schedule_hrtimeout - sleep until timeout * @expires: timeout value (ktime_t) * @mode: timer mode * * Make the current task sleep until the given expiry time has * elapsed. The routine will return immediately unless * the current task state has been set (see set_current_state()). * * You can set the task state as follows - * * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to * pass before the routine returns unless the current task is explicitly * woken up (e.g. by wake_up_process()).
* * %TASK_INTERRUPTIBLE - the routine may return early if a signal is * delivered to the current task or the current task is explicitly woken * up. * * The current task state is guaranteed to be TASK_RUNNING when this * routine returns. * * Returns 0 when the timer has expired. If the task was woken before the * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or * by an explicit wakeup, it returns -EINTR. */ int __sched schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode) { return schedule_hrtimeout_range(expires, 0, mode); } EXPORT_SYMBOL_GPL(schedule_hrtimeout);
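/*
 * Usage sketch (not from the original sources): a minimal example of how a
 * driver typically consumes the hrtimer interface implemented above. The
 * struct and function names (example_dev, example_poll_fn, example_start,
 * example_stop) and the 10 ms period are made up for illustration; the
 * hrtimer calls themselves (hrtimer_init(), hrtimer_start(),
 * hrtimer_forward_now(), hrtimer_cancel()) are the interfaces defined in
 * this file and in <linux/hrtimer.h>.
 */
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

struct example_dev {
	struct hrtimer poll_timer;
};

/* Expiry callback: runs in hardirq context (softirq expiry on PREEMPT_RT). */
static enum hrtimer_restart example_poll_fn(struct hrtimer *t)
{
	struct example_dev *dev = container_of(t, struct example_dev, poll_timer);

	pr_debug("example: poll tick for %p\n", dev);

	/* Push the expiry 10 ms past "now" and ask the core to requeue. */
	hrtimer_forward_now(t, ms_to_ktime(10));
	return HRTIMER_RESTART;
}

static void example_start(struct example_dev *dev)
{
	hrtimer_init(&dev->poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	dev->poll_timer.function = example_poll_fn;
	/* First expiry 10 ms from now; relative mode matches the init above. */
	hrtimer_start(&dev->poll_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}

static void example_stop(struct example_dev *dev)
{
	/* hrtimer_cancel() waits for a running callback, as implemented above. */
	hrtimer_cancel(&dev->poll_timer);
}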
// SPDX-License-Identifier: GPL-2.0-only /* DVB USB compliant linux driver for * * DM04/QQBOX DVB-S USB BOX LME2510C + SHARP:BS2F7HZ7395 * LME2510C + LG TDQY-P001F * LME2510C + BS2F7HZ0194 * LME2510 + LG TDQY-P001F * LME2510 + BS2F7HZ0194 * * MVB7395 (LME2510C+SHARP:BS2F7HZ7395) * SHARP:BS2F7HZ7395 = (STV0288+Sharp IX2505V) * * MV001F (LME2510+LGTDQY-P001F) * LG TDQY - P001F =(TDA8263 + TDA10086H) * * MVB0001F (LME2510C+LGTDQT-P001F) * * MV0194 (LME2510+SHARP:BS2F7HZ0194) * SHARP:BS2F7HZ0194 = (STV0299+IX2410) * * MVB0194 (LME2510C+SHARP0194) * * LME2510C + M88RS2000 * * For firmware see Documentation/admin-guide/media/lmedm04.rst * * I2C addresses: * 0xd0 - STV0288 - Demodulator * 0xc0 - Sharp IX2505V - Tuner * -- * 0x1c - TDA10086 - Demodulator * 0xc0 - TDA8263 - Tuner * -- * 0xd0 - STV0299 - Demodulator * 0xc0 - IX2410 - Tuner * * VID = 3344 PID LME2510=1122 LME2510C=1120 * * Copyright (C) 2010 Malcolm Priestley (tvboxspy@gmail.com) * LME2510(C)(C) Leaguerme (Shenzhen) MicroElectronics Co., Ltd. * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information * * Known Issues : * LME2510: Non Intel USB chipsets fail to maintain High Speed on * Boot or Hot Plug. * * QQbox suffers from noise on LNB voltage. * * LME2510: SHARP:BS2F7HZ0194(MV0194) cannot cold reset and share system * with other tuners. After a cold reset streaming will not start. * * M88RS2000 suffers from loss of lock.
*/ #define DVB_USB_LOG_PREFIX "LME2510(C)" #include <linux/usb.h> #include <linux/usb/input.h> #include <media/rc-core.h> #include "dvb_usb.h" #include "lmedm04.h" #include "tda826x.h" #include "tda10086.h" #include "stv0288.h" #include "ix2505v.h" #include "stv0299.h" #include "dvb-pll.h" #include "z0194a.h" #include "m88rs2000.h" #include "ts2020.h" #define LME2510_C_S7395 "dvb-usb-lme2510c-s7395.fw"; #define LME2510_C_LG "dvb-usb-lme2510c-lg.fw"; #define LME2510_C_S0194 "dvb-usb-lme2510c-s0194.fw"; #define LME2510_C_RS2000 "dvb-usb-lme2510c-rs2000.fw"; #define LME2510_LG "dvb-usb-lme2510-lg.fw"; #define LME2510_S0194 "dvb-usb-lme2510-s0194.fw"; /* debug */ static int dvb_usb_lme2510_debug; #define lme_debug(var, level, args...) do { \ if ((var >= level)) \ pr_debug(DVB_USB_LOG_PREFIX": " args); \ } while (0) #define deb_info(level, args...) lme_debug(dvb_usb_lme2510_debug, level, args) #define debug_data_snipet(level, name, p) \ deb_info(level, name" (%8phN)", p); #define info(args...) pr_info(DVB_USB_LOG_PREFIX": "args) module_param_named(debug, dvb_usb_lme2510_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))."); static int dvb_usb_lme2510_firmware; module_param_named(firmware, dvb_usb_lme2510_firmware, int, 0644); MODULE_PARM_DESC(firmware, "set default firmware 0=Sharp7395 1=LG"); static int pid_filter; module_param_named(pid, pid_filter, int, 0644); MODULE_PARM_DESC(pid, "set default 0=default 1=off 2=on"); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); #define TUNER_DEFAULT 0x0 #define TUNER_LG 0x1 #define TUNER_S7395 0x2 #define TUNER_S0194 0x3 #define TUNER_RS2000 0x4 struct lme2510_state { unsigned long int_urb_due; enum fe_status lock_status; u8 id; u8 tuner_config; u8 signal_level; u8 signal_sn; u8 time_key; u8 i2c_talk_onoff; u8 i2c_gate; u8 i2c_tuner_gate_w; u8 i2c_tuner_gate_r; u8 i2c_tuner_addr; u8 stream_on; u8 pid_size; u8 pid_off; u8 int_buffer[128]; struct urb *lme_urb; u8 usb_buffer[64]; /* Frontend original calls */ int (*fe_read_status)(struct dvb_frontend *, enum fe_status *); int (*fe_read_signal_strength)(struct dvb_frontend *, u16 *); int (*fe_read_snr)(struct dvb_frontend *, u16 *); int (*fe_read_ber)(struct dvb_frontend *, u32 *); int (*fe_read_ucblocks)(struct dvb_frontend *, u32 *); int (*fe_set_voltage)(struct dvb_frontend *, enum fe_sec_voltage); u8 dvb_usb_lme2510_firmware; }; static int lme2510_usb_talk(struct dvb_usb_device *d, u8 *wbuf, int wlen, u8 *rbuf, int rlen) { struct lme2510_state *st = d->priv; int ret = 0; if (max(wlen, rlen) > sizeof(st->usb_buffer)) return -EINVAL; ret = mutex_lock_interruptible(&d->usb_mutex); if (ret < 0) return -EAGAIN; memcpy(st->usb_buffer, wbuf, wlen); ret = dvb_usbv2_generic_rw_locked(d, st->usb_buffer, wlen, st->usb_buffer, rlen); if (rlen) memcpy(rbuf, st->usb_buffer, rlen); mutex_unlock(&d->usb_mutex); return ret; } static int lme2510_stream_restart(struct dvb_usb_device *d) { struct lme2510_state *st = d->priv; u8 all_pids[] = LME_ALL_PIDS; u8 stream_on[] = LME_ST_ON_W; u8 rbuff[1]; if (st->pid_off) lme2510_usb_talk(d, all_pids, sizeof(all_pids), rbuff, sizeof(rbuff)); /*Restart Stream Command*/ return lme2510_usb_talk(d, stream_on, sizeof(stream_on), rbuff, sizeof(rbuff)); } static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out) { struct lme2510_state *st = d->priv; static u8 pid_buff[] = LME_ZERO_PID; static u8 rbuf[1]; u8 pid_no = index * 2; u8 pid_len = pid_no + 2; int ret = 0; deb_info(1, "PID Setting Pid %04x", pid_out); if (st->pid_size == 0) ret |= 
lme2510_stream_restart(d); pid_buff[2] = pid_no; pid_buff[3] = (u8)pid_out & 0xff; pid_buff[4] = pid_no + 1; pid_buff[5] = (u8)(pid_out >> 8); if (pid_len > st->pid_size) st->pid_size = pid_len; pid_buff[7] = 0x80 + st->pid_size; ret |= lme2510_usb_talk(d, pid_buff , sizeof(pid_buff) , rbuf, sizeof(rbuf)); if (st->stream_on) ret |= lme2510_stream_restart(d); return ret; } /* Convert range from 0x00-0xff to 0x0000-0xffff */ #define reg_to_16bits(x) ((x) | ((x) << 8)) static void lme2510_update_stats(struct dvb_usb_adapter *adap) { struct lme2510_state *st = adap_to_priv(adap); struct dvb_frontend *fe = adap->fe[0]; struct dtv_frontend_properties *c; u32 s_tmp = 0, c_tmp = 0; if (!fe) return; c = &fe->dtv_property_cache; c->block_count.len = 1; c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->block_error.len = 1; c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->post_bit_count.len = 1; c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->post_bit_error.len = 1; c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; if (st->i2c_talk_onoff) { c->strength.len = 1; c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->cnr.len = 1; c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; return; } switch (st->tuner_config) { case TUNER_LG: s_tmp = reg_to_16bits(0xff - st->signal_level); c_tmp = reg_to_16bits(0xff - st->signal_sn); break; case TUNER_S7395: case TUNER_S0194: s_tmp = 0xffff - (((st->signal_level * 2) << 8) * 5 / 4); c_tmp = reg_to_16bits((0xff - st->signal_sn - 0xa1) * 3); break; case TUNER_RS2000: s_tmp = reg_to_16bits(st->signal_level); c_tmp = reg_to_16bits(st->signal_sn); } c->strength.len = 1; c->strength.stat[0].scale = FE_SCALE_RELATIVE; c->strength.stat[0].uvalue = (u64)s_tmp; c->cnr.len = 1; c->cnr.stat[0].scale = FE_SCALE_RELATIVE; c->cnr.stat[0].uvalue = (u64)c_tmp; } static void lme2510_int_response(struct urb *lme_urb) { struct dvb_usb_adapter *adap = lme_urb->context; struct lme2510_state *st = adap_to_priv(adap); u8 *ibuf, *rbuf; int i = 0, offset; u32 key; u8 signal_lock = 0; switch (lme_urb->status) { case 0: case -ETIMEDOUT: break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: return; default: info("Error %x", lme_urb->status); break; } rbuf = (u8 *) lme_urb->transfer_buffer; offset = ((lme_urb->actual_length/8) > 4) ? 
4 : (lme_urb->actual_length/8) ; for (i = 0; i < offset; ++i) { ibuf = (u8 *)&rbuf[i*8]; deb_info(5, "INT O/S C =%02x C/O=%02x Type =%02x%02x", offset, i, ibuf[0], ibuf[1]); switch (ibuf[0]) { case 0xaa: debug_data_snipet(1, "INT Remote data snippet", ibuf); if (!adap_to_d(adap)->rc_dev) break; key = RC_SCANCODE_NEC32(ibuf[2] << 24 | ibuf[3] << 16 | ibuf[4] << 8 | ibuf[5]); deb_info(1, "INT Key = 0x%08x", key); rc_keydown(adap_to_d(adap)->rc_dev, RC_PROTO_NEC32, key, 0); break; case 0xbb: switch (st->tuner_config) { case TUNER_LG: signal_lock = ibuf[2] & BIT(5); st->signal_level = ibuf[4]; st->signal_sn = ibuf[3]; st->time_key = ibuf[7]; break; case TUNER_S7395: case TUNER_S0194: /* Tweak for earlier firmware*/ if (ibuf[1] == 0x03) { signal_lock = ibuf[2] & BIT(4); st->signal_level = ibuf[3]; st->signal_sn = ibuf[4]; } else { st->signal_level = ibuf[4]; st->signal_sn = ibuf[5]; } break; case TUNER_RS2000: signal_lock = ibuf[2] & 0xee; st->signal_level = ibuf[5]; st->signal_sn = ibuf[4]; st->time_key = ibuf[7]; break; default: break; } /* Interrupt will also throw just BIT 0 as lock */ signal_lock |= ibuf[2] & BIT(0); if (!signal_lock) st->lock_status &= ~FE_HAS_LOCK; lme2510_update_stats(adap); debug_data_snipet(5, "INT Remote data snippet in", ibuf); break; case 0xcc: debug_data_snipet(1, "INT Control data snippet", ibuf); break; default: debug_data_snipet(1, "INT Unknown data snippet", ibuf); break; } } usb_submit_urb(lme_urb, GFP_ATOMIC); /* Interrupt urb is due every 48 msecs while streaming the buffer * stores up to 4 periods if missed. Allow 200 msec for next interrupt. */ st->int_urb_due = jiffies + msecs_to_jiffies(200); } static int lme2510_int_read(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap_to_d(adap); struct lme2510_state *lme_int = adap_to_priv(adap); struct usb_host_endpoint *ep; lme_int->lme_urb = usb_alloc_urb(0, GFP_KERNEL); if (lme_int->lme_urb == NULL) return -ENOMEM; usb_fill_int_urb(lme_int->lme_urb, d->udev, usb_rcvintpipe(d->udev, 0xa), lme_int->int_buffer, sizeof(lme_int->int_buffer), lme2510_int_response, adap, 8); /* Quirk of pipe reporting PIPE_BULK but behaves as interrupt */ ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe); if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa); usb_submit_urb(lme_int->lme_urb, GFP_KERNEL); info("INT Interrupt Service Started"); return 0; } static int lme2510_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff) { struct dvb_usb_device *d = adap_to_d(adap); struct lme2510_state *st = adap_to_priv(adap); static u8 clear_pid_reg[] = LME_ALL_PIDS; static u8 rbuf[1]; int ret = 0; deb_info(1, "PID Clearing Filter"); mutex_lock(&d->i2c_mutex); if (!onoff) { ret |= lme2510_usb_talk(d, clear_pid_reg, sizeof(clear_pid_reg), rbuf, sizeof(rbuf)); st->pid_off = true; } else st->pid_off = false; st->pid_size = 0; mutex_unlock(&d->i2c_mutex); if (ret) return -EREMOTEIO; return 0; } static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) { struct dvb_usb_device *d = adap_to_d(adap); int ret = 0; deb_info(3, "%s PID=%04x Index=%04x onoff=%02x", __func__, pid, index, onoff); if (onoff) { mutex_lock(&d->i2c_mutex); ret |= lme2510_enable_pid(d, index, pid); mutex_unlock(&d->i2c_mutex); } return ret; } static int lme2510_return_status(struct dvb_usb_device *d) { int ret; u8 *data; data = kzalloc(6, GFP_KERNEL); if (!data) return -ENOMEM; ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), 0x06, 0x80, 0x0302, 0x00, 
data, 0x6, 200); if (ret != 6) ret = -EINVAL; else ret = data[2]; info("Firmware Status: %6ph", data); kfree(data); return ret; } static int lme2510_msg(struct dvb_usb_device *d, u8 *wbuf, int wlen, u8 *rbuf, int rlen) { struct lme2510_state *st = d->priv; st->i2c_talk_onoff = 1; return lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen); } static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct lme2510_state *st = d->priv; static u8 obuf[64], ibuf[64]; int i, read, read_o; u16 len; u8 gate; mutex_lock(&d->i2c_mutex); for (i = 0; i < num; i++) { read_o = msg[i].flags & I2C_M_RD; read = i + 1 < num && msg[i + 1].flags & I2C_M_RD; read |= read_o; gate = (msg[i].addr == st->i2c_tuner_addr) ? (read) ? st->i2c_tuner_gate_r : st->i2c_tuner_gate_w : st->i2c_gate; obuf[0] = gate | (read << 7); if (gate == 5) obuf[1] = (read) ? 2 : msg[i].len + 1; else obuf[1] = msg[i].len + read + 1; obuf[2] = msg[i].addr << 1; if (read) { if (read_o) len = 3; else { memcpy(&obuf[3], msg[i].buf, msg[i].len); obuf[msg[i].len+3] = msg[i+1].len; len = msg[i].len+4; } } else { memcpy(&obuf[3], msg[i].buf, msg[i].len); len = msg[i].len+3; } if (lme2510_msg(d, obuf, len, ibuf, 64) < 0) { deb_info(1, "i2c transfer failed."); mutex_unlock(&d->i2c_mutex); return -EAGAIN; } if (read) { if (read_o) memcpy(msg[i].buf, &ibuf[1], msg[i].len); else { memcpy(msg[i+1].buf, &ibuf[1], msg[i+1].len); i++; } } } mutex_unlock(&d->i2c_mutex); return i; } static u32 lme2510_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm lme2510_i2c_algo = { .master_xfer = lme2510_i2c_xfer, .functionality = lme2510_i2c_func, }; static int lme2510_streaming_ctrl(struct dvb_frontend *fe, int onoff) { struct dvb_usb_adapter *adap = fe_to_adap(fe); struct dvb_usb_device *d = adap_to_d(adap); struct lme2510_state *st = adap_to_priv(adap); static u8 clear_reg_3[] = LME_ALL_PIDS; static u8 rbuf[1]; int ret = 0, rlen = sizeof(rbuf); deb_info(1, "STM (%02x)", onoff); /* Streaming is started by FE_HAS_LOCK */ if (onoff == 1) st->stream_on = 1; else { deb_info(1, "STM Steam Off"); /* mutex is here only to avoid collision with I2C */ mutex_lock(&d->i2c_mutex); ret = lme2510_usb_talk(d, clear_reg_3, sizeof(clear_reg_3), rbuf, rlen); st->stream_on = 0; st->i2c_talk_onoff = 1; mutex_unlock(&d->i2c_mutex); } return (ret < 0) ? -ENODEV : 0; } static u8 check_sum(u8 *p, u8 len) { u8 sum = 0; while (len--) sum += *p++; return sum; } static int lme2510_download_firmware(struct dvb_usb_device *d, const struct firmware *fw) { int ret = 0; u8 *data; u16 j, wlen, len_in, start, end; u8 packet_size, dlen, i; u8 *fw_data; packet_size = 0x31; len_in = 1; data = kzalloc(128, GFP_KERNEL); if (!data) { info("FRM Could not start Firmware Download"\ "(Buffer allocation failed)"); return -ENOMEM; } info("FRM Starting Firmware Download"); for (i = 1; i < 3; i++) { start = (i == 1) ? 0 : 512; end = (i == 1) ? 512 : fw->size; for (j = start; j < end; j += (packet_size+1)) { fw_data = (u8 *)(fw->data + j); if ((end - j) > packet_size) { data[0] = i; dlen = packet_size; } else { data[0] = i | 0x80; dlen = (u8)(end - j)-1; } data[1] = dlen; memcpy(&data[2], fw_data, dlen+1); wlen = (u8) dlen + 4; data[wlen-1] = check_sum(fw_data, dlen+1); deb_info(1, "Data S=%02x:E=%02x CS= %02x", data[3], data[dlen+2], data[dlen+3]); lme2510_usb_talk(d, data, wlen, data, len_in); ret |= (data[0] == 0x88) ? 
0 : -1; } } data[0] = 0x8a; len_in = 1; msleep(2000); lme2510_usb_talk(d, data, len_in, data, len_in); msleep(400); if (ret < 0) info("FRM Firmware Download Failed (%04x)" , ret); else info("FRM Firmware Download Completed - Resetting Device"); kfree(data); return RECONNECTS_USB; } static void lme_coldreset(struct dvb_usb_device *d) { u8 data[1] = {0}; data[0] = 0x0a; info("FRM Firmware Cold Reset"); lme2510_usb_talk(d, data, sizeof(data), data, sizeof(data)); return; } static const char fw_c_s7395[] = LME2510_C_S7395; static const char fw_c_lg[] = LME2510_C_LG; static const char fw_c_s0194[] = LME2510_C_S0194; static const char fw_c_rs2000[] = LME2510_C_RS2000; static const char fw_lg[] = LME2510_LG; static const char fw_s0194[] = LME2510_S0194; static const char *lme_firmware_switch(struct dvb_usb_device *d, int cold) { struct lme2510_state *st = d->priv; struct usb_device *udev = d->udev; const struct firmware *fw = NULL; const char *fw_lme; int ret = 0; cold = (cold > 0) ? (cold & 1) : 0; switch (le16_to_cpu(udev->descriptor.idProduct)) { case 0x1122: switch (st->dvb_usb_lme2510_firmware) { default: case TUNER_S0194: fw_lme = fw_s0194; ret = request_firmware(&fw, fw_lme, &udev->dev); if (ret == 0) { st->dvb_usb_lme2510_firmware = TUNER_S0194; cold = 0; break; } fallthrough; case TUNER_LG: fw_lme = fw_lg; ret = request_firmware(&fw, fw_lme, &udev->dev); if (ret == 0) { st->dvb_usb_lme2510_firmware = TUNER_LG; break; } st->dvb_usb_lme2510_firmware = TUNER_DEFAULT; break; } break; case 0x1120: switch (st->dvb_usb_lme2510_firmware) { default: case TUNER_S7395: fw_lme = fw_c_s7395; ret = request_firmware(&fw, fw_lme, &udev->dev); if (ret == 0) { st->dvb_usb_lme2510_firmware = TUNER_S7395; cold = 0; break; } fallthrough; case TUNER_LG: fw_lme = fw_c_lg; ret = request_firmware(&fw, fw_lme, &udev->dev); if (ret == 0) { st->dvb_usb_lme2510_firmware = TUNER_LG; break; } fallthrough; case TUNER_S0194: fw_lme = fw_c_s0194; ret = request_firmware(&fw, fw_lme, &udev->dev); if (ret == 0) { st->dvb_usb_lme2510_firmware = TUNER_S0194; break; } st->dvb_usb_lme2510_firmware = TUNER_DEFAULT; cold = 0; break; } break; case 0x22f0: fw_lme = fw_c_rs2000; st->dvb_usb_lme2510_firmware = TUNER_RS2000; break; default: fw_lme = fw_c_s7395; } release_firmware(fw); if (cold) { dvb_usb_lme2510_firmware = st->dvb_usb_lme2510_firmware; info("FRM Changing to %s firmware", fw_lme); lme_coldreset(d); return NULL; } return fw_lme; } static struct tda10086_config tda10086_config = { .demod_address = 0x0e, .invert = 0, .diseqc_tone = 1, .xtal_freq = TDA10086_XTAL_16M, }; static struct stv0288_config lme_config = { .demod_address = 0x68, .min_delay_ms = 15, .inittab = s7395_inittab, }; static struct ix2505v_config lme_tuner = { .tuner_address = 0x60, .min_delay_ms = 100, .tuner_gain = 0x0, .tuner_chargepump = 0x3, }; static struct stv0299_config sharp_z0194_config = { .demod_address = 0x68, .inittab = sharp_z0194a_inittab, .mclk = 88000000UL, .invert = 0, .skip_reinit = 0, .lock_output = STV0299_LOCKOUTPUT_1, .volt13_op0_op1 = STV0299_VOLT13_OP1, .min_delay_ms = 100, .set_symbol_rate = sharp_z0194a_set_symbol_rate, }; static struct m88rs2000_config m88rs2000_config = { .demod_addr = 0x68 }; static struct ts2020_config ts2020_config = { .tuner_address = 0x60, .clk_out_div = 7, .dont_poll = true }; static int dm04_lme2510_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage) { struct dvb_usb_device *d = fe_to_d(fe); struct lme2510_state *st = fe_to_priv(fe); static u8 voltage_low[] = LME_VOLTAGE_L; static u8 
voltage_high[] = LME_VOLTAGE_H; static u8 rbuf[1]; int ret = 0, len = 3, rlen = 1; mutex_lock(&d->i2c_mutex); switch (voltage) { case SEC_VOLTAGE_18: ret |= lme2510_usb_talk(d, voltage_high, len, rbuf, rlen); break; case SEC_VOLTAGE_OFF: case SEC_VOLTAGE_13: default: ret |= lme2510_usb_talk(d, voltage_low, len, rbuf, rlen); break; } mutex_unlock(&d->i2c_mutex); if (st->tuner_config == TUNER_RS2000) if (st->fe_set_voltage) st->fe_set_voltage(fe, voltage); return (ret < 0) ? -ENODEV : 0; } static int dm04_read_status(struct dvb_frontend *fe, enum fe_status *status) { struct dvb_usb_device *d = fe_to_d(fe); struct lme2510_state *st = d->priv; int ret = 0; if (st->i2c_talk_onoff) { if (st->fe_read_status) { ret = st->fe_read_status(fe, status); if (ret < 0) return ret; } st->lock_status = *status; if (*status & FE_HAS_LOCK && st->stream_on) { mutex_lock(&d->i2c_mutex); st->i2c_talk_onoff = 0; ret = lme2510_stream_restart(d); mutex_unlock(&d->i2c_mutex); } return ret; } /* Timeout of interrupt reached on RS2000 */ if (st->tuner_config == TUNER_RS2000 && time_after(jiffies, st->int_urb_due)) st->lock_status &= ~FE_HAS_LOCK; *status = st->lock_status; if (!(*status & FE_HAS_LOCK)) { struct dvb_usb_adapter *adap = fe_to_adap(fe); st->i2c_talk_onoff = 1; lme2510_update_stats(adap); } return ret; } static int dm04_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct lme2510_state *st = fe_to_priv(fe); if (st->fe_read_signal_strength && !st->stream_on) return st->fe_read_signal_strength(fe, strength); if (c->strength.stat[0].scale == FE_SCALE_RELATIVE) *strength = (u16)c->strength.stat[0].uvalue; else *strength = 0; return 0; } static int dm04_read_snr(struct dvb_frontend *fe, u16 *snr) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct lme2510_state *st = fe_to_priv(fe); if (st->fe_read_snr && !st->stream_on) return st->fe_read_snr(fe, snr); if (c->cnr.stat[0].scale == FE_SCALE_RELATIVE) *snr = (u16)c->cnr.stat[0].uvalue; else *snr = 0; return 0; } static int dm04_read_ber(struct dvb_frontend *fe, u32 *ber) { struct lme2510_state *st = fe_to_priv(fe); if (st->fe_read_ber && !st->stream_on) return st->fe_read_ber(fe, ber); *ber = 0; return 0; } static int dm04_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct lme2510_state *st = fe_to_priv(fe); if (st->fe_read_ucblocks && !st->stream_on) return st->fe_read_ucblocks(fe, ucblocks); *ucblocks = 0; return 0; } static int lme_name(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap_to_d(adap); struct lme2510_state *st = adap_to_priv(adap); const char *desc = d->name; static const char * const fe_name[] = { "", " LG TDQY-P001F", " SHARP:BS2F7HZ7395", " SHARP:BS2F7HZ0194", " RS2000"}; char *name = adap->fe[0]->ops.info.name; strscpy(name, desc, 128); strlcat(name, fe_name[st->tuner_config], 128); return 0; } static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap_to_d(adap); struct lme2510_state *st = d->priv; int ret = 0; st->i2c_talk_onoff = 1; switch (le16_to_cpu(d->udev->descriptor.idProduct)) { case 0x1122: case 0x1120: st->i2c_gate = 4; adap->fe[0] = dvb_attach(tda10086_attach, &tda10086_config, &d->i2c_adap); if (adap->fe[0]) { info("TUN Found Frontend TDA10086"); st->i2c_tuner_gate_w = 4; st->i2c_tuner_gate_r = 4; st->i2c_tuner_addr = 0x60; st->tuner_config = TUNER_LG; if (st->dvb_usb_lme2510_firmware != TUNER_LG) { st->dvb_usb_lme2510_firmware = TUNER_LG; ret = 
lme_firmware_switch(d, 1) ? 0 : -ENODEV; } break; } st->i2c_gate = 4; adap->fe[0] = dvb_attach(stv0299_attach, &sharp_z0194_config, &d->i2c_adap); if (adap->fe[0]) { info("FE Found Stv0299"); st->i2c_tuner_gate_w = 4; st->i2c_tuner_gate_r = 5; st->i2c_tuner_addr = 0x60; st->tuner_config = TUNER_S0194; if (st->dvb_usb_lme2510_firmware != TUNER_S0194) { st->dvb_usb_lme2510_firmware = TUNER_S0194; ret = lme_firmware_switch(d, 1) ? 0 : -ENODEV; } break; } st->i2c_gate = 5; adap->fe[0] = dvb_attach(stv0288_attach, &lme_config, &d->i2c_adap); if (adap->fe[0]) { info("FE Found Stv0288"); st->i2c_tuner_gate_w = 4; st->i2c_tuner_gate_r = 5; st->i2c_tuner_addr = 0x60; st->tuner_config = TUNER_S7395; if (st->dvb_usb_lme2510_firmware != TUNER_S7395) { st->dvb_usb_lme2510_firmware = TUNER_S7395; ret = lme_firmware_switch(d, 1) ? 0 : -ENODEV; } break; } fallthrough; case 0x22f0: st->i2c_gate = 5; adap->fe[0] = dvb_attach(m88rs2000_attach, &m88rs2000_config, &d->i2c_adap); if (adap->fe[0]) { info("FE Found M88RS2000"); st->i2c_tuner_gate_w = 5; st->i2c_tuner_gate_r = 5; st->i2c_tuner_addr = 0x60; st->tuner_config = TUNER_RS2000; st->fe_set_voltage = adap->fe[0]->ops.set_voltage; } break; } if (adap->fe[0] == NULL) { info("DM04/QQBOX Not Powered up or not Supported"); return -ENODEV; } if (ret) { if (adap->fe[0]) { dvb_frontend_detach(adap->fe[0]); adap->fe[0] = NULL; } d->rc_map = NULL; return -ENODEV; } st->fe_read_status = adap->fe[0]->ops.read_status; st->fe_read_signal_strength = adap->fe[0]->ops.read_signal_strength; st->fe_read_snr = adap->fe[0]->ops.read_snr; st->fe_read_ber = adap->fe[0]->ops.read_ber; st->fe_read_ucblocks = adap->fe[0]->ops.read_ucblocks; adap->fe[0]->ops.read_status = dm04_read_status; adap->fe[0]->ops.read_signal_strength = dm04_read_signal_strength; adap->fe[0]->ops.read_snr = dm04_read_snr; adap->fe[0]->ops.read_ber = dm04_read_ber; adap->fe[0]->ops.read_ucblocks = dm04_read_ucblocks; adap->fe[0]->ops.set_voltage = dm04_lme2510_set_voltage; ret = lme_name(adap); return ret; } static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap_to_d(adap); struct lme2510_state *st = adap_to_priv(adap); static const char * const tun_msg[] = {"", "TDA8263", "IX2505V", "DVB_PLL_OPERA", "RS2000"}; int ret = 0; switch (st->tuner_config) { case TUNER_LG: if (dvb_attach(tda826x_attach, adap->fe[0], 0x60, &d->i2c_adap, 1)) ret = st->tuner_config; break; case TUNER_S7395: if (dvb_attach(ix2505v_attach , adap->fe[0], &lme_tuner, &d->i2c_adap)) ret = st->tuner_config; break; case TUNER_S0194: if (dvb_attach(dvb_pll_attach , adap->fe[0], 0x60, &d->i2c_adap, DVB_PLL_OPERA1)) ret = st->tuner_config; break; case TUNER_RS2000: if (dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config, &d->i2c_adap)) ret = st->tuner_config; break; default: break; } if (ret) { info("TUN Found %s tuner", tun_msg[ret]); } else { info("TUN No tuner found"); return -ENODEV; } /* Start the Interrupt*/ ret = lme2510_int_read(adap); if (ret < 0) { info("INT Unable to start Interrupt Service"); return -ENODEV; } return ret; } static int lme2510_powerup(struct dvb_usb_device *d, int onoff) { struct lme2510_state *st = d->priv; static u8 lnb_on[] = LNB_ON; static u8 lnb_off[] = LNB_OFF; static u8 rbuf[1]; int ret = 0, len = 3, rlen = 1; mutex_lock(&d->i2c_mutex); ret = lme2510_usb_talk(d, onoff ? 
lnb_on : lnb_off, len, rbuf, rlen); st->i2c_talk_onoff = 1; mutex_unlock(&d->i2c_mutex); return ret; } static int lme2510_identify_state(struct dvb_usb_device *d, const char **name) { struct lme2510_state *st = d->priv; int status; usb_reset_configuration(d->udev); usb_set_interface(d->udev, d->props->bInterfaceNumber, 1); st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware; status = lme2510_return_status(d); if (status == 0x44) { *name = lme_firmware_switch(d, 0); return COLD; } if (status != 0x47) return -EINVAL; return WARM; } static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type, struct usb_data_stream_properties *stream) { struct dvb_usb_adapter *adap = fe_to_adap(fe); struct dvb_usb_device *d; if (adap == NULL) return 0; d = adap_to_d(adap); /* Turn PID filter on the fly by module option */ if (pid_filter == 2) { adap->pid_filtering = true; adap->max_feed_count = 15; } if (!(le16_to_cpu(d->udev->descriptor.idProduct) == 0x1122)) stream->endpoint = 0x8; return 0; } static int lme2510_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) { rc->allowed_protos = RC_PROTO_BIT_NEC32; return 0; } static void lme2510_exit(struct dvb_usb_device *d) { struct lme2510_state *st = d->priv; if (st->lme_urb) { usb_kill_urb(st->lme_urb); usb_free_urb(st->lme_urb); info("Interrupt Service Stopped"); } } static struct dvb_usb_device_properties lme2510_props = { .driver_name = KBUILD_MODNAME, .owner = THIS_MODULE, .bInterfaceNumber = 0, .adapter_nr = adapter_nr, .size_of_priv = sizeof(struct lme2510_state), .generic_bulk_ctrl_endpoint = 0x01, .generic_bulk_ctrl_endpoint_response = 0x01, .download_firmware = lme2510_download_firmware, .power_ctrl = lme2510_powerup, .identify_state = lme2510_identify_state, .i2c_algo = &lme2510_i2c_algo, .frontend_attach = dm04_lme2510_frontend_attach, .tuner_attach = dm04_lme2510_tuner, .get_stream_config = lme2510_get_stream_config, .streaming_ctrl = lme2510_streaming_ctrl, .get_rc_config = lme2510_get_rc_config, .exit = lme2510_exit, .num_adapters = 1, .adapter = { { .caps = DVB_USB_ADAP_HAS_PID_FILTER| DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 15, .pid_filter = lme2510_pid_filter, .pid_filter_ctrl = lme2510_pid_filter_ctrl, .stream = DVB_USB_STREAM_BULK(0x86, 10, 4096), }, }, }; static const struct usb_device_id lme2510_id_table[] = { { DVB_USB_DEVICE(0x3344, 0x1122, &lme2510_props, "DM04_LME2510_DVB-S", RC_MAP_LME2510) }, { DVB_USB_DEVICE(0x3344, 0x1120, &lme2510_props, "DM04_LME2510C_DVB-S", RC_MAP_LME2510) }, { DVB_USB_DEVICE(0x3344, 0x22f0, &lme2510_props, "DM04_LME2510C_DVB-S RS2000", RC_MAP_LME2510) }, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, lme2510_id_table); static struct usb_driver lme2510_driver = { .name = KBUILD_MODNAME, .probe = dvb_usbv2_probe, .disconnect = dvb_usbv2_disconnect, .id_table = lme2510_id_table, .no_dynamic_id = 1, .soft_unbind = 1, }; module_usb_driver(lme2510_driver); MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); MODULE_VERSION("2.07"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(LME2510_C_S7395); MODULE_FIRMWARE(LME2510_C_LG); MODULE_FIRMWARE(LME2510_C_S0194); MODULE_FIRMWARE(LME2510_C_RS2000); MODULE_FIRMWARE(LME2510_LG); MODULE_FIRMWARE(LME2510_S0194);
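/*
 * Illustrative sketch (editorial addition, not part of the LME2510 driver
 * above): dm04_lme2510_frontend_attach() saves the demodulator's original
 * read_* ops in the driver state and installs wrappers that only forward to
 * them while the MPEG-TS stream is idle, presumably to avoid touching the
 * I2C bus mid-stream; otherwise the wrappers return cached statistics. The
 * same interposition pattern, reduced to a single op and written as plain
 * userspace C with hypothetical names, looks roughly like this:
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_frontend;
typedef int (*demo_read_snr_fn)(struct demo_frontend *fe, uint16_t *snr);

struct demo_frontend {
	demo_read_snr_fn read_snr;	/* op table entry seen by callers */
	void *priv;			/* driver state */
};

struct demo_state {
	demo_read_snr_fn fe_read_snr;	/* saved original op */
	bool stream_on;			/* true while streaming */
	uint16_t cached_snr;		/* last value taken from interrupt data */
};

/* Wrapper installed over the original op at attach time. */
static int demo_read_snr(struct demo_frontend *fe, uint16_t *snr)
{
	struct demo_state *st = fe->priv;

	if (st->fe_read_snr && !st->stream_on)
		return st->fe_read_snr(fe, snr);	/* ask the demod directly */

	*snr = st->cached_snr;	/* streaming: report the cached value */
	return 0;
}

/* Attach-time interposition: remember the original op, then replace it. */
static void demo_attach(struct demo_frontend *fe, struct demo_state *st)
{
	st->fe_read_snr = fe->read_snr;
	fe->priv = st;
	fe->read_snr = demo_read_snr;
}

/*
 * The real driver interposes all five read_* ops (status, signal strength,
 * SNR, BER, UC blocks) this way, and additionally wraps set_voltage so the
 * saved callback can still be invoked for the RS2000 tuner.
 */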
// SPDX-License-Identifier: GPL-2.0+ /* * HID driver for Nintendo Switch Joy-Cons and Pro Controllers * * Copyright (c) 2019-2021 Daniel J. Ogorchock <djogorchock@gmail.com> * Portions Copyright (c) 2020 Nadia Holmquist Pedersen <nadia@nhp.sh> * Copyright (c) 2022 Emily Strickland <linux@emily.st> * Copyright (c) 2023 Ryan McClelland <rymcclel@gmail.com> * * The following resources/projects were referenced for this driver: * https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering * https://gitlab.com/pjranki/joycon-linux-kernel (Peter Rankin) * https://github.com/FrotBot/SwitchProConLinuxUSB * https://github.com/MTCKC/ProconXInput * https://github.com/Davidobot/BetterJoyForCemu * hid-wiimote kernel hid driver * hid-logitech-hidpp driver * hid-sony driver * * This driver supports the Nintendo Switch Joy-Cons and Pro Controllers. The * Pro Controllers can either be used over USB or Bluetooth. * * This driver also incorporates support for Nintendo Switch Online controllers * for the NES, SNES, Sega Genesis, and N64.
* * The driver will retrieve the factory calibration info from the controllers, * so little to no user calibration should be required. * */ #include "hid-ids.h" #include <linux/unaligned.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/hid.h> #include <linux/idr.h> #include <linux/input.h> #include <linux/jiffies.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/power_supply.h> #include <linux/spinlock.h> /* * Reference the url below for the following HID report defines: * https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering */ /* Output Reports */ #define JC_OUTPUT_RUMBLE_AND_SUBCMD 0x01 #define JC_OUTPUT_FW_UPDATE_PKT 0x03 #define JC_OUTPUT_RUMBLE_ONLY 0x10 #define JC_OUTPUT_MCU_DATA 0x11 #define JC_OUTPUT_USB_CMD 0x80 /* Subcommand IDs */ #define JC_SUBCMD_STATE 0x00 #define JC_SUBCMD_MANUAL_BT_PAIRING 0x01 #define JC_SUBCMD_REQ_DEV_INFO 0x02 #define JC_SUBCMD_SET_REPORT_MODE 0x03 #define JC_SUBCMD_TRIGGERS_ELAPSED 0x04 #define JC_SUBCMD_GET_PAGE_LIST_STATE 0x05 #define JC_SUBCMD_SET_HCI_STATE 0x06 #define JC_SUBCMD_RESET_PAIRING_INFO 0x07 #define JC_SUBCMD_LOW_POWER_MODE 0x08 #define JC_SUBCMD_SPI_FLASH_READ 0x10 #define JC_SUBCMD_SPI_FLASH_WRITE 0x11 #define JC_SUBCMD_RESET_MCU 0x20 #define JC_SUBCMD_SET_MCU_CONFIG 0x21 #define JC_SUBCMD_SET_MCU_STATE 0x22 #define JC_SUBCMD_SET_PLAYER_LIGHTS 0x30 #define JC_SUBCMD_GET_PLAYER_LIGHTS 0x31 #define JC_SUBCMD_SET_HOME_LIGHT 0x38 #define JC_SUBCMD_ENABLE_IMU 0x40 #define JC_SUBCMD_SET_IMU_SENSITIVITY 0x41 #define JC_SUBCMD_WRITE_IMU_REG 0x42 #define JC_SUBCMD_READ_IMU_REG 0x43 #define JC_SUBCMD_ENABLE_VIBRATION 0x48 #define JC_SUBCMD_GET_REGULATED_VOLTAGE 0x50 /* Input Reports */ #define JC_INPUT_BUTTON_EVENT 0x3F #define JC_INPUT_SUBCMD_REPLY 0x21 #define JC_INPUT_IMU_DATA 0x30 #define JC_INPUT_MCU_DATA 0x31 #define JC_INPUT_USB_RESPONSE 0x81 /* Feature Reports */ #define JC_FEATURE_LAST_SUBCMD 0x02 #define JC_FEATURE_OTA_FW_UPGRADE 0x70 #define JC_FEATURE_SETUP_MEM_READ 0x71 #define JC_FEATURE_MEM_READ 0x72 #define JC_FEATURE_ERASE_MEM_SECTOR 0x73 #define JC_FEATURE_MEM_WRITE 0x74 #define JC_FEATURE_LAUNCH 0x75 /* USB Commands */ #define JC_USB_CMD_CONN_STATUS 0x01 #define JC_USB_CMD_HANDSHAKE 0x02 #define JC_USB_CMD_BAUDRATE_3M 0x03 #define JC_USB_CMD_NO_TIMEOUT 0x04 #define JC_USB_CMD_EN_TIMEOUT 0x05 #define JC_USB_RESET 0x06 #define JC_USB_PRE_HANDSHAKE 0x91 #define JC_USB_SEND_UART 0x92 /* Magic value denoting presence of user calibration */ #define JC_CAL_USR_MAGIC_0 0xB2 #define JC_CAL_USR_MAGIC_1 0xA1 #define JC_CAL_USR_MAGIC_SIZE 2 /* SPI storage addresses of user calibration data */ #define JC_CAL_USR_LEFT_MAGIC_ADDR 0x8010 #define JC_CAL_USR_LEFT_DATA_ADDR 0x8012 #define JC_CAL_USR_LEFT_DATA_END 0x801A #define JC_CAL_USR_RIGHT_MAGIC_ADDR 0x801B #define JC_CAL_USR_RIGHT_DATA_ADDR 0x801D #define JC_CAL_STICK_DATA_SIZE \ (JC_CAL_USR_LEFT_DATA_END - JC_CAL_USR_LEFT_DATA_ADDR + 1) /* SPI storage addresses of factory calibration data */ #define JC_CAL_FCT_DATA_LEFT_ADDR 0x603d #define JC_CAL_FCT_DATA_RIGHT_ADDR 0x6046 /* SPI storage addresses of IMU factory calibration data */ #define JC_IMU_CAL_FCT_DATA_ADDR 0x6020 #define JC_IMU_CAL_FCT_DATA_END 0x6037 #define JC_IMU_CAL_DATA_SIZE \ (JC_IMU_CAL_FCT_DATA_END - JC_IMU_CAL_FCT_DATA_ADDR + 1) /* SPI storage addresses of IMU user calibration data */ #define JC_IMU_CAL_USR_MAGIC_ADDR 0x8026 #define JC_IMU_CAL_USR_DATA_ADDR 0x8028 /* The raw analog joystick values will be mapped in terms of this magnitude */ 
#define JC_MAX_STICK_MAG 32767 #define JC_STICK_FUZZ 250 #define JC_STICK_FLAT 500 /* Hat values for pro controller's d-pad */ #define JC_MAX_DPAD_MAG 1 #define JC_DPAD_FUZZ 0 #define JC_DPAD_FLAT 0 /* Under most circumstances IMU reports are pushed every 15ms; use as default */ #define JC_IMU_DFLT_AVG_DELTA_MS 15 /* How many samples to sum before calculating average IMU report delta */ #define JC_IMU_SAMPLES_PER_DELTA_AVG 300 /* Controls how many dropped IMU packets at once trigger a warning message */ #define JC_IMU_DROPPED_PKT_WARNING 3 /* * The controller's accelerometer has a sensor resolution of 16bits and is * configured with a range of +-8000 milliGs. Therefore, the resolution can be * calculated thus: (2^16-1)/(8000 * 2) = 4.096 digits per milliG * Resolution per G (rather than per millliG): 4.096 * 1000 = 4096 digits per G * Alternatively: 1/4096 = .0002441 Gs per digit */ #define JC_IMU_MAX_ACCEL_MAG 32767 #define JC_IMU_ACCEL_RES_PER_G 4096 #define JC_IMU_ACCEL_FUZZ 10 #define JC_IMU_ACCEL_FLAT 0 /* * The controller's gyroscope has a sensor resolution of 16bits and is * configured with a range of +-2000 degrees/second. * Digits per dps: (2^16 -1)/(2000*2) = 16.38375 * dps per digit: 16.38375E-1 = .0610 * * STMicro recommends in the datasheet to add 15% to the dps/digit. This allows * the full sensitivity range to be saturated without clipping. This yields more * accurate results, so it's the technique this driver uses. * dps per digit (corrected): .0610 * 1.15 = .0702 * digits per dps (corrected): .0702E-1 = 14.247 * * Now, 14.247 truncating to 14 loses a lot of precision, so we rescale the * min/max range by 1000. */ #define JC_IMU_PREC_RANGE_SCALE 1000 /* Note: change mag and res_per_dps if prec_range_scale is ever altered */ #define JC_IMU_MAX_GYRO_MAG 32767000 /* (2^16-1)*1000 */ #define JC_IMU_GYRO_RES_PER_DPS 14247 /* (14.247*1000) */ #define JC_IMU_GYRO_FUZZ 10 #define JC_IMU_GYRO_FLAT 0 /* frequency/amplitude tables for rumble */ struct joycon_rumble_freq_data { u16 high; u8 low; u16 freq; /* Hz*/ }; struct joycon_rumble_amp_data { u8 high; u16 low; u16 amp; }; #if IS_ENABLED(CONFIG_NINTENDO_FF) /* * These tables are from * https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering/blob/master/rumble_data_table.md */ static const struct joycon_rumble_freq_data joycon_rumble_frequencies[] = { /* high, low, freq */ { 0x0000, 0x01, 41 }, { 0x0000, 0x02, 42 }, { 0x0000, 0x03, 43 }, { 0x0000, 0x04, 44 }, { 0x0000, 0x05, 45 }, { 0x0000, 0x06, 46 }, { 0x0000, 0x07, 47 }, { 0x0000, 0x08, 48 }, { 0x0000, 0x09, 49 }, { 0x0000, 0x0A, 50 }, { 0x0000, 0x0B, 51 }, { 0x0000, 0x0C, 52 }, { 0x0000, 0x0D, 53 }, { 0x0000, 0x0E, 54 }, { 0x0000, 0x0F, 55 }, { 0x0000, 0x10, 57 }, { 0x0000, 0x11, 58 }, { 0x0000, 0x12, 59 }, { 0x0000, 0x13, 60 }, { 0x0000, 0x14, 62 }, { 0x0000, 0x15, 63 }, { 0x0000, 0x16, 64 }, { 0x0000, 0x17, 66 }, { 0x0000, 0x18, 67 }, { 0x0000, 0x19, 69 }, { 0x0000, 0x1A, 70 }, { 0x0000, 0x1B, 72 }, { 0x0000, 0x1C, 73 }, { 0x0000, 0x1D, 75 }, { 0x0000, 0x1e, 77 }, { 0x0000, 0x1f, 78 }, { 0x0000, 0x20, 80 }, { 0x0400, 0x21, 82 }, { 0x0800, 0x22, 84 }, { 0x0c00, 0x23, 85 }, { 0x1000, 0x24, 87 }, { 0x1400, 0x25, 89 }, { 0x1800, 0x26, 91 }, { 0x1c00, 0x27, 93 }, { 0x2000, 0x28, 95 }, { 0x2400, 0x29, 97 }, { 0x2800, 0x2a, 99 }, { 0x2c00, 0x2b, 102 }, { 0x3000, 0x2c, 104 }, { 0x3400, 0x2d, 106 }, { 0x3800, 0x2e, 108 }, { 0x3c00, 0x2f, 111 }, { 0x4000, 0x30, 113 }, { 0x4400, 0x31, 116 }, { 0x4800, 0x32, 118 }, { 0x4c00, 0x33, 121 }, { 0x5000, 0x34, 123 }, { 0x5400, 0x35, 126 
}, { 0x5800, 0x36, 129 }, { 0x5c00, 0x37, 132 }, { 0x6000, 0x38, 135 }, { 0x6400, 0x39, 137 }, { 0x6800, 0x3a, 141 }, { 0x6c00, 0x3b, 144 }, { 0x7000, 0x3c, 147 }, { 0x7400, 0x3d, 150 }, { 0x7800, 0x3e, 153 }, { 0x7c00, 0x3f, 157 }, { 0x8000, 0x40, 160 }, { 0x8400, 0x41, 164 }, { 0x8800, 0x42, 167 }, { 0x8c00, 0x43, 171 }, { 0x9000, 0x44, 174 }, { 0x9400, 0x45, 178 }, { 0x9800, 0x46, 182 }, { 0x9c00, 0x47, 186 }, { 0xa000, 0x48, 190 }, { 0xa400, 0x49, 194 }, { 0xa800, 0x4a, 199 }, { 0xac00, 0x4b, 203 }, { 0xb000, 0x4c, 207 }, { 0xb400, 0x4d, 212 }, { 0xb800, 0x4e, 217 }, { 0xbc00, 0x4f, 221 }, { 0xc000, 0x50, 226 }, { 0xc400, 0x51, 231 }, { 0xc800, 0x52, 236 }, { 0xcc00, 0x53, 241 }, { 0xd000, 0x54, 247 }, { 0xd400, 0x55, 252 }, { 0xd800, 0x56, 258 }, { 0xdc00, 0x57, 263 }, { 0xe000, 0x58, 269 }, { 0xe400, 0x59, 275 }, { 0xe800, 0x5a, 281 }, { 0xec00, 0x5b, 287 }, { 0xf000, 0x5c, 293 }, { 0xf400, 0x5d, 300 }, { 0xf800, 0x5e, 306 }, { 0xfc00, 0x5f, 313 }, { 0x0001, 0x60, 320 }, { 0x0401, 0x61, 327 }, { 0x0801, 0x62, 334 }, { 0x0c01, 0x63, 341 }, { 0x1001, 0x64, 349 }, { 0x1401, 0x65, 357 }, { 0x1801, 0x66, 364 }, { 0x1c01, 0x67, 372 }, { 0x2001, 0x68, 381 }, { 0x2401, 0x69, 389 }, { 0x2801, 0x6a, 397 }, { 0x2c01, 0x6b, 406 }, { 0x3001, 0x6c, 415 }, { 0x3401, 0x6d, 424 }, { 0x3801, 0x6e, 433 }, { 0x3c01, 0x6f, 443 }, { 0x4001, 0x70, 453 }, { 0x4401, 0x71, 462 }, { 0x4801, 0x72, 473 }, { 0x4c01, 0x73, 483 }, { 0x5001, 0x74, 494 }, { 0x5401, 0x75, 504 }, { 0x5801, 0x76, 515 }, { 0x5c01, 0x77, 527 }, { 0x6001, 0x78, 538 }, { 0x6401, 0x79, 550 }, { 0x6801, 0x7a, 562 }, { 0x6c01, 0x7b, 574 }, { 0x7001, 0x7c, 587 }, { 0x7401, 0x7d, 600 }, { 0x7801, 0x7e, 613 }, { 0x7c01, 0x7f, 626 }, { 0x8001, 0x00, 640 }, { 0x8401, 0x00, 654 }, { 0x8801, 0x00, 668 }, { 0x8c01, 0x00, 683 }, { 0x9001, 0x00, 698 }, { 0x9401, 0x00, 713 }, { 0x9801, 0x00, 729 }, { 0x9c01, 0x00, 745 }, { 0xa001, 0x00, 761 }, { 0xa401, 0x00, 778 }, { 0xa801, 0x00, 795 }, { 0xac01, 0x00, 812 }, { 0xb001, 0x00, 830 }, { 0xb401, 0x00, 848 }, { 0xb801, 0x00, 867 }, { 0xbc01, 0x00, 886 }, { 0xc001, 0x00, 905 }, { 0xc401, 0x00, 925 }, { 0xc801, 0x00, 945 }, { 0xcc01, 0x00, 966 }, { 0xd001, 0x00, 987 }, { 0xd401, 0x00, 1009 }, { 0xd801, 0x00, 1031 }, { 0xdc01, 0x00, 1053 }, { 0xe001, 0x00, 1076 }, { 0xe401, 0x00, 1100 }, { 0xe801, 0x00, 1124 }, { 0xec01, 0x00, 1149 }, { 0xf001, 0x00, 1174 }, { 0xf401, 0x00, 1199 }, { 0xf801, 0x00, 1226 }, { 0xfc01, 0x00, 1253 } }; #define joycon_max_rumble_amp (1003) static const struct joycon_rumble_amp_data joycon_rumble_amplitudes[] = { /* high, low, amp */ { 0x00, 0x0040, 0 }, { 0x02, 0x8040, 10 }, { 0x04, 0x0041, 12 }, { 0x06, 0x8041, 14 }, { 0x08, 0x0042, 17 }, { 0x0a, 0x8042, 20 }, { 0x0c, 0x0043, 24 }, { 0x0e, 0x8043, 28 }, { 0x10, 0x0044, 33 }, { 0x12, 0x8044, 40 }, { 0x14, 0x0045, 47 }, { 0x16, 0x8045, 56 }, { 0x18, 0x0046, 67 }, { 0x1a, 0x8046, 80 }, { 0x1c, 0x0047, 95 }, { 0x1e, 0x8047, 112 }, { 0x20, 0x0048, 117 }, { 0x22, 0x8048, 123 }, { 0x24, 0x0049, 128 }, { 0x26, 0x8049, 134 }, { 0x28, 0x004a, 140 }, { 0x2a, 0x804a, 146 }, { 0x2c, 0x004b, 152 }, { 0x2e, 0x804b, 159 }, { 0x30, 0x004c, 166 }, { 0x32, 0x804c, 173 }, { 0x34, 0x004d, 181 }, { 0x36, 0x804d, 189 }, { 0x38, 0x004e, 198 }, { 0x3a, 0x804e, 206 }, { 0x3c, 0x004f, 215 }, { 0x3e, 0x804f, 225 }, { 0x40, 0x0050, 230 }, { 0x42, 0x8050, 235 }, { 0x44, 0x0051, 240 }, { 0x46, 0x8051, 245 }, { 0x48, 0x0052, 251 }, { 0x4a, 0x8052, 256 }, { 0x4c, 0x0053, 262 }, { 0x4e, 0x8053, 268 }, { 0x50, 0x0054, 273 }, { 0x52, 0x8054, 279 }, { 0x54, 0x0055, 
286 }, { 0x56, 0x8055, 292 }, { 0x58, 0x0056, 298 }, { 0x5a, 0x8056, 305 }, { 0x5c, 0x0057, 311 }, { 0x5e, 0x8057, 318 }, { 0x60, 0x0058, 325 }, { 0x62, 0x8058, 332 }, { 0x64, 0x0059, 340 }, { 0x66, 0x8059, 347 }, { 0x68, 0x005a, 355 }, { 0x6a, 0x805a, 362 }, { 0x6c, 0x005b, 370 }, { 0x6e, 0x805b, 378 }, { 0x70, 0x005c, 387 }, { 0x72, 0x805c, 395 }, { 0x74, 0x005d, 404 }, { 0x76, 0x805d, 413 }, { 0x78, 0x005e, 422 }, { 0x7a, 0x805e, 431 }, { 0x7c, 0x005f, 440 }, { 0x7e, 0x805f, 450 }, { 0x80, 0x0060, 460 }, { 0x82, 0x8060, 470 }, { 0x84, 0x0061, 480 }, { 0x86, 0x8061, 491 }, { 0x88, 0x0062, 501 }, { 0x8a, 0x8062, 512 }, { 0x8c, 0x0063, 524 }, { 0x8e, 0x8063, 535 }, { 0x90, 0x0064, 547 }, { 0x92, 0x8064, 559 }, { 0x94, 0x0065, 571 }, { 0x96, 0x8065, 584 }, { 0x98, 0x0066, 596 }, { 0x9a, 0x8066, 609 }, { 0x9c, 0x0067, 623 }, { 0x9e, 0x8067, 636 }, { 0xa0, 0x0068, 650 }, { 0xa2, 0x8068, 665 }, { 0xa4, 0x0069, 679 }, { 0xa6, 0x8069, 694 }, { 0xa8, 0x006a, 709 }, { 0xaa, 0x806a, 725 }, { 0xac, 0x006b, 741 }, { 0xae, 0x806b, 757 }, { 0xb0, 0x006c, 773 }, { 0xb2, 0x806c, 790 }, { 0xb4, 0x006d, 808 }, { 0xb6, 0x806d, 825 }, { 0xb8, 0x006e, 843 }, { 0xba, 0x806e, 862 }, { 0xbc, 0x006f, 881 }, { 0xbe, 0x806f, 900 }, { 0xc0, 0x0070, 920 }, { 0xc2, 0x8070, 940 }, { 0xc4, 0x0071, 960 }, { 0xc6, 0x8071, 981 }, { 0xc8, 0x0072, joycon_max_rumble_amp } }; static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160; static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320; static const unsigned short JC_RUMBLE_ZERO_AMP_PKT_CNT = 5; #endif /* IS_ENABLED(CONFIG_NINTENDO_FF) */ static const u16 JC_RUMBLE_PERIOD_MS = 50; /* States for controller state machine */ enum joycon_ctlr_state { JOYCON_CTLR_STATE_INIT, JOYCON_CTLR_STATE_READ, JOYCON_CTLR_STATE_REMOVED, }; /* Controller type received as part of device info */ enum joycon_ctlr_type { JOYCON_CTLR_TYPE_JCL = 0x01, JOYCON_CTLR_TYPE_JCR = 0x02, JOYCON_CTLR_TYPE_PRO = 0x03, JOYCON_CTLR_TYPE_NESL = 0x09, JOYCON_CTLR_TYPE_NESR = 0x0A, JOYCON_CTLR_TYPE_SNES = 0x0B, JOYCON_CTLR_TYPE_GEN = 0x0D, JOYCON_CTLR_TYPE_N64 = 0x0C, }; struct joycon_stick_cal { s32 max; s32 min; s32 center; }; struct joycon_imu_cal { s16 offset[3]; s16 scale[3]; }; /* * All the controller's button values are stored in a u32. * They can be accessed with bitwise ANDs. */ #define JC_BTN_Y BIT(0) #define JC_BTN_X BIT(1) #define JC_BTN_B BIT(2) #define JC_BTN_A BIT(3) #define JC_BTN_SR_R BIT(4) #define JC_BTN_SL_R BIT(5) #define JC_BTN_R BIT(6) #define JC_BTN_ZR BIT(7) #define JC_BTN_MINUS BIT(8) #define JC_BTN_PLUS BIT(9) #define JC_BTN_RSTICK BIT(10) #define JC_BTN_LSTICK BIT(11) #define JC_BTN_HOME BIT(12) #define JC_BTN_CAP BIT(13) /* capture button */ #define JC_BTN_DOWN BIT(16) #define JC_BTN_UP BIT(17) #define JC_BTN_RIGHT BIT(18) #define JC_BTN_LEFT BIT(19) #define JC_BTN_SR_L BIT(20) #define JC_BTN_SL_L BIT(21) #define JC_BTN_L BIT(22) #define JC_BTN_ZL BIT(23) struct joycon_ctlr_button_mapping { u32 code; u32 bit; }; /* * D-pad is configured as buttons for the left Joy-Con only! */ static const struct joycon_ctlr_button_mapping left_joycon_button_mappings[] = { { BTN_TL, JC_BTN_L, }, { BTN_TL2, JC_BTN_ZL, }, { BTN_SELECT, JC_BTN_MINUS, }, { BTN_THUMBL, JC_BTN_LSTICK, }, { BTN_DPAD_UP, JC_BTN_UP, }, { BTN_DPAD_DOWN, JC_BTN_DOWN, }, { BTN_DPAD_LEFT, JC_BTN_LEFT, }, { BTN_DPAD_RIGHT, JC_BTN_RIGHT, }, { BTN_Z, JC_BTN_CAP, }, { /* sentinel */ }, }; /* * The unused *right*-side triggers become the SL/SR triggers for the *left* * Joy-Con, if and only if we're not using a charging grip. 
*/ static const struct joycon_ctlr_button_mapping left_joycon_s_button_mappings[] = { { BTN_TR, JC_BTN_SL_L, }, { BTN_TR2, JC_BTN_SR_L, }, { /* sentinel */ }, }; static const struct joycon_ctlr_button_mapping right_joycon_button_mappings[] = { { BTN_EAST, JC_BTN_A, }, { BTN_SOUTH, JC_BTN_B, }, { BTN_NORTH, JC_BTN_X, }, { BTN_WEST, JC_BTN_Y, }, { BTN_TR, JC_BTN_R, }, { BTN_TR2, JC_BTN_ZR, }, { BTN_START, JC_BTN_PLUS, }, { BTN_THUMBR, JC_BTN_RSTICK, }, { BTN_MODE, JC_BTN_HOME, }, { /* sentinel */ }, }; /* * The unused *left*-side triggers become the SL/SR triggers for the *right* * Joy-Con, if and only if we're not using a charging grip. */ static const struct joycon_ctlr_button_mapping right_joycon_s_button_mappings[] = { { BTN_TL, JC_BTN_SL_R, }, { BTN_TL2, JC_BTN_SR_R, }, { /* sentinel */ }, }; static const struct joycon_ctlr_button_mapping procon_button_mappings[] = { { BTN_EAST, JC_BTN_A, }, { BTN_SOUTH, JC_BTN_B, }, { BTN_NORTH, JC_BTN_X, }, { BTN_WEST, JC_BTN_Y, }, { BTN_TL, JC_BTN_L, }, { BTN_TR, JC_BTN_R, }, { BTN_TL2, JC_BTN_ZL, }, { BTN_TR2, JC_BTN_ZR, }, { BTN_SELECT, JC_BTN_MINUS, }, { BTN_START, JC_BTN_PLUS, }, { BTN_THUMBL, JC_BTN_LSTICK, }, { BTN_THUMBR, JC_BTN_RSTICK, }, { BTN_MODE, JC_BTN_HOME, }, { BTN_Z, JC_BTN_CAP, }, { /* sentinel */ }, }; static const struct joycon_ctlr_button_mapping nescon_button_mappings[] = { { BTN_SOUTH, JC_BTN_A, }, { BTN_EAST, JC_BTN_B, }, { BTN_TL, JC_BTN_L, }, { BTN_TR, JC_BTN_R, }, { BTN_SELECT, JC_BTN_MINUS, }, { BTN_START, JC_BTN_PLUS, }, { /* sentinel */ }, }; static const struct joycon_ctlr_button_mapping snescon_button_mappings[] = { { BTN_EAST, JC_BTN_A, }, { BTN_SOUTH, JC_BTN_B, }, { BTN_NORTH, JC_BTN_X, }, { BTN_WEST, JC_BTN_Y, }, { BTN_TL, JC_BTN_L, }, { BTN_TR, JC_BTN_R, }, { BTN_TL2, JC_BTN_ZL, }, { BTN_TR2, JC_BTN_ZR, }, { BTN_SELECT, JC_BTN_MINUS, }, { BTN_START, JC_BTN_PLUS, }, { /* sentinel */ }, }; /* * "A", "B", and "C" are mapped positionally, rather than by label (e.g., "A" * gets assigned to BTN_EAST instead of BTN_A). */ static const struct joycon_ctlr_button_mapping gencon_button_mappings[] = { { BTN_SOUTH, JC_BTN_A, }, { BTN_EAST, JC_BTN_B, }, { BTN_WEST, JC_BTN_R, }, { BTN_SELECT, JC_BTN_ZR, }, { BTN_START, JC_BTN_PLUS, }, { BTN_MODE, JC_BTN_HOME, }, { BTN_Z, JC_BTN_CAP, }, { /* sentinel */ }, }; /* * N64's C buttons get assigned to d-pad directions and registered as buttons. 
*/ static const struct joycon_ctlr_button_mapping n64con_button_mappings[] = { { BTN_A, JC_BTN_A, }, { BTN_B, JC_BTN_B, }, { BTN_TL2, JC_BTN_ZL, }, /* Z */ { BTN_TL, JC_BTN_L, }, { BTN_TR, JC_BTN_R, }, { BTN_TR2, JC_BTN_LSTICK, }, /* ZR */ { BTN_START, JC_BTN_PLUS, }, { BTN_SELECT, JC_BTN_Y, }, /* C UP */ { BTN_X, JC_BTN_ZR, }, /* C DOWN */ { BTN_Y, JC_BTN_X, }, /* C LEFT */ { BTN_C, JC_BTN_MINUS, }, /* C RIGHT */ { BTN_MODE, JC_BTN_HOME, }, { BTN_Z, JC_BTN_CAP, }, { /* sentinel */ }, }; enum joycon_msg_type { JOYCON_MSG_TYPE_NONE, JOYCON_MSG_TYPE_USB, JOYCON_MSG_TYPE_SUBCMD, }; struct joycon_rumble_output { u8 output_id; u8 packet_num; u8 rumble_data[8]; } __packed; struct joycon_subcmd_request { u8 output_id; /* must be 0x01 for subcommand, 0x10 for rumble only */ u8 packet_num; /* incremented every send */ u8 rumble_data[8]; u8 subcmd_id; u8 data[]; /* length depends on the subcommand */ } __packed; struct joycon_subcmd_reply { u8 ack; /* MSB 1 for ACK, 0 for NACK */ u8 id; /* id of requested subcmd */ u8 data[]; /* will be at most 35 bytes */ } __packed; struct joycon_imu_data { s16 accel_x; s16 accel_y; s16 accel_z; s16 gyro_x; s16 gyro_y; s16 gyro_z; } __packed; struct joycon_input_report { u8 id; u8 timer; u8 bat_con; /* battery and connection info */ u8 button_status[3]; u8 left_stick[3]; u8 right_stick[3]; u8 vibrator_report; union { struct joycon_subcmd_reply subcmd_reply; /* IMU input reports contain 3 samples */ u8 imu_raw_bytes[sizeof(struct joycon_imu_data) * 3]; }; } __packed; #define JC_MAX_RESP_SIZE (sizeof(struct joycon_input_report) + 35) #define JC_RUMBLE_DATA_SIZE 8 #define JC_RUMBLE_QUEUE_SIZE 8 static const char * const joycon_player_led_names[] = { LED_FUNCTION_PLAYER1, LED_FUNCTION_PLAYER2, LED_FUNCTION_PLAYER3, LED_FUNCTION_PLAYER4, }; #define JC_NUM_LEDS ARRAY_SIZE(joycon_player_led_names) #define JC_NUM_LED_PATTERNS 8 /* Taken from https://www.nintendo.com/my/support/qa/detail/33822 */ static const enum led_brightness joycon_player_led_patterns[JC_NUM_LED_PATTERNS][JC_NUM_LEDS] = { { 1, 0, 0, 0 }, { 1, 1, 0, 0 }, { 1, 1, 1, 0 }, { 1, 1, 1, 1 }, { 1, 0, 0, 1 }, { 1, 0, 1, 0 }, { 1, 0, 1, 1 }, { 0, 1, 1, 0 }, }; /* Each physical controller is associated with a joycon_ctlr struct */ struct joycon_ctlr { struct hid_device *hdev; struct input_dev *input; u32 player_id; struct led_classdev leds[JC_NUM_LEDS]; /* player leds */ struct led_classdev home_led; enum joycon_ctlr_state ctlr_state; spinlock_t lock; u8 mac_addr[6]; char *mac_addr_str; enum joycon_ctlr_type ctlr_type; /* The following members are used for synchronous sends/receives */ enum joycon_msg_type msg_type; u8 subcmd_num; struct mutex output_mutex; u8 input_buf[JC_MAX_RESP_SIZE]; wait_queue_head_t wait; bool received_resp; u8 usb_ack_match; u8 subcmd_ack_match; bool received_input_report; unsigned int last_input_report_msecs; unsigned int last_subcmd_sent_msecs; unsigned int consecutive_valid_report_deltas; /* factory calibration data */ struct joycon_stick_cal left_stick_cal_x; struct joycon_stick_cal left_stick_cal_y; struct joycon_stick_cal right_stick_cal_x; struct joycon_stick_cal right_stick_cal_y; struct joycon_imu_cal accel_cal; struct joycon_imu_cal gyro_cal; /* prevents needlessly recalculating these divisors every sample */ s32 imu_cal_accel_divisor[3]; s32 imu_cal_gyro_divisor[3]; /* power supply data */ struct power_supply *battery; struct power_supply_desc battery_desc; u8 battery_capacity; bool battery_charging; bool host_powered; /* rumble */ u8 
rumble_data[JC_RUMBLE_QUEUE_SIZE][JC_RUMBLE_DATA_SIZE]; int rumble_queue_head; int rumble_queue_tail; struct workqueue_struct *rumble_queue; struct work_struct rumble_worker; unsigned int rumble_msecs; u16 rumble_ll_freq; u16 rumble_lh_freq; u16 rumble_rl_freq; u16 rumble_rh_freq; unsigned short rumble_zero_countdown; /* imu */ struct input_dev *imu_input; bool imu_first_packet_received; /* helps in initiating timestamp */ unsigned int imu_timestamp_us; /* timestamp we report to userspace */ unsigned int imu_last_pkt_ms; /* used to calc imu report delta */ /* the following are used to track the average imu report time delta */ unsigned int imu_delta_samples_count; unsigned int imu_delta_samples_sum; unsigned int imu_avg_delta_ms; }; /* Helper macros for checking controller type */ #define jc_type_is_joycon(ctlr) \ (ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONL || \ ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONR || \ ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_CHRGGRIP) #define jc_type_is_procon(ctlr) \ (ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_PROCON) #define jc_type_is_chrggrip(ctlr) \ (ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_CHRGGRIP) /* Does this controller have inputs associated with left joycon? */ #define jc_type_has_left(ctlr) \ (ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCL || \ ctlr->ctlr_type == JOYCON_CTLR_TYPE_PRO || \ ctlr->ctlr_type == JOYCON_CTLR_TYPE_N64) /* Does this controller have inputs associated with right joycon? */ #define jc_type_has_right(ctlr) \ (ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCR || \ ctlr->ctlr_type == JOYCON_CTLR_TYPE_PRO) /* * Controller device helpers * * These look at the device ID known to the HID subsystem to identify a device, * but take caution: some NSO devices lie about themselves (NES Joy-Cons and * Sega Genesis controller). See type helpers below. * * These helpers are most useful early during the HID probe or in conjunction * with the capability helpers below. */ static inline bool joycon_device_is_chrggrip(struct joycon_ctlr *ctlr) { return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_CHRGGRIP; } /* * Controller type helpers * * These are slightly different than the device-ID-based helpers above. They are * generally more reliable, since they can distinguish between, e.g., Genesis * versus SNES, or NES Joy-Cons versus regular Switch Joy-Cons. They're most * useful for reporting available inputs. For other kinds of distinctions, see * the capability helpers below. * * They have two major drawbacks: (1) they're not available until after we set * the reporting method and then request the device info; (2) they can't * distinguish all controllers (like the Charging Grip from the Pro controller.) 
*/ static inline bool joycon_type_is_left_joycon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCL; } static inline bool joycon_type_is_right_joycon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCR; } static inline bool joycon_type_is_procon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_PRO; } static inline bool joycon_type_is_snescon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_SNES; } static inline bool joycon_type_is_gencon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_GEN; } static inline bool joycon_type_is_n64con(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_N64; } static inline bool joycon_type_is_left_nescon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_NESL; } static inline bool joycon_type_is_right_nescon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_NESR; } static inline bool joycon_type_is_any_joycon(struct joycon_ctlr *ctlr) { return joycon_type_is_left_joycon(ctlr) || joycon_type_is_right_joycon(ctlr) || joycon_device_is_chrggrip(ctlr); } static inline bool joycon_type_is_any_nescon(struct joycon_ctlr *ctlr) { return joycon_type_is_left_nescon(ctlr) || joycon_type_is_right_nescon(ctlr); } /* * Controller capability helpers * * These helpers combine the use of the helpers above to detect certain * capabilities during initialization. They are always accurate but (since they * use type helpers) cannot be used early in the HID probe. */ static inline bool joycon_has_imu(struct joycon_ctlr *ctlr) { return joycon_device_is_chrggrip(ctlr) || joycon_type_is_any_joycon(ctlr) || joycon_type_is_procon(ctlr); } static inline bool joycon_has_joysticks(struct joycon_ctlr *ctlr) { return joycon_device_is_chrggrip(ctlr) || joycon_type_is_any_joycon(ctlr) || joycon_type_is_procon(ctlr) || joycon_type_is_n64con(ctlr); } static inline bool joycon_has_rumble(struct joycon_ctlr *ctlr) { return joycon_device_is_chrggrip(ctlr) || joycon_type_is_any_joycon(ctlr) || joycon_type_is_procon(ctlr) || joycon_type_is_n64con(ctlr); } static inline bool joycon_using_usb(struct joycon_ctlr *ctlr) { return ctlr->hdev->bus == BUS_USB; } static int __joycon_hid_send(struct hid_device *hdev, u8 *data, size_t len) { u8 *buf; int ret; buf = kmemdup(data, len, GFP_KERNEL); if (!buf) return -ENOMEM; ret = hid_hw_output_report(hdev, buf, len); kfree(buf); if (ret < 0) hid_dbg(hdev, "Failed to send output report ret=%d\n", ret); return ret; } static void joycon_wait_for_input_report(struct joycon_ctlr *ctlr) { int ret; /* * If we are in the proper reporting mode, wait for an input * report prior to sending the subcommand. This improves * reliability considerably. */ if (ctlr->ctlr_state == JOYCON_CTLR_STATE_READ) { unsigned long flags; spin_lock_irqsave(&ctlr->lock, flags); ctlr->received_input_report = false; spin_unlock_irqrestore(&ctlr->lock, flags); ret = wait_event_timeout(ctlr->wait, ctlr->received_input_report, HZ / 4); /* We will still proceed, even with a timeout here */ if (!ret) hid_warn(ctlr->hdev, "timeout waiting for input report\n"); } } /* * Sending subcommands and/or rumble data at too high a rate can cause bluetooth * controller disconnections. 
*/ #define JC_INPUT_REPORT_MIN_DELTA 8 #define JC_INPUT_REPORT_MAX_DELTA 17 #define JC_SUBCMD_TX_OFFSET_MS 4 #define JC_SUBCMD_VALID_DELTA_REQ 3 #define JC_SUBCMD_RATE_MAX_ATTEMPTS 500 #define JC_SUBCMD_RATE_LIMITER_USB_MS 20 #define JC_SUBCMD_RATE_LIMITER_BT_MS 60 #define JC_SUBCMD_RATE_LIMITER_MS(ctlr) ((ctlr)->hdev->bus == BUS_USB ? JC_SUBCMD_RATE_LIMITER_USB_MS : JC_SUBCMD_RATE_LIMITER_BT_MS) static void joycon_enforce_subcmd_rate(struct joycon_ctlr *ctlr) { unsigned int current_ms; unsigned long subcmd_delta; int consecutive_valid_deltas = 0; int attempts = 0; unsigned long flags; if (unlikely(ctlr->ctlr_state != JOYCON_CTLR_STATE_READ)) return; do { joycon_wait_for_input_report(ctlr); current_ms = jiffies_to_msecs(jiffies); subcmd_delta = current_ms - ctlr->last_subcmd_sent_msecs; spin_lock_irqsave(&ctlr->lock, flags); consecutive_valid_deltas = ctlr->consecutive_valid_report_deltas; spin_unlock_irqrestore(&ctlr->lock, flags); attempts++; } while ((consecutive_valid_deltas < JC_SUBCMD_VALID_DELTA_REQ || subcmd_delta < JC_SUBCMD_RATE_LIMITER_MS(ctlr)) && ctlr->ctlr_state == JOYCON_CTLR_STATE_READ && attempts < JC_SUBCMD_RATE_MAX_ATTEMPTS); if (attempts >= JC_SUBCMD_RATE_MAX_ATTEMPTS) { hid_warn(ctlr->hdev, "%s: exceeded max attempts", __func__); return; } ctlr->last_subcmd_sent_msecs = current_ms; /* * Wait a short time after receiving an input report before * transmitting. This should reduce odds of a TX coinciding with an RX. * Minimizing concurrent BT traffic with the controller seems to lower * the rate of disconnections. */ msleep(JC_SUBCMD_TX_OFFSET_MS); } static int joycon_hid_send_sync(struct joycon_ctlr *ctlr, u8 *data, size_t len, u32 timeout) { int ret; int tries = 2; /* * The controller occasionally seems to drop subcommands. In testing, * doing one retry after a timeout appears to always work. */ while (tries--) { joycon_enforce_subcmd_rate(ctlr); ret = __joycon_hid_send(ctlr->hdev, data, len); if (ret < 0) { memset(ctlr->input_buf, 0, JC_MAX_RESP_SIZE); return ret; } ret = wait_event_timeout(ctlr->wait, ctlr->received_resp, timeout); if (!ret) { hid_dbg(ctlr->hdev, "synchronous send/receive timed out\n"); if (tries) { hid_dbg(ctlr->hdev, "retrying sync send after timeout\n"); } memset(ctlr->input_buf, 0, JC_MAX_RESP_SIZE); ret = -ETIMEDOUT; } else { ret = 0; break; } } ctlr->received_resp = false; return ret; } static int joycon_send_usb(struct joycon_ctlr *ctlr, u8 cmd, u32 timeout) { int ret; u8 buf[2] = {JC_OUTPUT_USB_CMD}; buf[1] = cmd; ctlr->usb_ack_match = cmd; ctlr->msg_type = JOYCON_MSG_TYPE_USB; ret = joycon_hid_send_sync(ctlr, buf, sizeof(buf), timeout); if (ret) hid_dbg(ctlr->hdev, "send usb command failed; ret=%d\n", ret); return ret; } static int joycon_send_subcmd(struct joycon_ctlr *ctlr, struct joycon_subcmd_request *subcmd, size_t data_len, u32 timeout) { int ret; unsigned long flags; spin_lock_irqsave(&ctlr->lock, flags); /* * If the controller has been removed, just return ENODEV so the LED * subsystem doesn't print invalid errors on removal. 
*/ if (ctlr->ctlr_state == JOYCON_CTLR_STATE_REMOVED) { spin_unlock_irqrestore(&ctlr->lock, flags); return -ENODEV; } memcpy(subcmd->rumble_data, ctlr->rumble_data[ctlr->rumble_queue_tail], JC_RUMBLE_DATA_SIZE); spin_unlock_irqrestore(&ctlr->lock, flags); subcmd->output_id = JC_OUTPUT_RUMBLE_AND_SUBCMD; subcmd->packet_num = ctlr->subcmd_num; if (++ctlr->subcmd_num > 0xF) ctlr->subcmd_num = 0; ctlr->subcmd_ack_match = subcmd->subcmd_id; ctlr->msg_type = JOYCON_MSG_TYPE_SUBCMD; ret = joycon_hid_send_sync(ctlr, (u8 *)subcmd, sizeof(*subcmd) + data_len, timeout); if (ret < 0) hid_dbg(ctlr->hdev, "send subcommand failed; ret=%d\n", ret); else ret = 0; return ret; } /* Supply nibbles for flash and on. Ones correspond to active */ static int joycon_set_player_leds(struct joycon_ctlr *ctlr, u8 flash, u8 on) { struct joycon_subcmd_request *req; u8 buffer[sizeof(*req) + 1] = { 0 }; req = (struct joycon_subcmd_request *)buffer; req->subcmd_id = JC_SUBCMD_SET_PLAYER_LIGHTS; req->data[0] = (flash << 4) | on; hid_dbg(ctlr->hdev, "setting player leds\n"); return joycon_send_subcmd(ctlr, req, 1, HZ/4); } static int joycon_set_home_led(struct joycon_ctlr *ctlr, enum led_brightness brightness) { struct joycon_subcmd_request *req; u8 buffer[sizeof(*req) + 5] = { 0 }; u8 *data; req = (struct joycon_subcmd_request *)buffer; req->subcmd_id = JC_SUBCMD_SET_HOME_LIGHT; data = req->data; data[0] = 0x01; data[1] = brightness << 4; data[2] = brightness | (brightness << 4); data[3] = 0x11; data[4] = 0x11; hid_dbg(ctlr->hdev, "setting home led brightness\n"); return joycon_send_subcmd(ctlr, req, 5, HZ/4); } static int joycon_request_spi_flash_read(struct joycon_ctlr *ctlr, u32 start_addr, u8 size, u8 **reply) { struct joycon_subcmd_request *req; struct joycon_input_report *report; u8 buffer[sizeof(*req) + 5] = { 0 }; u8 *data; int ret; if (!reply) return -EINVAL; req = (struct joycon_subcmd_request *)buffer; req->subcmd_id = JC_SUBCMD_SPI_FLASH_READ; data = req->data; put_unaligned_le32(start_addr, data); data[4] = size; hid_dbg(ctlr->hdev, "requesting SPI flash data\n"); ret = joycon_send_subcmd(ctlr, req, 5, HZ); if (ret) { hid_err(ctlr->hdev, "failed reading SPI flash; ret=%d\n", ret); } else { report = (struct joycon_input_report *)ctlr->input_buf; /* The read data starts at the 6th byte */ *reply = &report->subcmd_reply.data[5]; } return ret; } /* * User calibration's presence is denoted with a magic byte preceding it. 
* returns 0 if magic val is present, 1 if not present, < 0 on error */ static int joycon_check_for_cal_magic(struct joycon_ctlr *ctlr, u32 flash_addr) { int ret; u8 *reply; ret = joycon_request_spi_flash_read(ctlr, flash_addr, JC_CAL_USR_MAGIC_SIZE, &reply); if (ret) return ret; return reply[0] != JC_CAL_USR_MAGIC_0 || reply[1] != JC_CAL_USR_MAGIC_1; } static int joycon_read_stick_calibration(struct joycon_ctlr *ctlr, u16 cal_addr, struct joycon_stick_cal *cal_x, struct joycon_stick_cal *cal_y, bool left_stick) { s32 x_max_above; s32 x_min_below; s32 y_max_above; s32 y_min_below; u8 *raw_cal; int ret; ret = joycon_request_spi_flash_read(ctlr, cal_addr, JC_CAL_STICK_DATA_SIZE, &raw_cal); if (ret) return ret; /* stick calibration parsing: note the order differs based on stick */ if (left_stick) { x_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 0), 0, 12); y_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 1), 4, 12); cal_x->center = hid_field_extract(ctlr->hdev, (raw_cal + 3), 0, 12); cal_y->center = hid_field_extract(ctlr->hdev, (raw_cal + 4), 4, 12); x_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 6), 0, 12); y_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 7), 4, 12); } else { cal_x->center = hid_field_extract(ctlr->hdev, (raw_cal + 0), 0, 12); cal_y->center = hid_field_extract(ctlr->hdev, (raw_cal + 1), 4, 12); x_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 3), 0, 12); y_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 4), 4, 12); x_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 6), 0, 12); y_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 7), 4, 12); } cal_x->max = cal_x->center + x_max_above; cal_x->min = cal_x->center - x_min_below; cal_y->max = cal_y->center + y_max_above; cal_y->min = cal_y->center - y_min_below; /* check if calibration values are plausible */ if (cal_x->min >= cal_x->center || cal_x->center >= cal_x->max || cal_y->min >= cal_y->center || cal_y->center >= cal_y->max) ret = -EINVAL; return ret; } static const u16 DFLT_STICK_CAL_CEN = 2000; static const u16 DFLT_STICK_CAL_MAX = 3500; static const u16 DFLT_STICK_CAL_MIN = 500; static void joycon_use_default_calibration(struct hid_device *hdev, struct joycon_stick_cal *cal_x, struct joycon_stick_cal *cal_y, const char *stick, int ret) { hid_warn(hdev, "Failed to read %s stick cal, using defaults; e=%d\n", stick, ret); cal_x->center = cal_y->center = DFLT_STICK_CAL_CEN; cal_x->max = cal_y->max = DFLT_STICK_CAL_MAX; cal_x->min = cal_y->min = DFLT_STICK_CAL_MIN; } static int joycon_request_calibration(struct joycon_ctlr *ctlr) { u16 left_stick_addr = JC_CAL_FCT_DATA_LEFT_ADDR; u16 right_stick_addr = JC_CAL_FCT_DATA_RIGHT_ADDR; int ret; hid_dbg(ctlr->hdev, "requesting cal data\n"); /* check if user stick calibrations are present */ if (!joycon_check_for_cal_magic(ctlr, JC_CAL_USR_LEFT_MAGIC_ADDR)) { left_stick_addr = JC_CAL_USR_LEFT_DATA_ADDR; hid_info(ctlr->hdev, "using user cal for left stick\n"); } else { hid_info(ctlr->hdev, "using factory cal for left stick\n"); } if (!joycon_check_for_cal_magic(ctlr, JC_CAL_USR_RIGHT_MAGIC_ADDR)) { right_stick_addr = JC_CAL_USR_RIGHT_DATA_ADDR; hid_info(ctlr->hdev, "using user cal for right stick\n"); } else { hid_info(ctlr->hdev, "using factory cal for right stick\n"); } /* read the left stick calibration data */ ret = joycon_read_stick_calibration(ctlr, left_stick_addr, &ctlr->left_stick_cal_x, &ctlr->left_stick_cal_y, true); if (ret) joycon_use_default_calibration(ctlr->hdev, &ctlr->left_stick_cal_x, &ctlr->left_stick_cal_y, "left", 
ret); /* read the right stick calibration data */ ret = joycon_read_stick_calibration(ctlr, right_stick_addr, &ctlr->right_stick_cal_x, &ctlr->right_stick_cal_y, false); if (ret) joycon_use_default_calibration(ctlr->hdev, &ctlr->right_stick_cal_x, &ctlr->right_stick_cal_y, "right", ret); hid_dbg(ctlr->hdev, "calibration:\n" "l_x_c=%d l_x_max=%d l_x_min=%d\n" "l_y_c=%d l_y_max=%d l_y_min=%d\n" "r_x_c=%d r_x_max=%d r_x_min=%d\n" "r_y_c=%d r_y_max=%d r_y_min=%d\n", ctlr->left_stick_cal_x.center, ctlr->left_stick_cal_x.max, ctlr->left_stick_cal_x.min, ctlr->left_stick_cal_y.center, ctlr->left_stick_cal_y.max, ctlr->left_stick_cal_y.min, ctlr->right_stick_cal_x.center, ctlr->right_stick_cal_x.max, ctlr->right_stick_cal_x.min, ctlr->right_stick_cal_y.center, ctlr->right_stick_cal_y.max, ctlr->right_stick_cal_y.min); return 0; } /* * These divisors are calculated once rather than for each sample. They are only * dependent on the IMU calibration values. They are used when processing the * IMU input reports. */ static void joycon_calc_imu_cal_divisors(struct joycon_ctlr *ctlr) { int i, divz = 0; for (i = 0; i < 3; i++) { ctlr->imu_cal_accel_divisor[i] = ctlr->accel_cal.scale[i] - ctlr->accel_cal.offset[i]; ctlr->imu_cal_gyro_divisor[i] = ctlr->gyro_cal.scale[i] - ctlr->gyro_cal.offset[i]; if (ctlr->imu_cal_accel_divisor[i] == 0) { ctlr->imu_cal_accel_divisor[i] = 1; divz++; } if (ctlr->imu_cal_gyro_divisor[i] == 0) { ctlr->imu_cal_gyro_divisor[i] = 1; divz++; } } if (divz) hid_warn(ctlr->hdev, "inaccurate IMU divisors (%d)\n", divz); } static const s16 DFLT_ACCEL_OFFSET /*= 0*/; static const s16 DFLT_ACCEL_SCALE = 16384; static const s16 DFLT_GYRO_OFFSET /*= 0*/; static const s16 DFLT_GYRO_SCALE = 13371; static int joycon_request_imu_calibration(struct joycon_ctlr *ctlr) { u16 imu_cal_addr = JC_IMU_CAL_FCT_DATA_ADDR; u8 *raw_cal; int ret; int i; /* check if user calibration exists */ if (!joycon_check_for_cal_magic(ctlr, JC_IMU_CAL_USR_MAGIC_ADDR)) { imu_cal_addr = JC_IMU_CAL_USR_DATA_ADDR; hid_info(ctlr->hdev, "using user cal for IMU\n"); } else { hid_info(ctlr->hdev, "using factory cal for IMU\n"); } /* request IMU calibration data */ hid_dbg(ctlr->hdev, "requesting IMU cal data\n"); ret = joycon_request_spi_flash_read(ctlr, imu_cal_addr, JC_IMU_CAL_DATA_SIZE, &raw_cal); if (ret) { hid_warn(ctlr->hdev, "Failed to read IMU cal, using defaults; ret=%d\n", ret); for (i = 0; i < 3; i++) { ctlr->accel_cal.offset[i] = DFLT_ACCEL_OFFSET; ctlr->accel_cal.scale[i] = DFLT_ACCEL_SCALE; ctlr->gyro_cal.offset[i] = DFLT_GYRO_OFFSET; ctlr->gyro_cal.scale[i] = DFLT_GYRO_SCALE; } joycon_calc_imu_cal_divisors(ctlr); return ret; } /* IMU calibration parsing */ for (i = 0; i < 3; i++) { int j = i * 2; ctlr->accel_cal.offset[i] = get_unaligned_le16(raw_cal + j); ctlr->accel_cal.scale[i] = get_unaligned_le16(raw_cal + j + 6); ctlr->gyro_cal.offset[i] = get_unaligned_le16(raw_cal + j + 12); ctlr->gyro_cal.scale[i] = get_unaligned_le16(raw_cal + j + 18); } joycon_calc_imu_cal_divisors(ctlr); hid_dbg(ctlr->hdev, "IMU calibration:\n" "a_o[0]=%d a_o[1]=%d a_o[2]=%d\n" "a_s[0]=%d a_s[1]=%d a_s[2]=%d\n" "g_o[0]=%d g_o[1]=%d g_o[2]=%d\n" "g_s[0]=%d g_s[1]=%d g_s[2]=%d\n", ctlr->accel_cal.offset[0], ctlr->accel_cal.offset[1], ctlr->accel_cal.offset[2], ctlr->accel_cal.scale[0], ctlr->accel_cal.scale[1], ctlr->accel_cal.scale[2], ctlr->gyro_cal.offset[0], ctlr->gyro_cal.offset[1], ctlr->gyro_cal.offset[2], ctlr->gyro_cal.scale[0], ctlr->gyro_cal.scale[1], ctlr->gyro_cal.scale[2]); return 0; } static int 
joycon_set_report_mode(struct joycon_ctlr *ctlr) { struct joycon_subcmd_request *req; u8 buffer[sizeof(*req) + 1] = { 0 }; req = (struct joycon_subcmd_request *)buffer; req->subcmd_id = JC_SUBCMD_SET_REPORT_MODE; req->data[0] = 0x30; /* standard, full report mode */ hid_dbg(ctlr->hdev, "setting controller report mode\n"); return joycon_send_subcmd(ctlr, req, 1, HZ); } static int joycon_enable_rumble(struct joycon_ctlr *ctlr) { struct joycon_subcmd_request *req; u8 buffer[sizeof(*req) + 1] = { 0 }; req = (struct joycon_subcmd_request *)buffer; req->subcmd_id = JC_SUBCMD_ENABLE_VIBRATION; req->data[0] = 0x01; /* note: 0x00 would disable */ hid_dbg(ctlr->hdev, "enabling rumble\n"); return joycon_send_subcmd(ctlr, req, 1, HZ/4); } static int joycon_enable_imu(struct joycon_ctlr *ctlr) { struct joycon_subcmd_request *req; u8 buffer[sizeof(*req) + 1] = { 0 }; req = (struct joycon_subcmd_request *)buffer; req->subcmd_id = JC_SUBCMD_ENABLE_IMU; req->data[0] = 0x01; /* note: 0x00 would disable */ hid_dbg(ctlr->hdev, "enabling IMU\n"); return joycon_send_subcmd(ctlr, req, 1, HZ); } static s32 joycon_map_stick_val(struct joycon_stick_cal *cal, s32 val) { s32 center = cal->center; s32 min = cal->min; s32 max = cal->max; s32 new_val; if (val > center) { new_val = (val - center) * JC_MAX_STICK_MAG; new_val /= (max - center); } else { new_val = (center - val) * -JC_MAX_STICK_MAG; new_val /= (center - min); } new_val = clamp(new_val, (s32)-JC_MAX_STICK_MAG, (s32)JC_MAX_STICK_MAG); return new_val; } static void joycon_input_report_parse_imu_data(struct joycon_ctlr *ctlr, struct joycon_input_report *rep, struct joycon_imu_data *imu_data) { u8 *raw = rep->imu_raw_bytes; int i; for (i = 0; i < 3; i++) { struct joycon_imu_data *data = &imu_data[i]; data->accel_x = get_unaligned_le16(raw + 0); data->accel_y = get_unaligned_le16(raw + 2); data->accel_z = get_unaligned_le16(raw + 4); data->gyro_x = get_unaligned_le16(raw + 6); data->gyro_y = get_unaligned_le16(raw + 8); data->gyro_z = get_unaligned_le16(raw + 10); /* point to next imu sample */ raw += sizeof(struct joycon_imu_data); } } static void joycon_parse_imu_report(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { struct joycon_imu_data imu_data[3] = {0}; /* 3 reports per packet */ struct input_dev *idev = ctlr->imu_input; unsigned int msecs = jiffies_to_msecs(jiffies); unsigned int last_msecs = ctlr->imu_last_pkt_ms; int i; int value[6]; joycon_input_report_parse_imu_data(ctlr, rep, imu_data); /* * There are complexities surrounding how we determine the timestamps we * associate with the samples we pass to userspace. The IMU input * reports do not provide us with a good timestamp. There's a quickly * incrementing 8-bit counter per input report, but it is not very * useful for this purpose (it is not entirely clear what rate it * increments at or if it varies based on packet push rate - more on * the push rate below...). * * The reverse engineering work done on the joy-cons and pro controllers * by the community seems to indicate the following: * - The controller samples the IMU every 1.35ms. It then does some of * its own processing, probably averaging the samples out. * - Each imu input report contains 3 IMU samples, (usually 5ms apart). 
* - In the standard reporting mode (which this driver uses exclusively) * input reports are pushed from the controller as follows: * * joy-con (bluetooth): every 15 ms * * joy-cons (in charging grip via USB): every 15 ms * * pro controller (USB): every 15 ms * * pro controller (bluetooth): every 8 ms (this is the wildcard) * * Further complicating matters is that some bluetooth stacks are known * to alter the controller's packet rate by hardcoding the bluetooth * SSR for the switch controllers (android's stack currently sets the * SSR to 11ms for both the joy-cons and pro controllers). * * In my own testing, I've discovered that my pro controller either * reports IMU sample batches every 11ms or every 15ms. This rate is * stable after connecting. It isn't 100% clear what determines this * rate. Importantly, even when sending every 11ms, none of the samples * are duplicates. This seems to indicate that the time deltas between * reported samples can vary based on the input report rate. * * The solution employed in this driver is to keep track of the average * time delta between IMU input reports. In testing, this value has * proven to be stable, staying at 15ms or 11ms, though other hardware * configurations and bluetooth stacks could potentially see other rates * (hopefully this will become more clear as more people use the * driver). * * Keeping track of the average report delta allows us to submit our * timestamps to userspace based on that. Each report contains 3 * samples, so the IMU sampling rate should be avg_time_delta/3. We can * also use this average to detect events where we have dropped a * packet. The userspace timestamp for the samples will be adjusted * accordingly to prevent unwanted behvaior. */ if (!ctlr->imu_first_packet_received) { ctlr->imu_timestamp_us = 0; ctlr->imu_delta_samples_count = 0; ctlr->imu_delta_samples_sum = 0; ctlr->imu_avg_delta_ms = JC_IMU_DFLT_AVG_DELTA_MS; ctlr->imu_first_packet_received = true; } else { unsigned int delta = msecs - last_msecs; unsigned int dropped_pkts; unsigned int dropped_threshold; /* avg imu report delta housekeeping */ ctlr->imu_delta_samples_sum += delta; ctlr->imu_delta_samples_count++; if (ctlr->imu_delta_samples_count >= JC_IMU_SAMPLES_PER_DELTA_AVG) { ctlr->imu_avg_delta_ms = ctlr->imu_delta_samples_sum / ctlr->imu_delta_samples_count; ctlr->imu_delta_samples_count = 0; ctlr->imu_delta_samples_sum = 0; } /* don't ever want divide by zero shenanigans */ if (ctlr->imu_avg_delta_ms == 0) { ctlr->imu_avg_delta_ms = 1; hid_warn(ctlr->hdev, "calculated avg imu delta of 0\n"); } /* useful for debugging IMU sample rate */ hid_dbg(ctlr->hdev, "imu_report: ms=%u last_ms=%u delta=%u avg_delta=%u\n", msecs, last_msecs, delta, ctlr->imu_avg_delta_ms); /* check if any packets have been dropped */ dropped_threshold = ctlr->imu_avg_delta_ms * 3 / 2; dropped_pkts = (delta - min(delta, dropped_threshold)) / ctlr->imu_avg_delta_ms; ctlr->imu_timestamp_us += 1000 * ctlr->imu_avg_delta_ms; if (dropped_pkts > JC_IMU_DROPPED_PKT_WARNING) { hid_warn(ctlr->hdev, "compensating for %u dropped IMU reports\n", dropped_pkts); hid_warn(ctlr->hdev, "delta=%u avg_delta=%u\n", delta, ctlr->imu_avg_delta_ms); } } ctlr->imu_last_pkt_ms = msecs; /* Each IMU input report contains three samples */ for (i = 0; i < 3; i++) { input_event(idev, EV_MSC, MSC_TIMESTAMP, ctlr->imu_timestamp_us); /* * These calculations (which use the controller's calibration * settings to improve the final values) are based on those * found in the community's reverse-engineering repo 
(linked at * top of driver). For hid-nintendo, we make sure that the final * value given to userspace is always in terms of the axis * resolution we provided. * * Currently only the gyro calculations subtract the calibration * offsets from the raw value itself. In testing, doing the same * for the accelerometer raw values decreased accuracy. * * Note that the gyro values are multiplied by the * precision-saving scaling factor to prevent large inaccuracies * due to truncation of the resolution value which would * otherwise occur. To prevent overflow (without resorting to 64 * bit integer math), the mult_frac macro is used. */ value[0] = mult_frac((JC_IMU_PREC_RANGE_SCALE * (imu_data[i].gyro_x - ctlr->gyro_cal.offset[0])), ctlr->gyro_cal.scale[0], ctlr->imu_cal_gyro_divisor[0]); value[1] = mult_frac((JC_IMU_PREC_RANGE_SCALE * (imu_data[i].gyro_y - ctlr->gyro_cal.offset[1])), ctlr->gyro_cal.scale[1], ctlr->imu_cal_gyro_divisor[1]); value[2] = mult_frac((JC_IMU_PREC_RANGE_SCALE * (imu_data[i].gyro_z - ctlr->gyro_cal.offset[2])), ctlr->gyro_cal.scale[2], ctlr->imu_cal_gyro_divisor[2]); value[3] = ((s32)imu_data[i].accel_x * ctlr->accel_cal.scale[0]) / ctlr->imu_cal_accel_divisor[0]; value[4] = ((s32)imu_data[i].accel_y * ctlr->accel_cal.scale[1]) / ctlr->imu_cal_accel_divisor[1]; value[5] = ((s32)imu_data[i].accel_z * ctlr->accel_cal.scale[2]) / ctlr->imu_cal_accel_divisor[2]; hid_dbg(ctlr->hdev, "raw_gyro: g_x=%d g_y=%d g_z=%d\n", imu_data[i].gyro_x, imu_data[i].gyro_y, imu_data[i].gyro_z); hid_dbg(ctlr->hdev, "raw_accel: a_x=%d a_y=%d a_z=%d\n", imu_data[i].accel_x, imu_data[i].accel_y, imu_data[i].accel_z); /* * The right joy-con has 2 axes negated, Y and Z. This is due to * the orientation of the IMU in the controller. We negate those * axes' values in order to be consistent with the left joy-con * and the pro controller: * X: positive is pointing toward the triggers * Y: positive is pointing to the left * Z: positive is pointing up (out of the buttons/sticks) * The axes follow the right-hand rule. */ if (jc_type_is_joycon(ctlr) && jc_type_has_right(ctlr)) { int j; /* negate all but x axis */ for (j = 1; j < 6; ++j) { if (j == 3) continue; value[j] *= -1; } } input_report_abs(idev, ABS_RX, value[0]); input_report_abs(idev, ABS_RY, value[1]); input_report_abs(idev, ABS_RZ, value[2]); input_report_abs(idev, ABS_X, value[3]); input_report_abs(idev, ABS_Y, value[4]); input_report_abs(idev, ABS_Z, value[5]); input_sync(idev); /* convert to micros and divide by 3 (3 samples per report). */ ctlr->imu_timestamp_us += ctlr->imu_avg_delta_ms * 1000 / 3; } } static void joycon_handle_rumble_report(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { unsigned long flags; unsigned long msecs = jiffies_to_msecs(jiffies); spin_lock_irqsave(&ctlr->lock, flags); if (IS_ENABLED(CONFIG_NINTENDO_FF) && rep->vibrator_report && ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED && (msecs - ctlr->rumble_msecs) >= JC_RUMBLE_PERIOD_MS && (ctlr->rumble_queue_head != ctlr->rumble_queue_tail || ctlr->rumble_zero_countdown > 0)) { /* * When this value reaches 0, we know we've sent multiple * packets to the controller instructing it to disable rumble. * We can safely stop sending periodic rumble packets until the * next ff effect. 
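	 * As a rough illustration (the numbers are hypothetical): if
	 * JC_RUMBLE_ZERO_AMP_PKT_CNT were 4, the last non-zero effect leaves
	 * the countdown at 4 via joycon_set_rumble(), the next few periodic
	 * passes each queue a zero-amplitude packet and decrement it, and
	 * once it reaches 0 (with an empty queue) no further rumble_worker
	 * runs are scheduled until a new effect reloads the countdown.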
*/ if (ctlr->rumble_zero_countdown > 0) ctlr->rumble_zero_countdown--; queue_work(ctlr->rumble_queue, &ctlr->rumble_worker); } spin_unlock_irqrestore(&ctlr->lock, flags); } static void joycon_parse_battery_status(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { u8 tmp; unsigned long flags; spin_lock_irqsave(&ctlr->lock, flags); tmp = rep->bat_con; ctlr->host_powered = tmp & BIT(0); ctlr->battery_charging = tmp & BIT(4); tmp = tmp >> 5; switch (tmp) { case 0: /* empty */ ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; break; case 1: /* low */ ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_LOW; break; case 2: /* medium */ ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; break; case 3: /* high */ ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_HIGH; break; case 4: /* full */ ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_FULL; break; default: ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; hid_warn(ctlr->hdev, "Invalid battery status\n"); break; } spin_unlock_irqrestore(&ctlr->lock, flags); } static void joycon_report_left_stick(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { u16 raw_x; u16 raw_y; s32 x; s32 y; raw_x = hid_field_extract(ctlr->hdev, rep->left_stick, 0, 12); raw_y = hid_field_extract(ctlr->hdev, rep->left_stick + 1, 4, 12); x = joycon_map_stick_val(&ctlr->left_stick_cal_x, raw_x); y = -joycon_map_stick_val(&ctlr->left_stick_cal_y, raw_y); input_report_abs(ctlr->input, ABS_X, x); input_report_abs(ctlr->input, ABS_Y, y); } static void joycon_report_right_stick(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { u16 raw_x; u16 raw_y; s32 x; s32 y; raw_x = hid_field_extract(ctlr->hdev, rep->right_stick, 0, 12); raw_y = hid_field_extract(ctlr->hdev, rep->right_stick + 1, 4, 12); x = joycon_map_stick_val(&ctlr->right_stick_cal_x, raw_x); y = -joycon_map_stick_val(&ctlr->right_stick_cal_y, raw_y); input_report_abs(ctlr->input, ABS_RX, x); input_report_abs(ctlr->input, ABS_RY, y); } static void joycon_report_dpad(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { int hatx = 0; int haty = 0; u32 btns = hid_field_extract(ctlr->hdev, rep->button_status, 0, 24); if (btns & JC_BTN_LEFT) hatx = -1; else if (btns & JC_BTN_RIGHT) hatx = 1; if (btns & JC_BTN_UP) haty = -1; else if (btns & JC_BTN_DOWN) haty = 1; input_report_abs(ctlr->input, ABS_HAT0X, hatx); input_report_abs(ctlr->input, ABS_HAT0Y, haty); } static void joycon_report_buttons(struct joycon_ctlr *ctlr, struct joycon_input_report *rep, const struct joycon_ctlr_button_mapping button_mappings[]) { const struct joycon_ctlr_button_mapping *button; u32 status = hid_field_extract(ctlr->hdev, rep->button_status, 0, 24); for (button = button_mappings; button->code; button++) input_report_key(ctlr->input, button->code, status & button->bit); } static void joycon_parse_report(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { unsigned long flags; unsigned long msecs = jiffies_to_msecs(jiffies); unsigned long report_delta_ms = msecs - ctlr->last_input_report_msecs; if (joycon_has_rumble(ctlr)) joycon_handle_rumble_report(ctlr, rep); joycon_parse_battery_status(ctlr, rep); if (joycon_type_is_left_joycon(ctlr)) { joycon_report_left_stick(ctlr, rep); joycon_report_buttons(ctlr, rep, left_joycon_button_mappings); if (!joycon_device_is_chrggrip(ctlr)) joycon_report_buttons(ctlr, rep, left_joycon_s_button_mappings); } else if (joycon_type_is_right_joycon(ctlr)) { joycon_report_right_stick(ctlr, rep); joycon_report_buttons(ctlr, rep, 
right_joycon_button_mappings); if (!joycon_device_is_chrggrip(ctlr)) joycon_report_buttons(ctlr, rep, right_joycon_s_button_mappings); } else if (joycon_type_is_procon(ctlr)) { joycon_report_left_stick(ctlr, rep); joycon_report_right_stick(ctlr, rep); joycon_report_dpad(ctlr, rep); joycon_report_buttons(ctlr, rep, procon_button_mappings); } else if (joycon_type_is_any_nescon(ctlr)) { joycon_report_dpad(ctlr, rep); joycon_report_buttons(ctlr, rep, nescon_button_mappings); } else if (joycon_type_is_snescon(ctlr)) { joycon_report_dpad(ctlr, rep); joycon_report_buttons(ctlr, rep, snescon_button_mappings); } else if (joycon_type_is_gencon(ctlr)) { joycon_report_dpad(ctlr, rep); joycon_report_buttons(ctlr, rep, gencon_button_mappings); } else if (joycon_type_is_n64con(ctlr)) { joycon_report_left_stick(ctlr, rep); joycon_report_dpad(ctlr, rep); joycon_report_buttons(ctlr, rep, n64con_button_mappings); } input_sync(ctlr->input); spin_lock_irqsave(&ctlr->lock, flags); ctlr->last_input_report_msecs = msecs; /* * Was this input report a reasonable time delta compared to the prior * report? We use this information to decide when a safe time is to send * rumble packets or subcommand packets. */ if (report_delta_ms >= JC_INPUT_REPORT_MIN_DELTA && report_delta_ms <= JC_INPUT_REPORT_MAX_DELTA) { if (ctlr->consecutive_valid_report_deltas < JC_SUBCMD_VALID_DELTA_REQ) ctlr->consecutive_valid_report_deltas++; } else { ctlr->consecutive_valid_report_deltas = 0; } /* * Our consecutive valid report tracking is only relevant for * bluetooth-connected controllers. For USB devices, we're beholden to * USB's underlying polling rate anyway. Always set to the consecutive * delta requirement. */ if (ctlr->hdev->bus == BUS_USB) ctlr->consecutive_valid_report_deltas = JC_SUBCMD_VALID_DELTA_REQ; spin_unlock_irqrestore(&ctlr->lock, flags); /* * Immediately after receiving a report is the most reliable time to * send a subcommand to the controller. Wake any subcommand senders * waiting for a report. */ if (unlikely(mutex_is_locked(&ctlr->output_mutex))) { spin_lock_irqsave(&ctlr->lock, flags); ctlr->received_input_report = true; spin_unlock_irqrestore(&ctlr->lock, flags); wake_up(&ctlr->wait); } /* parse IMU data if present */ if ((rep->id == JC_INPUT_IMU_DATA) && joycon_has_imu(ctlr)) joycon_parse_imu_report(ctlr, rep); } static int joycon_send_rumble_data(struct joycon_ctlr *ctlr) { int ret; unsigned long flags; struct joycon_rumble_output rumble_output = { 0 }; spin_lock_irqsave(&ctlr->lock, flags); /* * If the controller has been removed, just return ENODEV so the LED * subsystem doesn't print invalid errors on removal. 
*/ if (ctlr->ctlr_state == JOYCON_CTLR_STATE_REMOVED) { spin_unlock_irqrestore(&ctlr->lock, flags); return -ENODEV; } memcpy(rumble_output.rumble_data, ctlr->rumble_data[ctlr->rumble_queue_tail], JC_RUMBLE_DATA_SIZE); spin_unlock_irqrestore(&ctlr->lock, flags); rumble_output.output_id = JC_OUTPUT_RUMBLE_ONLY; rumble_output.packet_num = ctlr->subcmd_num; if (++ctlr->subcmd_num > 0xF) ctlr->subcmd_num = 0; joycon_enforce_subcmd_rate(ctlr); ret = __joycon_hid_send(ctlr->hdev, (u8 *)&rumble_output, sizeof(rumble_output)); return ret; } static void joycon_rumble_worker(struct work_struct *work) { struct joycon_ctlr *ctlr = container_of(work, struct joycon_ctlr, rumble_worker); unsigned long flags; bool again = true; int ret; while (again) { mutex_lock(&ctlr->output_mutex); ret = joycon_send_rumble_data(ctlr); mutex_unlock(&ctlr->output_mutex); /* -ENODEV means the controller was just unplugged */ spin_lock_irqsave(&ctlr->lock, flags); if (ret < 0 && ret != -ENODEV && ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED) hid_warn(ctlr->hdev, "Failed to set rumble; e=%d", ret); ctlr->rumble_msecs = jiffies_to_msecs(jiffies); if (ctlr->rumble_queue_tail != ctlr->rumble_queue_head) { if (++ctlr->rumble_queue_tail >= JC_RUMBLE_QUEUE_SIZE) ctlr->rumble_queue_tail = 0; } else { again = false; } spin_unlock_irqrestore(&ctlr->lock, flags); } } #if IS_ENABLED(CONFIG_NINTENDO_FF) static struct joycon_rumble_freq_data joycon_find_rumble_freq(u16 freq) { const size_t length = ARRAY_SIZE(joycon_rumble_frequencies); const struct joycon_rumble_freq_data *data = joycon_rumble_frequencies; int i = 0; if (freq > data[0].freq) { for (i = 1; i < length - 1; i++) { if (freq > data[i - 1].freq && freq <= data[i].freq) break; } } return data[i]; } static struct joycon_rumble_amp_data joycon_find_rumble_amp(u16 amp) { const size_t length = ARRAY_SIZE(joycon_rumble_amplitudes); const struct joycon_rumble_amp_data *data = joycon_rumble_amplitudes; int i = 0; if (amp > data[0].amp) { for (i = 1; i < length - 1; i++) { if (amp > data[i - 1].amp && amp <= data[i].amp) break; } } return data[i]; } static void joycon_encode_rumble(u8 *data, u16 freq_low, u16 freq_high, u16 amp) { struct joycon_rumble_freq_data freq_data_low; struct joycon_rumble_freq_data freq_data_high; struct joycon_rumble_amp_data amp_data; freq_data_low = joycon_find_rumble_freq(freq_low); freq_data_high = joycon_find_rumble_freq(freq_high); amp_data = joycon_find_rumble_amp(amp); data[0] = (freq_data_high.high >> 8) & 0xFF; data[1] = (freq_data_high.high & 0xFF) + amp_data.high; data[2] = freq_data_low.low + ((amp_data.low >> 8) & 0xFF); data[3] = amp_data.low & 0xFF; } static const u16 JOYCON_MAX_RUMBLE_HIGH_FREQ = 1253; static const u16 JOYCON_MIN_RUMBLE_HIGH_FREQ = 82; static const u16 JOYCON_MAX_RUMBLE_LOW_FREQ = 626; static const u16 JOYCON_MIN_RUMBLE_LOW_FREQ = 41; static void joycon_clamp_rumble_freqs(struct joycon_ctlr *ctlr) { unsigned long flags; spin_lock_irqsave(&ctlr->lock, flags); ctlr->rumble_ll_freq = clamp(ctlr->rumble_ll_freq, JOYCON_MIN_RUMBLE_LOW_FREQ, JOYCON_MAX_RUMBLE_LOW_FREQ); ctlr->rumble_lh_freq = clamp(ctlr->rumble_lh_freq, JOYCON_MIN_RUMBLE_HIGH_FREQ, JOYCON_MAX_RUMBLE_HIGH_FREQ); ctlr->rumble_rl_freq = clamp(ctlr->rumble_rl_freq, JOYCON_MIN_RUMBLE_LOW_FREQ, JOYCON_MAX_RUMBLE_LOW_FREQ); ctlr->rumble_rh_freq = clamp(ctlr->rumble_rh_freq, JOYCON_MIN_RUMBLE_HIGH_FREQ, JOYCON_MAX_RUMBLE_HIGH_FREQ); spin_unlock_irqrestore(&ctlr->lock, flags); } static int joycon_set_rumble(struct joycon_ctlr *ctlr, u16 amp_r, u16 amp_l, bool schedule_now) 
{ u8 data[JC_RUMBLE_DATA_SIZE]; u16 amp; u16 freq_r_low; u16 freq_r_high; u16 freq_l_low; u16 freq_l_high; unsigned long flags; int next_rq_head; spin_lock_irqsave(&ctlr->lock, flags); freq_r_low = ctlr->rumble_rl_freq; freq_r_high = ctlr->rumble_rh_freq; freq_l_low = ctlr->rumble_ll_freq; freq_l_high = ctlr->rumble_lh_freq; /* limit number of silent rumble packets to reduce traffic */ if (amp_l != 0 || amp_r != 0) ctlr->rumble_zero_countdown = JC_RUMBLE_ZERO_AMP_PKT_CNT; spin_unlock_irqrestore(&ctlr->lock, flags); /* right joy-con */ amp = amp_r * (u32)joycon_max_rumble_amp / 65535; joycon_encode_rumble(data + 4, freq_r_low, freq_r_high, amp); /* left joy-con */ amp = amp_l * (u32)joycon_max_rumble_amp / 65535; joycon_encode_rumble(data, freq_l_low, freq_l_high, amp); spin_lock_irqsave(&ctlr->lock, flags); next_rq_head = ctlr->rumble_queue_head + 1; if (next_rq_head >= JC_RUMBLE_QUEUE_SIZE) next_rq_head = 0; /* Did we overrun the circular buffer? * If so, be sure we keep the latest intended rumble state. */ if (next_rq_head == ctlr->rumble_queue_tail) { hid_dbg(ctlr->hdev, "rumble queue is full"); /* overwrite the prior value at the end of the circular buf */ next_rq_head = ctlr->rumble_queue_head; } ctlr->rumble_queue_head = next_rq_head; memcpy(ctlr->rumble_data[ctlr->rumble_queue_head], data, JC_RUMBLE_DATA_SIZE); /* don't wait for the periodic send (reduces latency) */ if (schedule_now && ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED) queue_work(ctlr->rumble_queue, &ctlr->rumble_worker); spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } static int joycon_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) { struct joycon_ctlr *ctlr = input_get_drvdata(dev); if (effect->type != FF_RUMBLE) return 0; return joycon_set_rumble(ctlr, effect->u.rumble.weak_magnitude, effect->u.rumble.strong_magnitude, true); } #endif /* IS_ENABLED(CONFIG_NINTENDO_FF) */ static void joycon_config_left_stick(struct input_dev *idev) { input_set_abs_params(idev, ABS_X, -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG, JC_STICK_FUZZ, JC_STICK_FLAT); input_set_abs_params(idev, ABS_Y, -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG, JC_STICK_FUZZ, JC_STICK_FLAT); } static void joycon_config_right_stick(struct input_dev *idev) { input_set_abs_params(idev, ABS_RX, -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG, JC_STICK_FUZZ, JC_STICK_FLAT); input_set_abs_params(idev, ABS_RY, -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG, JC_STICK_FUZZ, JC_STICK_FLAT); } static void joycon_config_dpad(struct input_dev *idev) { input_set_abs_params(idev, ABS_HAT0X, -JC_MAX_DPAD_MAG, JC_MAX_DPAD_MAG, JC_DPAD_FUZZ, JC_DPAD_FLAT); input_set_abs_params(idev, ABS_HAT0Y, -JC_MAX_DPAD_MAG, JC_MAX_DPAD_MAG, JC_DPAD_FUZZ, JC_DPAD_FLAT); } static void joycon_config_buttons(struct input_dev *idev, const struct joycon_ctlr_button_mapping button_mappings[]) { const struct joycon_ctlr_button_mapping *button; for (button = button_mappings; button->code; button++) input_set_capability(idev, EV_KEY, button->code); } static void joycon_config_rumble(struct joycon_ctlr *ctlr) { #if IS_ENABLED(CONFIG_NINTENDO_FF) /* set up rumble */ input_set_capability(ctlr->input, EV_FF, FF_RUMBLE); input_ff_create_memless(ctlr->input, NULL, joycon_play_effect); ctlr->rumble_ll_freq = JC_RUMBLE_DFLT_LOW_FREQ; ctlr->rumble_lh_freq = JC_RUMBLE_DFLT_HIGH_FREQ; ctlr->rumble_rl_freq = JC_RUMBLE_DFLT_LOW_FREQ; ctlr->rumble_rh_freq = JC_RUMBLE_DFLT_HIGH_FREQ; joycon_clamp_rumble_freqs(ctlr); joycon_set_rumble(ctlr, 0, 0, false); ctlr->rumble_msecs = jiffies_to_msecs(jiffies); #endif } static 
int joycon_imu_input_create(struct joycon_ctlr *ctlr) { struct hid_device *hdev; const char *imu_name; int ret; hdev = ctlr->hdev; /* configure the imu input device */ ctlr->imu_input = devm_input_allocate_device(&hdev->dev); if (!ctlr->imu_input) return -ENOMEM; ctlr->imu_input->id.bustype = hdev->bus; ctlr->imu_input->id.vendor = hdev->vendor; ctlr->imu_input->id.product = hdev->product; ctlr->imu_input->id.version = hdev->version; ctlr->imu_input->uniq = ctlr->mac_addr_str; ctlr->imu_input->phys = hdev->phys; imu_name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s (IMU)", ctlr->input->name); if (!imu_name) return -ENOMEM; ctlr->imu_input->name = imu_name; input_set_drvdata(ctlr->imu_input, ctlr); /* configure imu axes */ input_set_abs_params(ctlr->imu_input, ABS_X, -JC_IMU_MAX_ACCEL_MAG, JC_IMU_MAX_ACCEL_MAG, JC_IMU_ACCEL_FUZZ, JC_IMU_ACCEL_FLAT); input_set_abs_params(ctlr->imu_input, ABS_Y, -JC_IMU_MAX_ACCEL_MAG, JC_IMU_MAX_ACCEL_MAG, JC_IMU_ACCEL_FUZZ, JC_IMU_ACCEL_FLAT); input_set_abs_params(ctlr->imu_input, ABS_Z, -JC_IMU_MAX_ACCEL_MAG, JC_IMU_MAX_ACCEL_MAG, JC_IMU_ACCEL_FUZZ, JC_IMU_ACCEL_FLAT); input_abs_set_res(ctlr->imu_input, ABS_X, JC_IMU_ACCEL_RES_PER_G); input_abs_set_res(ctlr->imu_input, ABS_Y, JC_IMU_ACCEL_RES_PER_G); input_abs_set_res(ctlr->imu_input, ABS_Z, JC_IMU_ACCEL_RES_PER_G); input_set_abs_params(ctlr->imu_input, ABS_RX, -JC_IMU_MAX_GYRO_MAG, JC_IMU_MAX_GYRO_MAG, JC_IMU_GYRO_FUZZ, JC_IMU_GYRO_FLAT); input_set_abs_params(ctlr->imu_input, ABS_RY, -JC_IMU_MAX_GYRO_MAG, JC_IMU_MAX_GYRO_MAG, JC_IMU_GYRO_FUZZ, JC_IMU_GYRO_FLAT); input_set_abs_params(ctlr->imu_input, ABS_RZ, -JC_IMU_MAX_GYRO_MAG, JC_IMU_MAX_GYRO_MAG, JC_IMU_GYRO_FUZZ, JC_IMU_GYRO_FLAT); input_abs_set_res(ctlr->imu_input, ABS_RX, JC_IMU_GYRO_RES_PER_DPS); input_abs_set_res(ctlr->imu_input, ABS_RY, JC_IMU_GYRO_RES_PER_DPS); input_abs_set_res(ctlr->imu_input, ABS_RZ, JC_IMU_GYRO_RES_PER_DPS); __set_bit(EV_MSC, ctlr->imu_input->evbit); __set_bit(MSC_TIMESTAMP, ctlr->imu_input->mscbit); __set_bit(INPUT_PROP_ACCELEROMETER, ctlr->imu_input->propbit); ret = input_register_device(ctlr->imu_input); if (ret) return ret; return 0; } static int joycon_input_create(struct joycon_ctlr *ctlr) { struct hid_device *hdev; int ret; hdev = ctlr->hdev; ctlr->input = devm_input_allocate_device(&hdev->dev); if (!ctlr->input) return -ENOMEM; ctlr->input->id.bustype = hdev->bus; ctlr->input->id.vendor = hdev->vendor; ctlr->input->id.product = hdev->product; ctlr->input->id.version = hdev->version; ctlr->input->uniq = ctlr->mac_addr_str; ctlr->input->name = hdev->name; ctlr->input->phys = hdev->phys; input_set_drvdata(ctlr->input, ctlr); ret = input_register_device(ctlr->input); if (ret) return ret; if (joycon_type_is_right_joycon(ctlr)) { joycon_config_right_stick(ctlr->input); joycon_config_buttons(ctlr->input, right_joycon_button_mappings); if (!joycon_device_is_chrggrip(ctlr)) joycon_config_buttons(ctlr->input, right_joycon_s_button_mappings); } else if (joycon_type_is_left_joycon(ctlr)) { joycon_config_left_stick(ctlr->input); joycon_config_buttons(ctlr->input, left_joycon_button_mappings); if (!joycon_device_is_chrggrip(ctlr)) joycon_config_buttons(ctlr->input, left_joycon_s_button_mappings); } else if (joycon_type_is_procon(ctlr)) { joycon_config_left_stick(ctlr->input); joycon_config_right_stick(ctlr->input); joycon_config_dpad(ctlr->input); joycon_config_buttons(ctlr->input, procon_button_mappings); } else if (joycon_type_is_any_nescon(ctlr)) { joycon_config_dpad(ctlr->input); joycon_config_buttons(ctlr->input, 
nescon_button_mappings); } else if (joycon_type_is_snescon(ctlr)) { joycon_config_dpad(ctlr->input); joycon_config_buttons(ctlr->input, snescon_button_mappings); } else if (joycon_type_is_gencon(ctlr)) { joycon_config_dpad(ctlr->input); joycon_config_buttons(ctlr->input, gencon_button_mappings); } else if (joycon_type_is_n64con(ctlr)) { joycon_config_dpad(ctlr->input); joycon_config_left_stick(ctlr->input); joycon_config_buttons(ctlr->input, n64con_button_mappings); } if (joycon_has_imu(ctlr)) { ret = joycon_imu_input_create(ctlr); if (ret) return ret; } if (joycon_has_rumble(ctlr)) joycon_config_rumble(ctlr); return 0; } /* Because the subcommand sets all the leds at once, the brightness argument is ignored */ static int joycon_player_led_brightness_set(struct led_classdev *led, enum led_brightness brightness) { struct device *dev = led->dev->parent; struct hid_device *hdev = to_hid_device(dev); struct joycon_ctlr *ctlr; int val = 0; int i; int ret; ctlr = hid_get_drvdata(hdev); if (!ctlr) { hid_err(hdev, "No controller data\n"); return -ENODEV; } for (i = 0; i < JC_NUM_LEDS; i++) val |= ctlr->leds[i].brightness << i; mutex_lock(&ctlr->output_mutex); ret = joycon_set_player_leds(ctlr, 0, val); mutex_unlock(&ctlr->output_mutex); return ret; } static int joycon_home_led_brightness_set(struct led_classdev *led, enum led_brightness brightness) { struct device *dev = led->dev->parent; struct hid_device *hdev = to_hid_device(dev); struct joycon_ctlr *ctlr; int ret; ctlr = hid_get_drvdata(hdev); if (!ctlr) { hid_err(hdev, "No controller data\n"); return -ENODEV; } mutex_lock(&ctlr->output_mutex); ret = joycon_set_home_led(ctlr, brightness); mutex_unlock(&ctlr->output_mutex); return ret; } static DEFINE_IDA(nintendo_player_id_allocator); static int joycon_leds_create(struct joycon_ctlr *ctlr) { struct hid_device *hdev = ctlr->hdev; struct device *dev = &hdev->dev; const char *d_name = dev_name(dev); struct led_classdev *led; int led_val = 0; char *name; int ret; int i; int player_led_pattern; /* configure the player LEDs */ ctlr->player_id = U32_MAX; ret = ida_alloc(&nintendo_player_id_allocator, GFP_KERNEL); if (ret < 0) { hid_warn(hdev, "Failed to allocate player ID, skipping; ret=%d\n", ret); goto home_led; } ctlr->player_id = ret; player_led_pattern = ret % JC_NUM_LED_PATTERNS; hid_info(ctlr->hdev, "assigned player %d led pattern", player_led_pattern + 1); for (i = 0; i < JC_NUM_LEDS; i++) { name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s:%s", d_name, "green", joycon_player_led_names[i]); if (!name) return -ENOMEM; led = &ctlr->leds[i]; led->name = name; led->brightness = joycon_player_led_patterns[player_led_pattern][i]; led->max_brightness = 1; led->brightness_set_blocking = joycon_player_led_brightness_set; led->flags = LED_CORE_SUSPENDRESUME | LED_HW_PLUGGABLE; led_val |= joycon_player_led_patterns[player_led_pattern][i] << i; } mutex_lock(&ctlr->output_mutex); ret = joycon_set_player_leds(ctlr, 0, led_val); mutex_unlock(&ctlr->output_mutex); if (ret) { hid_warn(hdev, "Failed to set players LEDs, skipping registration; ret=%d\n", ret); goto home_led; } for (i = 0; i < JC_NUM_LEDS; i++) { led = &ctlr->leds[i]; ret = devm_led_classdev_register(&hdev->dev, led); if (ret) { hid_err(hdev, "Failed to register player %d LED; ret=%d\n", i + 1, ret); return ret; } } home_led: /* configure the home LED */ if (jc_type_has_right(ctlr)) { name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s:%s", d_name, "blue", LED_FUNCTION_PLAYER5); if (!name) return -ENOMEM; led = &ctlr->home_led; led->name = name; 
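/* the home LED exposes 16 brightness levels; max_brightness is set to 0xF just below */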
led->brightness = 0; led->max_brightness = 0xF; led->brightness_set_blocking = joycon_home_led_brightness_set; led->flags = LED_CORE_SUSPENDRESUME | LED_HW_PLUGGABLE; /* Set the home LED to 0 as default state */ mutex_lock(&ctlr->output_mutex); ret = joycon_set_home_led(ctlr, 0); mutex_unlock(&ctlr->output_mutex); if (ret) { hid_warn(hdev, "Failed to set home LED, skipping registration; ret=%d\n", ret); return 0; } ret = devm_led_classdev_register(&hdev->dev, led); if (ret) { hid_err(hdev, "Failed to register home LED; ret=%d\n", ret); return ret; } } return 0; } static int joycon_battery_get_property(struct power_supply *supply, enum power_supply_property prop, union power_supply_propval *val) { struct joycon_ctlr *ctlr = power_supply_get_drvdata(supply); unsigned long flags; int ret = 0; u8 capacity; bool charging; bool powered; spin_lock_irqsave(&ctlr->lock, flags); capacity = ctlr->battery_capacity; charging = ctlr->battery_charging; powered = ctlr->host_powered; spin_unlock_irqrestore(&ctlr->lock, flags); switch (prop) { case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: val->intval = capacity; break; case POWER_SUPPLY_PROP_STATUS: if (charging) val->intval = POWER_SUPPLY_STATUS_CHARGING; else if (capacity == POWER_SUPPLY_CAPACITY_LEVEL_FULL && powered) val->intval = POWER_SUPPLY_STATUS_FULL; else val->intval = POWER_SUPPLY_STATUS_DISCHARGING; break; default: ret = -EINVAL; break; } return ret; } static enum power_supply_property joycon_battery_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_STATUS, }; static int joycon_power_supply_create(struct joycon_ctlr *ctlr) { struct hid_device *hdev = ctlr->hdev; struct power_supply_config supply_config = { .drv_data = ctlr, }; const char * const name_fmt = "nintendo_switch_controller_battery_%s"; int ret = 0; /* Set initially to unknown before receiving first input report */ ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; /* Configure the battery's description */ ctlr->battery_desc.properties = joycon_battery_props; ctlr->battery_desc.num_properties = ARRAY_SIZE(joycon_battery_props); ctlr->battery_desc.get_property = joycon_battery_get_property; ctlr->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY; ctlr->battery_desc.use_for_apm = 0; ctlr->battery_desc.name = devm_kasprintf(&hdev->dev, GFP_KERNEL, name_fmt, dev_name(&hdev->dev)); if (!ctlr->battery_desc.name) return -ENOMEM; ctlr->battery = devm_power_supply_register(&hdev->dev, &ctlr->battery_desc, &supply_config); if (IS_ERR(ctlr->battery)) { ret = PTR_ERR(ctlr->battery); hid_err(hdev, "Failed to register battery; ret=%d\n", ret); return ret; } return power_supply_powers(ctlr->battery, &hdev->dev); } static int joycon_read_info(struct joycon_ctlr *ctlr) { int ret; int i; int j; struct joycon_subcmd_request req = { 0 }; struct joycon_input_report *report; req.subcmd_id = JC_SUBCMD_REQ_DEV_INFO; ret = joycon_send_subcmd(ctlr, &req, 0, HZ); if (ret) { hid_err(ctlr->hdev, "Failed to get joycon info; ret=%d\n", ret); return ret; } report = (struct joycon_input_report *)ctlr->input_buf; for (i = 4, j = 0; j < 6; i++, j++) ctlr->mac_addr[j] = report->subcmd_reply.data[i]; ctlr->mac_addr_str = devm_kasprintf(&ctlr->hdev->dev, GFP_KERNEL, "%02X:%02X:%02X:%02X:%02X:%02X", ctlr->mac_addr[0], ctlr->mac_addr[1], ctlr->mac_addr[2], ctlr->mac_addr[3], ctlr->mac_addr[4], ctlr->mac_addr[5]); if 
(!ctlr->mac_addr_str) return -ENOMEM; hid_info(ctlr->hdev, "controller MAC = %s\n", ctlr->mac_addr_str); /* * Retrieve the type so we can distinguish the controller type * Unfortantly the hdev->product can't always be used due to a ?bug? * with the NSO Genesis controller. Over USB, it will report the * PID as 0x201E, but over bluetooth it will report the PID as 0x2017 * which is the same as the NSO SNES controller. This is different from * the rest of the controllers which will report the same PID over USB * and bluetooth. */ ctlr->ctlr_type = report->subcmd_reply.data[2]; hid_dbg(ctlr->hdev, "controller type = 0x%02X\n", ctlr->ctlr_type); return 0; } static int joycon_init(struct hid_device *hdev) { struct joycon_ctlr *ctlr = hid_get_drvdata(hdev); int ret = 0; mutex_lock(&ctlr->output_mutex); /* if handshake command fails, assume ble pro controller */ if (joycon_using_usb(ctlr) && !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) { hid_dbg(hdev, "detected USB controller\n"); /* set baudrate for improved latency */ ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ); if (ret) { /* * We can function with the default baudrate. * Provide a warning, and continue on. */ hid_warn(hdev, "Failed to set baudrate (ret=%d), continuing anyway\n", ret); } /* handshake */ ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ); if (ret) { hid_err(hdev, "Failed handshake; ret=%d\n", ret); goto out_unlock; } /* * Set no timeout (to keep controller in USB mode). * This doesn't send a response, so ignore the timeout. */ joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10); } else if (jc_type_is_chrggrip(ctlr)) { hid_err(hdev, "Failed charging grip handshake\n"); ret = -ETIMEDOUT; goto out_unlock; } /* needed to retrieve the controller type */ ret = joycon_read_info(ctlr); if (ret) { hid_err(hdev, "Failed to retrieve controller info; ret=%d\n", ret); goto out_unlock; } if (joycon_has_joysticks(ctlr)) { /* get controller calibration data, and parse it */ ret = joycon_request_calibration(ctlr); if (ret) { /* * We can function with default calibration, but it may be * inaccurate. Provide a warning, and continue on. */ hid_warn(hdev, "Analog stick positions may be inaccurate\n"); } } if (joycon_has_imu(ctlr)) { /* get IMU calibration data, and parse it */ ret = joycon_request_imu_calibration(ctlr); if (ret) { /* * We can function with default calibration, but it may be * inaccurate. Provide a warning, and continue on. 
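			 * In that case joycon_request_imu_calibration() has
			 * already fallen back to the DFLT_ACCEL_* and
			 * DFLT_GYRO_* values defined above, so the IMU still
			 * reports, just with nominal scaling.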
*/ hid_warn(hdev, "Unable to read IMU calibration data\n"); } /* Enable the IMU */ ret = joycon_enable_imu(ctlr); if (ret) { hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret); goto out_unlock; } } /* Set the reporting mode to 0x30, which is the full report mode */ ret = joycon_set_report_mode(ctlr); if (ret) { hid_err(hdev, "Failed to set report mode; ret=%d\n", ret); goto out_unlock; } if (joycon_has_rumble(ctlr)) { /* Enable rumble */ ret = joycon_enable_rumble(ctlr); if (ret) { hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret); goto out_unlock; } } out_unlock: mutex_unlock(&ctlr->output_mutex); return ret; } /* Common handler for parsing inputs */ static int joycon_ctlr_read_handler(struct joycon_ctlr *ctlr, u8 *data, int size) { if (data[0] == JC_INPUT_SUBCMD_REPLY || data[0] == JC_INPUT_IMU_DATA || data[0] == JC_INPUT_MCU_DATA) { if (size >= 12) /* make sure it contains the input report */ joycon_parse_report(ctlr, (struct joycon_input_report *)data); } return 0; } static int joycon_ctlr_handle_event(struct joycon_ctlr *ctlr, u8 *data, int size) { int ret = 0; bool match = false; struct joycon_input_report *report; if (unlikely(mutex_is_locked(&ctlr->output_mutex)) && ctlr->msg_type != JOYCON_MSG_TYPE_NONE) { switch (ctlr->msg_type) { case JOYCON_MSG_TYPE_USB: if (size < 2) break; if (data[0] == JC_INPUT_USB_RESPONSE && data[1] == ctlr->usb_ack_match) match = true; break; case JOYCON_MSG_TYPE_SUBCMD: if (size < sizeof(struct joycon_input_report) || data[0] != JC_INPUT_SUBCMD_REPLY) break; report = (struct joycon_input_report *)data; if (report->subcmd_reply.id == ctlr->subcmd_ack_match) match = true; break; default: break; } if (match) { memcpy(ctlr->input_buf, data, min(size, (int)JC_MAX_RESP_SIZE)); ctlr->msg_type = JOYCON_MSG_TYPE_NONE; ctlr->received_resp = true; wake_up(&ctlr->wait); /* This message has been handled */ return 1; } } if (ctlr->ctlr_state == JOYCON_CTLR_STATE_READ) ret = joycon_ctlr_read_handler(ctlr, data, size); return ret; } static int nintendo_hid_event(struct hid_device *hdev, struct hid_report *report, u8 *raw_data, int size) { struct joycon_ctlr *ctlr = hid_get_drvdata(hdev); if (size < 1) return -EINVAL; return joycon_ctlr_handle_event(ctlr, raw_data, size); } static int nintendo_hid_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; struct joycon_ctlr *ctlr; hid_dbg(hdev, "probe - start\n"); ctlr = devm_kzalloc(&hdev->dev, sizeof(*ctlr), GFP_KERNEL); if (!ctlr) { ret = -ENOMEM; goto err; } ctlr->hdev = hdev; ctlr->ctlr_state = JOYCON_CTLR_STATE_INIT; ctlr->rumble_queue_head = 0; ctlr->rumble_queue_tail = 0; hid_set_drvdata(hdev, ctlr); mutex_init(&ctlr->output_mutex); init_waitqueue_head(&ctlr->wait); spin_lock_init(&ctlr->lock); ctlr->rumble_queue = alloc_workqueue("hid-nintendo-rumble_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, 0); if (!ctlr->rumble_queue) { ret = -ENOMEM; goto err; } INIT_WORK(&ctlr->rumble_worker, joycon_rumble_worker); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "HID parse failed\n"); goto err_wq; } /* * Patch the hw version of pro controller/joycons, so applications can * distinguish between the default HID mappings and the mappings defined * by the Linux game controller spec. This is important for the SDL2 * library, which has a game controller database, which uses device ids * in combination with version as a key. 
*/ hdev->version |= 0x8000; ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); if (ret) { hid_err(hdev, "HW start failed\n"); goto err_wq; } ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "cannot start hardware I/O\n"); goto err_stop; } hid_device_io_start(hdev); ret = joycon_init(hdev); if (ret) { hid_err(hdev, "Failed to initialize controller; ret=%d\n", ret); goto err_close; } /* Initialize the leds */ ret = joycon_leds_create(ctlr); if (ret) { hid_err(hdev, "Failed to create leds; ret=%d\n", ret); goto err_close; } /* Initialize the battery power supply */ ret = joycon_power_supply_create(ctlr); if (ret) { hid_err(hdev, "Failed to create power_supply; ret=%d\n", ret); goto err_ida; } ret = joycon_input_create(ctlr); if (ret) { hid_err(hdev, "Failed to create input device; ret=%d\n", ret); goto err_ida; } ctlr->ctlr_state = JOYCON_CTLR_STATE_READ; hid_dbg(hdev, "probe - success\n"); return 0; err_ida: ida_free(&nintendo_player_id_allocator, ctlr->player_id); err_close: hid_hw_close(hdev); err_stop: hid_hw_stop(hdev); err_wq: destroy_workqueue(ctlr->rumble_queue); err: hid_err(hdev, "probe - fail = %d\n", ret); return ret; } static void nintendo_hid_remove(struct hid_device *hdev) { struct joycon_ctlr *ctlr = hid_get_drvdata(hdev); unsigned long flags; hid_dbg(hdev, "remove\n"); /* Prevent further attempts at sending subcommands. */ spin_lock_irqsave(&ctlr->lock, flags); ctlr->ctlr_state = JOYCON_CTLR_STATE_REMOVED; spin_unlock_irqrestore(&ctlr->lock, flags); destroy_workqueue(ctlr->rumble_queue); ida_free(&nintendo_player_id_allocator, ctlr->player_id); hid_hw_close(hdev); hid_hw_stop(hdev); } #ifdef CONFIG_PM static int nintendo_hid_resume(struct hid_device *hdev) { int ret = joycon_init(hdev); if (ret) hid_err(hdev, "Failed to restore controller after resume"); return ret; } #endif static const struct hid_device_id nintendo_hid_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_PROCON) }, { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_SNESCON) }, { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_GENCON) }, { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_N64CON) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_PROCON) }, { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_CHRGGRIP) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_JOYCONL) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_JOYCONR) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_SNESCON) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_GENCON) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_N64CON) }, { } }; MODULE_DEVICE_TABLE(hid, nintendo_hid_devices); static struct hid_driver nintendo_hid_driver = { .name = "nintendo", .id_table = nintendo_hid_devices, .probe = nintendo_hid_probe, .remove = nintendo_hid_remove, .raw_event = nintendo_hid_event, #ifdef CONFIG_PM .resume = nintendo_hid_resume, #endif }; static int __init nintendo_init(void) { return hid_register_driver(&nintendo_hid_driver); } static void __exit nintendo_exit(void) { hid_unregister_driver(&nintendo_hid_driver); ida_destroy(&nintendo_player_id_allocator); } module_init(nintendo_init); module_exit(nintendo_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ryan McClelland <rymcclel@gmail.com>"); MODULE_AUTHOR("Emily Strickland <linux@emily.st>"); MODULE_AUTHOR("Daniel J. 
Ogorchock <djogorchock@gmail.com>"); MODULE_DESCRIPTION("Driver for Nintendo Switch Controllers");
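/*
 * A minimal, standalone userspace sketch of the stick mapping performed by
 * joycon_map_stick_val() above. The calibration numbers (center/min/max of
 * 2000/500/3500) and the 32767 magnitude are made up for illustration; the
 * real driver uses JC_MAX_STICK_MAG and per-controller calibration data.
 */
#include <stdio.h>

#define EXAMPLE_MAX_MAG 32767

struct example_stick_cal {
	int center;
	int min;
	int max;
};

static int example_map_stick_val(const struct example_stick_cal *cal, int val)
{
	int new_val;

	if (val > cal->center)
		new_val = (val - cal->center) * EXAMPLE_MAX_MAG / (cal->max - cal->center);
	else
		new_val = (cal->center - val) * -EXAMPLE_MAX_MAG / (cal->center - cal->min);

	/* clamp to the reported axis range, as the driver does */
	if (new_val > EXAMPLE_MAX_MAG)
		new_val = EXAMPLE_MAX_MAG;
	if (new_val < -EXAMPLE_MAX_MAG)
		new_val = -EXAMPLE_MAX_MAG;
	return new_val;
}

int main(void)
{
	struct example_stick_cal cal = { .center = 2000, .min = 500, .max = 3500 };

	/* prints "0 32767 -32767": centered, full right, full left */
	printf("%d %d %d\n",
	       example_map_stick_val(&cal, 2000),
	       example_map_stick_val(&cal, 3500),
	       example_map_stick_val(&cal, 500));
	return 0;
}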
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/ext4/balloc.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993 * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/time.h> #include <linux/capability.h> #include <linux/fs.h> #include <linux/quotaops.h> #include <linux/buffer_head.h> #include "ext4.h" #include "ext4_jbd2.h" #include "mballoc.h" #include <trace/events/ext4.h> #include <kunit/static_stub.h> static unsigned ext4_num_base_meta_clusters(struct super_block *sb, ext4_group_t block_group); /* * balloc.c contains the blocks allocation and deallocation routines */ /* * Calculate block group number for a given block number */ ext4_group_t ext4_get_group_number(struct super_block *sb, ext4_fsblk_t block) { ext4_group_t group; if (test_opt2(sb, STD_GROUP_SIZE)) group = (block - le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >> (EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3); else ext4_get_group_no_and_offset(sb, block, &group, NULL); return group; } /* * Calculate the block group number and offset into the block/cluster * allocation bitmap, given a block number */ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr, ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; ext4_grpblk_t offset; blocknr = blocknr - le32_to_cpu(es->s_first_data_block); offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >> EXT4_SB(sb)->s_cluster_bits; if (offsetp) *offsetp = offset; if (blockgrpp) *blockgrpp = blocknr; } /* * Check whether the 'block' lives within the 'block_group'. Returns 1 if so * and 0 otherwise. */ static inline int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block, ext4_group_t block_group) { ext4_group_t actual_group; actual_group = ext4_get_group_number(sb, block); return (actual_group == block_group) ? 1 : 0; } /* * Return the number of clusters used for file system metadata; this * represents the overhead needed by the file system. */ static unsigned ext4_num_overhead_clusters(struct super_block *sb, ext4_group_t block_group, struct ext4_group_desc *gdp) { unsigned base_clusters, num_clusters; int block_cluster = -1, inode_cluster; int itbl_cluster_start = -1, itbl_cluster_end = -1; ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group); ext4_fsblk_t end = start + EXT4_BLOCKS_PER_GROUP(sb) - 1; ext4_fsblk_t itbl_blk_start, itbl_blk_end; struct ext4_sb_info *sbi = EXT4_SB(sb); /* This is the number of clusters used by the superblock, * block group descriptors, and reserved block group * descriptor blocks */ base_clusters = ext4_num_base_meta_clusters(sb, block_group); num_clusters = base_clusters; /* * Account and record inode table clusters if any cluster * is in the block group, or inode table cluster range is * [-1, -1] and won't overlap with block/inode bitmap cluster * accounted below.
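	 * As a worked (hypothetical) example with a cluster ratio of 1: if
	 * the group starts at block S and the inode table occupies blocks
	 * S+4 .. S+515, then itbl_cluster_start = 4, itbl_cluster_end = 515
	 * and 512 clusters are added; if base_clusters happened to be 5, the
	 * shared border cluster 4 would have been counted twice, which the
	 * check below corrects.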
*/ itbl_blk_start = ext4_inode_table(sb, gdp); itbl_blk_end = itbl_blk_start + sbi->s_itb_per_group - 1; if (itbl_blk_start <= end && itbl_blk_end >= start) { itbl_blk_start = max(itbl_blk_start, start); itbl_blk_end = min(itbl_blk_end, end); itbl_cluster_start = EXT4_B2C(sbi, itbl_blk_start - start); itbl_cluster_end = EXT4_B2C(sbi, itbl_blk_end - start); num_clusters += itbl_cluster_end - itbl_cluster_start + 1; /* check if border cluster is overlapped */ if (itbl_cluster_start == base_clusters - 1) num_clusters--; } /* * For the allocation bitmaps, we first need to check to see * if the block is in the block group. If it is, then check * to see if the cluster is already accounted for in the clusters * used for the base metadata cluster and inode tables cluster. * Normally all of these blocks are contiguous, so the special * case handling shouldn't be necessary except for *very* * unusual file system layouts. */ if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) { block_cluster = EXT4_B2C(sbi, ext4_block_bitmap(sb, gdp) - start); if (block_cluster >= base_clusters && (block_cluster < itbl_cluster_start || block_cluster > itbl_cluster_end)) num_clusters++; } if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) { inode_cluster = EXT4_B2C(sbi, ext4_inode_bitmap(sb, gdp) - start); /* * Additional check if inode bitmap is in just accounted * block_cluster */ if (inode_cluster != block_cluster && inode_cluster >= base_clusters && (inode_cluster < itbl_cluster_start || inode_cluster > itbl_cluster_end)) num_clusters++; } return num_clusters; } static unsigned int num_clusters_in_group(struct super_block *sb, ext4_group_t block_group) { unsigned int blocks; if (block_group == ext4_get_groups_count(sb) - 1) { /* * Even though mke2fs always initializes the first and * last group, just in case some other tool was used, * we need to make sure we calculate the right free * blocks. 
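		 * For instance (numbers purely illustrative): with 32768
		 * blocks per group, a first data block of 0 and
		 * ext4_blocks_count() == 3260000, the last group (group 99)
		 * starts at block 3244032 and therefore holds only
		 * 3260000 - 3244032 = 15968 blocks before the cluster
		 * conversion below.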
*/ blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) - ext4_group_first_block_no(sb, block_group); } else blocks = EXT4_BLOCKS_PER_GROUP(sb); return EXT4_NUM_B2C(EXT4_SB(sb), blocks); } /* Initializes an uninitialized block bitmap */ static int ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh, ext4_group_t block_group, struct ext4_group_desc *gdp) { unsigned int bit, bit_max; struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t start, tmp; ASSERT(buffer_locked(bh)); if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { ext4_mark_group_bitmap_corrupted(sb, block_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT | EXT4_GROUP_INFO_IBITMAP_CORRUPT); return -EFSBADCRC; } memset(bh->b_data, 0, sb->s_blocksize); bit_max = ext4_num_base_meta_clusters(sb, block_group); if ((bit_max >> 3) >= bh->b_size) return -EFSCORRUPTED; for (bit = 0; bit < bit_max; bit++) ext4_set_bit(bit, bh->b_data); start = ext4_group_first_block_no(sb, block_group); /* Set bits for block and inode bitmaps, and inode table */ tmp = ext4_block_bitmap(sb, gdp); if (ext4_block_in_group(sb, tmp, block_group)) ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); tmp = ext4_inode_bitmap(sb, gdp); if (ext4_block_in_group(sb, tmp, block_group)) ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); tmp = ext4_inode_table(sb, gdp); for (; tmp < ext4_inode_table(sb, gdp) + sbi->s_itb_per_group; tmp++) { if (ext4_block_in_group(sb, tmp, block_group)) ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); } /* * Also if the number of blocks within the group is less than * the blocksize * 8 ( which is the size of bitmap ), set rest * of the block bitmap to 1 */ ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group), sb->s_blocksize * 8, bh->b_data); return 0; } /* Return the number of free blocks in a block group. It is used when * the block bitmap is uninitialized, so we can't just count the bits * in the bitmap. */ unsigned ext4_free_clusters_after_init(struct super_block *sb, ext4_group_t block_group, struct ext4_group_desc *gdp) { return num_clusters_in_group(sb, block_group) - ext4_num_overhead_clusters(sb, block_group, gdp); } /* * The free blocks are managed by bitmaps. A file system contains several * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap * block for inodes, N blocks for the inode table and data blocks. * * The file system contains group descriptors which are located after the * super block. Each descriptor contains the number of the bitmap block and * the free blocks count in the block. The descriptors are loaded in memory * when a file system is mounted (see ext4_fill_super). 
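 * A rough sketch of one block group in the traditional (non-flex_bg)
 * layout, for a group that carries a superblock backup:
 *
 *   [ superblock | group descriptors | block bitmap | inode bitmap |
 *     inode table | data blocks ... ]
 *
 * With flex_bg, the bitmaps and inode tables of several groups may
 * instead be packed together into the first group of a flex group.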
*/ /** * ext4_get_group_desc() -- load group descriptor from disk * @sb: super block * @block_group: given block group * @bh: pointer to the buffer head to store the block * group descriptor */ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, ext4_group_t block_group, struct buffer_head **bh) { unsigned int group_desc; unsigned int offset; ext4_group_t ngroups = ext4_get_groups_count(sb); struct ext4_group_desc *desc; struct ext4_sb_info *sbi = EXT4_SB(sb); struct buffer_head *bh_p; KUNIT_STATIC_STUB_REDIRECT(ext4_get_group_desc, sb, block_group, bh); if (block_group >= ngroups) { ext4_error(sb, "block_group >= groups_count - block_group = %u," " groups_count = %u", block_group, ngroups); return NULL; } group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb); offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1); bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc); /* * sbi_array_rcu_deref returns with rcu unlocked, this is ok since * the pointer being dereferenced won't be dereferenced again. By * looking at the usage in add_new_gdb() the value isn't modified, * just the pointer, and so it remains valid. */ if (!bh_p) { ext4_error(sb, "Group descriptor not loaded - " "block_group = %u, group_desc = %u, desc = %u", block_group, group_desc, offset); return NULL; } desc = (struct ext4_group_desc *)( (__u8 *)bh_p->b_data + offset * EXT4_DESC_SIZE(sb)); if (bh) *bh = bh_p; return desc; } static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb, ext4_group_t block_group, struct buffer_head *bh) { ext4_grpblk_t next_zero_bit; unsigned long bitmap_size = sb->s_blocksize * 8; unsigned int offset = num_clusters_in_group(sb, block_group); if (bitmap_size <= offset) return 0; next_zero_bit = ext4_find_next_zero_bit(bh->b_data, bitmap_size, offset); return (next_zero_bit < bitmap_size ? next_zero_bit : 0); } struct ext4_group_info *ext4_get_group_info(struct super_block *sb, ext4_group_t group) { struct ext4_group_info **grp_info; long indexv, indexh; if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) return NULL; indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv); return grp_info[indexh]; } /* * Return the block number which was discovered to be invalid, or 0 if * the block bitmap is valid. */ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb, struct ext4_group_desc *desc, ext4_group_t block_group, struct buffer_head *bh) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_grpblk_t offset; ext4_grpblk_t next_zero_bit; ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb); ext4_fsblk_t blk; ext4_fsblk_t group_first_block; if (ext4_has_feature_flex_bg(sb)) { /* with FLEX_BG, the inode/block bitmaps and itable * blocks may not be in the group at all * so the bitmap validation will be skipped for those groups * or it has to also read the block group where the bitmaps * are located to verify they are set. 
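	 * Note that the caller, ext4_validate_block_bitmap(), has already
	 * verified the bitmap checksum before calling this helper, so
	 * flex_bg groups are not left entirely unchecked.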
*/ return 0; } group_first_block = ext4_group_first_block_no(sb, block_group); /* check whether block bitmap block number is set */ blk = ext4_block_bitmap(sb, desc); offset = blk - group_first_block; if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit || !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) /* bad block bitmap */ return blk; /* check whether the inode bitmap block number is set */ blk = ext4_inode_bitmap(sb, desc); offset = blk - group_first_block; if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit || !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) /* bad block bitmap */ return blk; /* check whether the inode table block number is set */ blk = ext4_inode_table(sb, desc); offset = blk - group_first_block; if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit || EXT4_B2C(sbi, offset + sbi->s_itb_per_group - 1) >= max_bit) return blk; next_zero_bit = ext4_find_next_zero_bit(bh->b_data, EXT4_B2C(sbi, offset + sbi->s_itb_per_group - 1) + 1, EXT4_B2C(sbi, offset)); if (next_zero_bit < EXT4_B2C(sbi, offset + sbi->s_itb_per_group - 1) + 1) /* bad bitmap for inode tables */ return blk; return 0; } static int ext4_validate_block_bitmap(struct super_block *sb, struct ext4_group_desc *desc, ext4_group_t block_group, struct buffer_head *bh) { ext4_fsblk_t blk; struct ext4_group_info *grp; if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) return 0; grp = ext4_get_group_info(sb, block_group); if (buffer_verified(bh)) return 0; if (!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) return -EFSCORRUPTED; ext4_lock_group(sb, block_group); if (buffer_verified(bh)) goto verified; if (unlikely(!ext4_block_bitmap_csum_verify(sb, desc, bh) || ext4_simulate_fail(sb, EXT4_SIM_BBITMAP_CRC))) { ext4_unlock_group(sb, block_group); ext4_error(sb, "bg %u: bad block bitmap checksum", block_group); ext4_mark_group_bitmap_corrupted(sb, block_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); return -EFSBADCRC; } blk = ext4_valid_block_bitmap(sb, desc, block_group, bh); if (unlikely(blk != 0)) { ext4_unlock_group(sb, block_group); ext4_error(sb, "bg %u: block %llu: invalid block bitmap", block_group, blk); ext4_mark_group_bitmap_corrupted(sb, block_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); return -EFSCORRUPTED; } blk = ext4_valid_block_bitmap_padding(sb, block_group, bh); if (unlikely(blk != 0)) { ext4_unlock_group(sb, block_group); ext4_error(sb, "bg %u: block %llu: padding at end of block bitmap is not set", block_group, blk); ext4_mark_group_bitmap_corrupted(sb, block_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); return -EFSCORRUPTED; } set_buffer_verified(bh); verified: ext4_unlock_group(sb, block_group); return 0; } /** * ext4_read_block_bitmap_nowait() * @sb: super block * @block_group: given block group * @ignore_locked: ignore locked buffers * * Read the bitmap for a given block_group,and validate the * bits for block/inode/inode tables are set in the bitmaps * * Return buffer_head on success or an ERR_PTR in case of failure. 
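 * A typical caller pairs this with ext4_wait_block_bitmap(); see
 * ext4_read_block_bitmap() below for the synchronous wrapper that does
 * exactly that.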
*/ struct buffer_head * ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group, bool ignore_locked) { struct ext4_group_desc *desc; struct ext4_sb_info *sbi = EXT4_SB(sb); struct buffer_head *bh; ext4_fsblk_t bitmap_blk; int err; KUNIT_STATIC_STUB_REDIRECT(ext4_read_block_bitmap_nowait, sb, block_group, ignore_locked); desc = ext4_get_group_desc(sb, block_group, NULL); if (!desc) return ERR_PTR(-EFSCORRUPTED); bitmap_blk = ext4_block_bitmap(sb, desc); if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) || (bitmap_blk >= ext4_blocks_count(sbi->s_es))) { ext4_error(sb, "Invalid block bitmap block %llu in " "block_group %u", bitmap_blk, block_group); ext4_mark_group_bitmap_corrupted(sb, block_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); return ERR_PTR(-EFSCORRUPTED); } bh = sb_getblk(sb, bitmap_blk); if (unlikely(!bh)) { ext4_warning(sb, "Cannot get buffer for block bitmap - " "block_group = %u, block_bitmap = %llu", block_group, bitmap_blk); return ERR_PTR(-ENOMEM); } if (ignore_locked && buffer_locked(bh)) { /* buffer under IO already, return if called for prefetching */ put_bh(bh); return NULL; } if (bitmap_uptodate(bh)) goto verify; lock_buffer(bh); if (bitmap_uptodate(bh)) { unlock_buffer(bh); goto verify; } ext4_lock_group(sb, block_group); if (ext4_has_group_desc_csum(sb) && (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { if (block_group == 0) { ext4_unlock_group(sb, block_group); unlock_buffer(bh); ext4_error(sb, "Block bitmap for bg 0 marked " "uninitialized"); err = -EFSCORRUPTED; goto out; } err = ext4_init_block_bitmap(sb, bh, block_group, desc); if (err) { ext4_unlock_group(sb, block_group); unlock_buffer(bh); ext4_error(sb, "Failed to init block bitmap for group " "%u: %d", block_group, err); goto out; } set_bitmap_uptodate(bh); set_buffer_uptodate(bh); set_buffer_verified(bh); ext4_unlock_group(sb, block_group); unlock_buffer(bh); return bh; } ext4_unlock_group(sb, block_group); if (buffer_uptodate(bh)) { /* * if not uninit if bh is uptodate, * bitmap is also uptodate */ set_bitmap_uptodate(bh); unlock_buffer(bh); goto verify; } /* * submit the buffer_head for reading */ set_buffer_new(bh); trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked); ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO | (ignore_locked ? 
REQ_RAHEAD : 0), ext4_end_bitmap_read); return bh; verify: err = ext4_validate_block_bitmap(sb, desc, block_group, bh); if (err) goto out; return bh; out: put_bh(bh); return ERR_PTR(err); } /* Returns 0 on success, -errno on error */ int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group, struct buffer_head *bh) { struct ext4_group_desc *desc; KUNIT_STATIC_STUB_REDIRECT(ext4_wait_block_bitmap, sb, block_group, bh); if (!buffer_new(bh)) return 0; desc = ext4_get_group_desc(sb, block_group, NULL); if (!desc) return -EFSCORRUPTED; wait_on_buffer(bh); ext4_simulate_fail_bh(sb, bh, EXT4_SIM_BBITMAP_EIO); if (!buffer_uptodate(bh)) { ext4_error_err(sb, EIO, "Cannot read block bitmap - " "block_group = %u, block_bitmap = %llu", block_group, (unsigned long long) bh->b_blocknr); ext4_mark_group_bitmap_corrupted(sb, block_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); return -EIO; } clear_buffer_new(bh); /* Panic or remount fs read-only if block bitmap is invalid */ return ext4_validate_block_bitmap(sb, desc, block_group, bh); } struct buffer_head * ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group) { struct buffer_head *bh; int err; bh = ext4_read_block_bitmap_nowait(sb, block_group, false); if (IS_ERR(bh)) return bh; err = ext4_wait_block_bitmap(sb, block_group, bh); if (err) { put_bh(bh); return ERR_PTR(err); } return bh; } /** * ext4_has_free_clusters() * @sbi: in-core super block structure. * @nclusters: number of needed blocks * @flags: flags from ext4_mb_new_blocks() * * Check if filesystem has nclusters free & available for allocation. * On success return 1, return 0 on failure. */ static int ext4_has_free_clusters(struct ext4_sb_info *sbi, s64 nclusters, unsigned int flags) { s64 free_clusters, dirty_clusters, rsv, resv_clusters; struct percpu_counter *fcc = &sbi->s_freeclusters_counter; struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter; free_clusters = percpu_counter_read_positive(fcc); dirty_clusters = percpu_counter_read_positive(dcc); resv_clusters = atomic64_read(&sbi->s_resv_clusters); /* * r_blocks_count should always be multiple of the cluster ratio so * we are safe to do a plane bit shift only. */ rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) + resv_clusters; if (free_clusters - (nclusters + rsv + dirty_clusters) < EXT4_FREECLUSTERS_WATERMARK) { free_clusters = percpu_counter_sum_positive(fcc); dirty_clusters = percpu_counter_sum_positive(dcc); } /* Check whether we have space after accounting for current * dirty clusters & root reserved clusters. */ if (free_clusters >= (rsv + nclusters + dirty_clusters)) return 1; /* Hm, nope. Are (enough) root reserved clusters available? */ if (uid_eq(sbi->s_resuid, current_fsuid()) || (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) || capable(CAP_SYS_RESOURCE) || (flags & EXT4_MB_USE_ROOT_BLOCKS)) { if (free_clusters >= (nclusters + dirty_clusters + resv_clusters)) return 1; } /* No free blocks. 
Let's see if we can dip into reserved pool */ if (flags & EXT4_MB_USE_RESERVED) { if (free_clusters >= (nclusters + dirty_clusters)) return 1; } return 0; } int ext4_claim_free_clusters(struct ext4_sb_info *sbi, s64 nclusters, unsigned int flags) { if (ext4_has_free_clusters(sbi, nclusters, flags)) { percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters); return 0; } else return -ENOSPC; } /** * ext4_should_retry_alloc() - check if a block allocation should be retried * @sb: superblock * @retries: number of retry attempts made so far * * ext4_should_retry_alloc() is called when ENOSPC is returned while * attempting to allocate blocks. If there's an indication that a pending * journal transaction might free some space and allow another attempt to * succeed, this function will wait for the current or committing transaction * to complete and then return TRUE. */ int ext4_should_retry_alloc(struct super_block *sb, int *retries) { struct ext4_sb_info *sbi = EXT4_SB(sb); if (!sbi->s_journal) return 0; if (++(*retries) > 3) { percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit); return 0; } /* * if there's no indication that blocks are about to be freed it's * possible we just missed a transaction commit that did so */ smp_mb(); if (sbi->s_mb_free_pending == 0) { if (test_opt(sb, DISCARD)) { atomic_inc(&sbi->s_retry_alloc_pending); flush_work(&sbi->s_discard_work); atomic_dec(&sbi->s_retry_alloc_pending); } return ext4_has_free_clusters(sbi, 1, 0); } /* * it's possible we've just missed a transaction commit here, * so ignore the returned status */ ext4_debug("%s: retrying operation after ENOSPC\n", sb->s_id); (void) jbd2_journal_force_commit_nested(sbi->s_journal); return 1; } /* * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks * * @handle: handle to this transaction * @inode: file inode * @goal: given target block(filesystem wide) * @count: pointer to total number of clusters needed * @errp: error code * * Return 1st allocated block number on success, *count stores total account * error stores in errp pointer */ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, ext4_fsblk_t goal, unsigned int flags, unsigned long *count, int *errp) { struct ext4_allocation_request ar; ext4_fsblk_t ret; memset(&ar, 0, sizeof(ar)); /* Fill with neighbour allocated blocks */ ar.inode = inode; ar.goal = goal; ar.len = count ? *count : 1; ar.flags = flags; ret = ext4_mb_new_blocks(handle, &ar, errp); if (count) *count = ar.len; /* * Account for the allocated meta blocks. We will never * fail EDQUOT for metdata, but we do account for it. */ if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) { dquot_alloc_block_nofail(inode, EXT4_C2B(EXT4_SB(inode->i_sb), ar.len)); } return ret; } /** * ext4_count_free_clusters() -- count filesystem free clusters * @sb: superblock * * Adds up the number of free clusters from each block group. 
*/ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb) { ext4_fsblk_t desc_count; struct ext4_group_desc *gdp; ext4_group_t i; ext4_group_t ngroups = ext4_get_groups_count(sb); struct ext4_group_info *grp; #ifdef EXT4FS_DEBUG struct ext4_super_block *es; ext4_fsblk_t bitmap_count; unsigned int x; struct buffer_head *bitmap_bh = NULL; es = EXT4_SB(sb)->s_es; desc_count = 0; bitmap_count = 0; gdp = NULL; for (i = 0; i < ngroups; i++) { gdp = ext4_get_group_desc(sb, i, NULL); if (!gdp) continue; grp = NULL; if (EXT4_SB(sb)->s_group_info) grp = ext4_get_group_info(sb, i); if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) desc_count += ext4_free_group_clusters(sb, gdp); brelse(bitmap_bh); bitmap_bh = ext4_read_block_bitmap(sb, i); if (IS_ERR(bitmap_bh)) { bitmap_bh = NULL; continue; } x = ext4_count_free(bitmap_bh->b_data, EXT4_CLUSTERS_PER_GROUP(sb) / 8); printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n", i, ext4_free_group_clusters(sb, gdp), x); bitmap_count += x; } brelse(bitmap_bh); printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu" ", computed = %llu, %llu\n", EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)), desc_count, bitmap_count); return bitmap_count; #else desc_count = 0; for (i = 0; i < ngroups; i++) { gdp = ext4_get_group_desc(sb, i, NULL); if (!gdp) continue; grp = NULL; if (EXT4_SB(sb)->s_group_info) grp = ext4_get_group_info(sb, i); if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) desc_count += ext4_free_group_clusters(sb, gdp); } return desc_count; #endif } static inline int test_root(ext4_group_t a, int b) { while (1) { if (a < b) return 0; if (a == b) return 1; if ((a % b) != 0) return 0; a = a / b; } } /** * ext4_bg_has_super - number of blocks used by the superblock in group * @sb: superblock for filesystem * @group: group number to check * * Return the number of blocks used by the superblock (primary or backup) * in this group. Currently this will be only 0 or 1. */ int ext4_bg_has_super(struct super_block *sb, ext4_group_t group) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; if (group == 0) return 1; if (ext4_has_feature_sparse_super2(sb)) { if (group == le32_to_cpu(es->s_backup_bgs[0]) || group == le32_to_cpu(es->s_backup_bgs[1])) return 1; return 0; } if ((group <= 1) || !ext4_has_feature_sparse_super(sb)) return 1; if (!(group & 1)) return 0; if (test_root(group, 3) || (test_root(group, 5)) || test_root(group, 7)) return 1; return 0; } static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, ext4_group_t group) { unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb); ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1; if (group == first || group == first + 1 || group == last) return 1; return 0; } static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, ext4_group_t group) { if (!ext4_bg_has_super(sb, group)) return 0; if (ext4_has_feature_meta_bg(sb)) return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg); else return EXT4_SB(sb)->s_gdb_count; } /** * ext4_bg_num_gdb - number of blocks used by the group table in group * @sb: superblock for filesystem * @group: group number to check * * Return the number of blocks used by the group descriptor table * (primary or backup) in this group. In the future there may be a * different number of descriptor blocks in each group. 
*/ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group) { unsigned long first_meta_bg = le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg); unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg) return ext4_bg_num_gdb_nometa(sb, group); return ext4_bg_num_gdb_meta(sb,group); } /* * This function returns the number of file system metadata blocks at * the beginning of a block group, including the reserved gdt blocks. */ unsigned int ext4_num_base_meta_blocks(struct super_block *sb, ext4_group_t block_group) { struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned num; /* Check for superblock and gdt backups in this group */ num = ext4_bg_has_super(sb, block_group); if (!ext4_has_feature_meta_bg(sb) || block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) * sbi->s_desc_per_block) { if (num) { num += ext4_bg_num_gdb_nometa(sb, block_group); num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks); } } else { /* For META_BG_BLOCK_GROUPS */ num += ext4_bg_num_gdb_meta(sb, block_group); } return num; } static unsigned int ext4_num_base_meta_clusters(struct super_block *sb, ext4_group_t block_group) { return EXT4_NUM_B2C(EXT4_SB(sb), ext4_num_base_meta_blocks(sb, block_group)); } /** * ext4_inode_to_goal_block - return a hint for block allocation * @inode: inode for block allocation * * Return the ideal location to start allocating blocks for a * newly created inode. */ ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); ext4_group_t block_group; ext4_grpblk_t colour; int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); ext4_fsblk_t bg_start; ext4_fsblk_t last_block; block_group = ei->i_block_group; if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { /* * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME * block groups per flexgroup, reserve the first block * group for directories and special files. Regular * files will start at the second block group. This * tends to speed up directory access and improves * fsck times. */ block_group &= ~(flex_size-1); if (S_ISREG(inode->i_mode)) block_group++; } bg_start = ext4_group_first_block_no(inode->i_sb, block_group); last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; /* * If we are doing delayed allocation, we don't need take * colour into account. */ if (test_opt(inode->i_sb, DELALLOC)) return bg_start; if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) colour = (task_pid_nr(current) % 16) * (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); else colour = (task_pid_nr(current) % 16) * ((last_block - bg_start) / 16); return bg_start + colour; } |
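/*
 * Illustrative sketch (userspace, not part of ext4): the sparse_super rule
 * implemented by test_root()/ext4_bg_has_super() above keeps backup
 * superblocks only in groups 0, 1 and powers of 3, 5 and 7. The helper names
 * below are made up for the example; only the arithmetic mirrors the code.
 */
#include <stdbool.h>
#include <stdio.h>

static bool is_power_of(unsigned int a, unsigned int b)
{
	for (;;) {
		if (a < b)
			return false;
		if (a == b)
			return true;
		if (a % b)
			return false;
		a /= b;
	}
}

static bool sparse_group_has_backup_sb(unsigned int group)
{
	if (group <= 1)
		return true;
	if (!(group & 1))		/* even groups never hold a backup */
		return false;
	return is_power_of(group, 3) || is_power_of(group, 5) ||
	       is_power_of(group, 7);
}

int main(void)
{
	unsigned int g;

	/* prints: 0 1 3 5 7 9 25 27 49 81 */
	for (g = 0; g < 100; g++)
		if (sparse_group_has_backup_sb(g))
			printf("%u ", g);
	printf("\n");
	return 0;
}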
/* * Linux driver for Technisat DVB-S/S2 USB 2.0 device * * Copyright (C) 2010 Patrick Boettcher, * Kernel Labs Inc.
PO Box 745, St James, NY 11780 * * Development was sponsored by Technisat Digital UK Limited, whose * registered office is Witan Gate House 500 - 600 Witan Gate West, * Milton Keynes, MK9 1SH * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * * THIS PROGRAM IS PROVIDED "AS IS" AND BOTH THE COPYRIGHT HOLDER AND * TECHNISAT DIGITAL UK LTD DISCLAIM ALL WARRANTIES WITH REGARD TO * THIS PROGRAM INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY OR * FITNESS FOR A PARTICULAR PURPOSE. NEITHER THE COPYRIGHT HOLDER * NOR TECHNISAT DIGITAL UK LIMITED SHALL BE LIABLE FOR ANY SPECIAL, * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR * IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS PROGRAM. See the * GNU General Public License for more details. */ #define DVB_USB_LOG_PREFIX "technisat-usb2" #include "dvb-usb.h" #include "stv6110x.h" #include "stv090x.h" /* module parameters */ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (bit-mask: 1=info,2=eeprom,4=i2c,8=rc)." \ DVB_USB_DEBUG_STATUS); /* disables all LED control command and * also does not start the signal polling thread */ static int disable_led_control; module_param(disable_led_control, int, 0444); MODULE_PARM_DESC(disable_led_control, "disable LED control of the device (default: 0 - LED control is active)."); /* device private data */ struct technisat_usb2_state { struct dvb_usb_device *dev; struct delayed_work green_led_work; u8 power_state; u16 last_scan_code; u8 buf[64]; }; /* debug print helpers */ #define deb_info(args...) dprintk(debug, 0x01, args) #define deb_eeprom(args...) dprintk(debug, 0x02, args) #define deb_i2c(args...) dprintk(debug, 0x04, args) #define deb_rc(args...) 
dprintk(debug, 0x08, args) /* vendor requests */ #define SET_IFCLK_TO_EXTERNAL_TSCLK_VENDOR_REQUEST 0xB3 #define SET_FRONT_END_RESET_VENDOR_REQUEST 0xB4 #define GET_VERSION_INFO_VENDOR_REQUEST 0xB5 #define SET_GREEN_LED_VENDOR_REQUEST 0xB6 #define SET_RED_LED_VENDOR_REQUEST 0xB7 #define GET_IR_DATA_VENDOR_REQUEST 0xB8 #define SET_LED_TIMER_DIVIDER_VENDOR_REQUEST 0xB9 #define SET_USB_REENUMERATION 0xBA /* i2c-access methods */ #define I2C_SPEED_100KHZ_BIT 0x40 #define I2C_STATUS_NAK 7 #define I2C_STATUS_OK 8 static int technisat_usb2_i2c_access(struct usb_device *udev, u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen) { u8 *b; int ret, actual_length; b = kmalloc(64, GFP_KERNEL); if (!b) return -ENOMEM; deb_i2c("i2c-access: %02x, tx: ", device_addr); debug_dump(tx, txlen, deb_i2c); deb_i2c(" "); if (txlen > 62) { err("i2c TX buffer can't exceed 62 bytes (dev 0x%02x)", device_addr); txlen = 62; } if (rxlen > 62) { err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)", device_addr); rxlen = 62; } b[0] = I2C_SPEED_100KHZ_BIT; b[1] = device_addr << 1; if (rx != NULL) { b[0] |= rxlen; b[1] |= 1; } memcpy(&b[2], tx, txlen); ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x01), b, 2 + txlen, NULL, 1000); if (ret < 0) { err("i2c-error: out failed %02x = %d", device_addr, ret); goto err; } ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, 0x01), b, 64, &actual_length, 1000); if (ret < 0) { err("i2c-error: in failed %02x = %d", device_addr, ret); goto err; } if (b[0] != I2C_STATUS_OK) { err("i2c-error: %02x = %d", device_addr, b[0]); /* handle tuner-i2c-nak */ if (!(b[0] == I2C_STATUS_NAK && device_addr == 0x60 /* && device_is_technisat_usb2 */)) goto err; } deb_i2c("status: %d, ", b[0]); if (rx != NULL) { memcpy(rx, &b[2], rxlen); deb_i2c("rx (%d): ", rxlen); debug_dump(rx, rxlen, deb_i2c); } deb_i2c("\n"); err: kfree(b); return ret; } static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { int ret = 0, i; struct dvb_usb_device *d = i2c_get_adapdata(adap); /* Ensure nobody else hits the i2c bus while we're sending our sequence of messages, (such as the remote control thread) */ if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { if (i+1 < num && msg[i+1].flags & I2C_M_RD) { ret = technisat_usb2_i2c_access(d->udev, msg[i+1].addr, msg[i].buf, msg[i].len, msg[i+1].buf, msg[i+1].len); if (ret != 0) break; i++; } else { ret = technisat_usb2_i2c_access(d->udev, msg[i].addr, msg[i].buf, msg[i].len, NULL, 0); if (ret != 0) break; } } if (ret == 0) ret = i; mutex_unlock(&d->i2c_mutex); return ret; } static u32 technisat_usb2_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm technisat_usb2_i2c_algo = { .master_xfer = technisat_usb2_i2c_xfer, .functionality = technisat_usb2_i2c_func, }; #if 0 static void technisat_usb2_frontend_reset(struct usb_device *udev) { usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SET_FRONT_END_RESET_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, 10, 0, NULL, 0, 500); } #endif /* LED control */ enum technisat_usb2_led_state { TECH_LED_OFF, TECH_LED_BLINK, TECH_LED_ON, TECH_LED_UNDEFINED }; static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum technisat_usb2_led_state st) { struct technisat_usb2_state *state = d->priv; u8 *led = state->buf; int ret; led[0] = red ? 
SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST; if (disable_led_control && st != TECH_LED_OFF) return 0; switch (st) { case TECH_LED_ON: led[1] = 0x82; break; case TECH_LED_BLINK: led[1] = 0x82; if (red) { led[2] = 0x02; led[3] = 10; led[4] = 10; } else { led[2] = 0xff; led[3] = 50; led[4] = 50; } led[5] = 1; break; default: case TECH_LED_OFF: led[1] = 0x80; break; } if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, led, 8, 500); mutex_unlock(&d->i2c_mutex); return ret; } static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green) { struct technisat_usb2_state *state = d->priv; u8 *b = state->buf; int ret; b[0] = 0; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), SET_LED_TIMER_DIVIDER_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, (red << 8) | green, 0, b, 1, 500); mutex_unlock(&d->i2c_mutex); return ret; } static void technisat_usb2_green_led_control(struct work_struct *work) { struct technisat_usb2_state *state = container_of(work, struct technisat_usb2_state, green_led_work.work); struct dvb_frontend *fe = state->dev->adapter[0].fe_adap[0].fe; if (state->power_state == 0) goto schedule; if (fe != NULL) { enum fe_status status; if (fe->ops.read_status(fe, &status) != 0) goto schedule; if (status & FE_HAS_LOCK) { u32 ber; if (fe->ops.read_ber(fe, &ber) != 0) goto schedule; if (ber > 1000) technisat_usb2_set_led(state->dev, 0, TECH_LED_BLINK); else technisat_usb2_set_led(state->dev, 0, TECH_LED_ON); } else technisat_usb2_set_led(state->dev, 0, TECH_LED_OFF); } schedule: schedule_delayed_work(&state->green_led_work, msecs_to_jiffies(500)); } /* method to find out whether the firmware has to be downloaded or not */ static int technisat_usb2_identify_state(struct usb_device *udev, const struct dvb_usb_device_properties *props, const struct dvb_usb_device_description **desc, int *cold) { int ret; u8 *version; version = kmalloc(3, GFP_KERNEL); if (!version) return -ENOMEM; /* first select the interface */ if (usb_set_interface(udev, 0, 1) != 0) err("could not set alternate setting to 0"); else info("set alternate setting"); *cold = 0; /* by default do not download a firmware - just in case something is wrong */ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), GET_VERSION_INFO_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version, 3, 500); if (ret < 0) *cold = 1; else { info("firmware version: %d.%d", version[1], version[2]); *cold = 0; } kfree(version); return 0; } /* power control */ static int technisat_usb2_power_ctrl(struct dvb_usb_device *d, int level) { struct technisat_usb2_state *state = d->priv; state->power_state = level; if (disable_led_control) return 0; /* green led is turned off in any case - will be turned on when tuning */ technisat_usb2_set_led(d, 0, TECH_LED_OFF); /* red led is turned on all the time */ technisat_usb2_set_led(d, 1, TECH_LED_ON); return 0; } /* mac address reading - from the eeprom */ #if 0 static void technisat_usb2_eeprom_dump(struct dvb_usb_device *d) { u8 reg; u8 b[16]; int i, j; /* full EEPROM dump */ for (j = 0; j < 256 * 4; j += 16) { reg = j; if (technisat_usb2_i2c_access(d->udev, 0x50 + j / 256, ®, 1, b, 16) != 0) break; deb_eeprom("EEPROM: %01x%02x: ", j / 256, reg); for (i = 0; i < 16; i++) deb_eeprom("%02x ", b[i]); deb_eeprom("\n"); } } #endif static u8 
technisat_usb2_calc_lrc(const u8 *b, u16 length) { u8 lrc = 0; while (--length) lrc ^= *b++; return lrc; } static int technisat_usb2_eeprom_lrc_read(struct dvb_usb_device *d, u16 offset, u8 *b, u16 length, u8 tries) { u8 bo = offset & 0xff; struct i2c_msg msg[] = { { .addr = 0x50 | ((offset >> 8) & 0x3), .buf = &bo, .len = 1 }, { .addr = 0x50 | ((offset >> 8) & 0x3), .flags = I2C_M_RD, .buf = b, .len = length } }; while (tries--) { int status; if (i2c_transfer(&d->i2c_adap, msg, 2) != 2) break; status = technisat_usb2_calc_lrc(b, length - 1) == b[length - 1]; if (status) return 0; } return -EREMOTEIO; } #define EEPROM_MAC_START 0x3f8 #define EEPROM_MAC_TOTAL 8 static int technisat_usb2_read_mac_address(struct dvb_usb_device *d, u8 mac[]) { u8 buf[EEPROM_MAC_TOTAL]; if (technisat_usb2_eeprom_lrc_read(d, EEPROM_MAC_START, buf, EEPROM_MAC_TOTAL, 4) != 0) return -ENODEV; memcpy(mac, buf, 6); return 0; } static struct stv090x_config technisat_usb2_stv090x_config; /* frontend attach */ static int technisat_usb2_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage) { int i; u8 gpio[3] = { 0 }; /* 0 = 2, 1 = 3, 2 = 4 */ gpio[2] = 1; /* high - voltage ? */ switch (voltage) { case SEC_VOLTAGE_13: gpio[0] = 1; break; case SEC_VOLTAGE_18: gpio[0] = 1; gpio[1] = 1; break; default: case SEC_VOLTAGE_OFF: break; } for (i = 0; i < 3; i++) if (technisat_usb2_stv090x_config.set_gpio(fe, i+2, 0, gpio[i], 0) != 0) return -EREMOTEIO; return 0; } static struct stv090x_config technisat_usb2_stv090x_config = { .device = STV0903, .demod_mode = STV090x_SINGLE, .clk_mode = STV090x_CLK_EXT, .xtal = 8000000, .address = 0x68, .ts1_mode = STV090x_TSMODE_DVBCI, .ts1_clk = 13400000, .ts1_tei = 1, .repeater_level = STV090x_RPTLEVEL_64, .tuner_bbgain = 6, }; static struct stv6110x_config technisat_usb2_stv6110x_config = { .addr = 0x60, .refclk = 16000000, .clk_div = 2, }; static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a) { struct usb_device *udev = a->dev->udev; int ret; a->fe_adap[0].fe = dvb_attach(stv090x_attach, &technisat_usb2_stv090x_config, &a->dev->i2c_adap, STV090x_DEMODULATOR_0); if (a->fe_adap[0].fe) { const struct stv6110x_devctl *ctl; ctl = dvb_attach(stv6110x_attach, a->fe_adap[0].fe, &technisat_usb2_stv6110x_config, &a->dev->i2c_adap); if (ctl) { technisat_usb2_stv090x_config.tuner_init = ctl->tuner_init; technisat_usb2_stv090x_config.tuner_sleep = ctl->tuner_sleep; technisat_usb2_stv090x_config.tuner_set_mode = ctl->tuner_set_mode; technisat_usb2_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency; technisat_usb2_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency; technisat_usb2_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth; technisat_usb2_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth; technisat_usb2_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain; technisat_usb2_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain; technisat_usb2_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk; technisat_usb2_stv090x_config.tuner_get_status = ctl->tuner_get_status; /* call the init function once to initialize tuner's clock output divider and demod's master clock */ if (a->fe_adap[0].fe->ops.init) a->fe_adap[0].fe->ops.init(a->fe_adap[0].fe); if (mutex_lock_interruptible(&a->dev->i2c_mutex) < 0) return -EAGAIN; ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SET_IFCLK_TO_EXTERNAL_TSCLK_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, NULL, 0, 500); mutex_unlock(&a->dev->i2c_mutex); if (ret != 0) err("could not set 
IF_CLK to external"); a->fe_adap[0].fe->ops.set_voltage = technisat_usb2_set_voltage; /* if everything was successful assign a nice name to the frontend */ strscpy(a->fe_adap[0].fe->ops.info.name, a->dev->desc->name, sizeof(a->fe_adap[0].fe->ops.info.name)); } else { dvb_frontend_detach(a->fe_adap[0].fe); a->fe_adap[0].fe = NULL; } } technisat_usb2_set_led_timer(a->dev, 1, 1); return a->fe_adap[0].fe == NULL ? -ENODEV : 0; } /* Remote control */ /* the device is giving providing raw IR-signals to the host mapping * it only to one remote control is just the default implementation */ #define NOMINAL_IR_BIT_TRANSITION_TIME_US 889 #define NOMINAL_IR_BIT_TIME_US (2 * NOMINAL_IR_BIT_TRANSITION_TIME_US) #define FIRMWARE_CLOCK_TICK 83333 #define FIRMWARE_CLOCK_DIVISOR 256 #define IR_PERCENT_TOLERANCE 15 #define NOMINAL_IR_BIT_TRANSITION_TICKS ((NOMINAL_IR_BIT_TRANSITION_TIME_US * 1000 * 1000) / FIRMWARE_CLOCK_TICK) #define NOMINAL_IR_BIT_TRANSITION_TICK_COUNT (NOMINAL_IR_BIT_TRANSITION_TICKS / FIRMWARE_CLOCK_DIVISOR) #define NOMINAL_IR_BIT_TIME_TICKS ((NOMINAL_IR_BIT_TIME_US * 1000 * 1000) / FIRMWARE_CLOCK_TICK) #define NOMINAL_IR_BIT_TIME_TICK_COUNT (NOMINAL_IR_BIT_TIME_TICKS / FIRMWARE_CLOCK_DIVISOR) #define MINIMUM_IR_BIT_TRANSITION_TICK_COUNT (NOMINAL_IR_BIT_TRANSITION_TICK_COUNT - ((NOMINAL_IR_BIT_TRANSITION_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100)) #define MAXIMUM_IR_BIT_TRANSITION_TICK_COUNT (NOMINAL_IR_BIT_TRANSITION_TICK_COUNT + ((NOMINAL_IR_BIT_TRANSITION_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100)) #define MINIMUM_IR_BIT_TIME_TICK_COUNT (NOMINAL_IR_BIT_TIME_TICK_COUNT - ((NOMINAL_IR_BIT_TIME_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100)) #define MAXIMUM_IR_BIT_TIME_TICK_COUNT (NOMINAL_IR_BIT_TIME_TICK_COUNT + ((NOMINAL_IR_BIT_TIME_TICK_COUNT * IR_PERCENT_TOLERANCE) / 100)) static int technisat_usb2_get_ir(struct dvb_usb_device *d) { struct technisat_usb2_state *state = d->priv; struct ir_raw_event ev; u8 *buf = state->buf; int i, ret; buf[0] = GET_IR_DATA_VENDOR_REQUEST; buf[1] = 0x08; buf[2] = 0x8f; buf[3] = MINIMUM_IR_BIT_TRANSITION_TICK_COUNT; buf[4] = MAXIMUM_IR_BIT_TIME_TICK_COUNT; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), GET_IR_DATA_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, buf, 5, 500); if (ret < 0) goto unlock; buf[1] = 0; buf[2] = 0; ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), GET_IR_DATA_VENDOR_REQUEST, USB_TYPE_VENDOR | USB_DIR_IN, 0x8080, 0, buf, 62, 500); unlock: mutex_unlock(&d->i2c_mutex); if (ret < 0) return ret; if (ret == 1) return 0; /* no key pressed */ /* decoding */ #if 0 deb_rc("RC: %d ", ret); debug_dump(buf + 1, ret, deb_rc); #endif ev.pulse = 0; for (i = 1; i < ARRAY_SIZE(state->buf); i++) { if (buf[i] == 0xff) { ev.pulse = 0; ev.duration = 889 * 2; ir_raw_event_store(d->rc_dev, &ev); break; } ev.pulse = !ev.pulse; ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / (1000 * 1000); ir_raw_event_store(d->rc_dev, &ev); } ir_raw_event_handle(d->rc_dev); return 1; } static int technisat_usb2_rc_query(struct dvb_usb_device *d) { int ret = technisat_usb2_get_ir(d); if (ret < 0) return ret; if (ret == 0) return 0; if (!disable_led_control) technisat_usb2_set_led(d, 1, TECH_LED_BLINK); return 0; } /* DVB-USB and USB stuff follows */ enum { TECHNISAT_USB2_DVB_S2, }; static struct usb_device_id technisat_usb2_id_table[] = { DVB_USB_DEV(TECHNISAT, TECHNISAT_USB2_DVB_S2), { } }; MODULE_DEVICE_TABLE(usb, technisat_usb2_id_table); /* device description 
*/ static struct dvb_usb_device_properties technisat_usb2_devices = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .identify_state = technisat_usb2_identify_state, .firmware = "dvb-usb-SkyStar_USB_HD_FW_v17_63.HEX.fw", .size_of_priv = sizeof(struct technisat_usb2_state), .i2c_algo = &technisat_usb2_i2c_algo, .power_ctrl = technisat_usb2_power_ctrl, .read_mac_address = technisat_usb2_read_mac_address, .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = technisat_usb2_frontend_attach, .stream = { .type = USB_ISOC, .count = 4, .endpoint = 0x2, .u = { .isoc = { .framesperurb = 32, .framesize = 2048, .interval = 1, } } }, }}, .size_of_priv = 0, }, }, .num_device_descs = 1, .devices = { { "Technisat SkyStar USB HD (DVB-S/S2)", { &technisat_usb2_id_table[TECHNISAT_USB2_DVB_S2], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = 100, .rc_codes = RC_MAP_TECHNISAT_USB2, .module_name = "technisat-usb2", .rc_query = technisat_usb2_rc_query, .allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER, .driver_type = RC_DRIVER_IR_RAW, } }; static int technisat_usb2_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct dvb_usb_device *dev; if (dvb_usb_device_init(intf, &technisat_usb2_devices, THIS_MODULE, &dev, adapter_nr) != 0) return -ENODEV; if (dev) { struct technisat_usb2_state *state = dev->priv; state->dev = dev; if (!disable_led_control) { INIT_DELAYED_WORK(&state->green_led_work, technisat_usb2_green_led_control); schedule_delayed_work(&state->green_led_work, msecs_to_jiffies(500)); } } return 0; } static void technisat_usb2_disconnect(struct usb_interface *intf) { struct dvb_usb_device *dev = usb_get_intfdata(intf); /* work and stuff was only created when the device is hot-state */ if (dev != NULL) { struct technisat_usb2_state *state = dev->priv; if (state != NULL) cancel_delayed_work_sync(&state->green_led_work); } dvb_usb_device_exit(intf); } static struct usb_driver technisat_usb2_driver = { .name = "dvb_usb_technisat_usb2", .probe = technisat_usb2_probe, .disconnect = technisat_usb2_disconnect, .id_table = technisat_usb2_id_table, }; module_usb_driver(technisat_usb2_driver); MODULE_AUTHOR("Patrick Boettcher <pboettcher@kernellabs.com>"); MODULE_DESCRIPTION("Driver for Technisat DVB-S/S2 USB 2.0 device"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); |
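/*
 * Illustrative sketch (not part of the driver): the EEPROM reads used for the
 * MAC address above are protected by a simple XOR longitudinal redundancy
 * check - the last byte of a record is expected to match the XOR of the bytes
 * before it. The helpers below show the idea in isolation; the names are
 * made up, and the driver's technisat_usb2_calc_lrc() applies the same
 * principle with its own byte bounds.
 */
#include <linux/types.h>

static u8 xor_lrc(const u8 *buf, size_t len)
{
	u8 lrc = 0;
	size_t i;

	for (i = 0; i < len; i++)
		lrc ^= buf[i];
	return lrc;
}

/* returns true when the trailing LRC byte matches the preceding payload */
static bool xor_lrc_ok(const u8 *record, size_t record_len)
{
	if (record_len < 2)
		return false;
	return xor_lrc(record, record_len - 1) == record[record_len - 1];
}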
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM writeback #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ) #define
_TRACE_WRITEBACK_H #include <linux/tracepoint.h> #include <linux/backing-dev.h> #include <linux/writeback.h> #define show_inode_state(state) \ __print_flags(state, "|", \ {I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \ {I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \ {I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \ {I_NEW, "I_NEW"}, \ {I_WILL_FREE, "I_WILL_FREE"}, \ {I_FREEING, "I_FREEING"}, \ {I_CLEAR, "I_CLEAR"}, \ {I_SYNC, "I_SYNC"}, \ {I_DIRTY_TIME, "I_DIRTY_TIME"}, \ {I_REFERENCED, "I_REFERENCED"}, \ {I_LINKABLE, "I_LINKABLE"}, \ {I_WB_SWITCH, "I_WB_SWITCH"}, \ {I_OVL_INUSE, "I_OVL_INUSE"}, \ {I_CREATING, "I_CREATING"}, \ {I_DONTCACHE, "I_DONTCACHE"}, \ {I_SYNC_QUEUED, "I_SYNC_QUEUED"}, \ {I_PINNING_NETFS_WB, "I_PINNING_NETFS_WB"}, \ {I_LRU_ISOLATING, "I_LRU_ISOLATING"} \ ) /* enums need to be exported to user space */ #undef EM #undef EMe #define EM(a,b) TRACE_DEFINE_ENUM(a); #define EMe(a,b) TRACE_DEFINE_ENUM(a); #define WB_WORK_REASON \ EM( WB_REASON_BACKGROUND, "background") \ EM( WB_REASON_VMSCAN, "vmscan") \ EM( WB_REASON_SYNC, "sync") \ EM( WB_REASON_PERIODIC, "periodic") \ EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \ EM( WB_REASON_FS_FREE_SPACE, "fs_free_space") \ EM( WB_REASON_FORKER_THREAD, "forker_thread") \ EMe(WB_REASON_FOREIGN_FLUSH, "foreign_flush") WB_WORK_REASON /* * Now redefine the EM() and EMe() macros to map the enums to the strings * that will be printed in the output. */ #undef EM #undef EMe #define EM(a,b) { a, b }, #define EMe(a,b) { a, b } struct wb_writeback_work; DECLARE_EVENT_CLASS(writeback_folio_template, TP_PROTO(struct folio *folio, struct address_space *mapping), TP_ARGS(folio, mapping), TP_STRUCT__entry ( __array(char, name, 32) __field(ino_t, ino) __field(pgoff_t, index) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(mapping ? inode_to_bdi(mapping->host) : NULL), 32); __entry->ino = (mapping && mapping->host) ? 
mapping->host->i_ino : 0; __entry->index = folio->index; ), TP_printk("bdi %s: ino=%lu index=%lu", __entry->name, (unsigned long)__entry->ino, __entry->index ) ); DEFINE_EVENT(writeback_folio_template, writeback_dirty_folio, TP_PROTO(struct folio *folio, struct address_space *mapping), TP_ARGS(folio, mapping) ); DEFINE_EVENT(writeback_folio_template, folio_wait_writeback, TP_PROTO(struct folio *folio, struct address_space *mapping), TP_ARGS(folio, mapping) ); DECLARE_EVENT_CLASS(writeback_dirty_inode_template, TP_PROTO(struct inode *inode, int flags), TP_ARGS(inode, flags), TP_STRUCT__entry ( __array(char, name, 32) __field(ino_t, ino) __field(unsigned long, state) __field(unsigned long, flags) ), TP_fast_assign( struct backing_dev_info *bdi = inode_to_bdi(inode); /* may be called for files on pseudo FSes w/ unregistered bdi */ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->flags = flags; ), TP_printk("bdi %s: ino=%lu state=%s flags=%s", __entry->name, (unsigned long)__entry->ino, show_inode_state(__entry->state), show_inode_state(__entry->flags) ) ); DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty, TP_PROTO(struct inode *inode, int flags), TP_ARGS(inode, flags) ); DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start, TP_PROTO(struct inode *inode, int flags), TP_ARGS(inode, flags) ); DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode, TP_PROTO(struct inode *inode, int flags), TP_ARGS(inode, flags) ); #ifdef CREATE_TRACE_POINTS #ifdef CONFIG_CGROUP_WRITEBACK static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb) { return cgroup_ino(wb->memcg_css->cgroup); } static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc) { if (wbc->wb) return __trace_wb_assign_cgroup(wbc->wb); else return 1; } #else /* CONFIG_CGROUP_WRITEBACK */ static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb) { return 1; } static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc) { return 1; } #endif /* CONFIG_CGROUP_WRITEBACK */ #endif /* CREATE_TRACE_POINTS */ #ifdef CONFIG_CGROUP_WRITEBACK TRACE_EVENT(inode_foreign_history, TP_PROTO(struct inode *inode, struct writeback_control *wbc, unsigned int history), TP_ARGS(inode, wbc, history), TP_STRUCT__entry( __array(char, name, 32) __field(ino_t, ino) __field(ino_t, cgroup_ino) __field(unsigned int, history) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32); __entry->ino = inode->i_ino; __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); __entry->history = history; ), TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x", __entry->name, (unsigned long)__entry->ino, (unsigned long)__entry->cgroup_ino, __entry->history ) ); TRACE_EVENT(inode_switch_wbs, TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb, struct bdi_writeback *new_wb), TP_ARGS(inode, old_wb, new_wb), TP_STRUCT__entry( __array(char, name, 32) __field(ino_t, ino) __field(ino_t, old_cgroup_ino) __field(ino_t, new_cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32); __entry->ino = inode->i_ino; __entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb); __entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb); ), TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu", __entry->name, (unsigned long)__entry->ino, (unsigned long)__entry->old_cgroup_ino, (unsigned long)__entry->new_cgroup_ino ) ); 
TRACE_EVENT(track_foreign_dirty, TP_PROTO(struct folio *folio, struct bdi_writeback *wb), TP_ARGS(folio, wb), TP_STRUCT__entry( __array(char, name, 32) __field(u64, bdi_id) __field(ino_t, ino) __field(unsigned int, memcg_id) __field(ino_t, cgroup_ino) __field(ino_t, page_cgroup_ino) ), TP_fast_assign( struct address_space *mapping = folio_mapping(folio); struct inode *inode = mapping ? mapping->host : NULL; strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->bdi_id = wb->bdi->id; __entry->ino = inode ? inode->i_ino : 0; __entry->memcg_id = wb->memcg_css->id; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); __entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup); ), TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu", __entry->name, __entry->bdi_id, (unsigned long)__entry->ino, __entry->memcg_id, (unsigned long)__entry->cgroup_ino, (unsigned long)__entry->page_cgroup_ino ) ); TRACE_EVENT(flush_foreign, TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id, unsigned int frn_memcg_id), TP_ARGS(wb, frn_bdi_id, frn_memcg_id), TP_STRUCT__entry( __array(char, name, 32) __field(ino_t, cgroup_ino) __field(unsigned int, frn_bdi_id) __field(unsigned int, frn_memcg_id) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); __entry->frn_bdi_id = frn_bdi_id; __entry->frn_memcg_id = frn_memcg_id; ), TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u", __entry->name, (unsigned long)__entry->cgroup_ino, __entry->frn_bdi_id, __entry->frn_memcg_id ) ); #endif DECLARE_EVENT_CLASS(writeback_write_inode_template, TP_PROTO(struct inode *inode, struct writeback_control *wbc), TP_ARGS(inode, wbc), TP_STRUCT__entry ( __array(char, name, 32) __field(ino_t, ino) __field(int, sync_mode) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32); __entry->ino = inode->i_ino; __entry->sync_mode = wbc->sync_mode; __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); ), TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu", __entry->name, (unsigned long)__entry->ino, __entry->sync_mode, (unsigned long)__entry->cgroup_ino ) ); DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start, TP_PROTO(struct inode *inode, struct writeback_control *wbc), TP_ARGS(inode, wbc) ); DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode, TP_PROTO(struct inode *inode, struct writeback_control *wbc), TP_ARGS(inode, wbc) ); DECLARE_EVENT_CLASS(writeback_work_class, TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), TP_ARGS(wb, work), TP_STRUCT__entry( __array(char, name, 32) __field(long, nr_pages) __field(dev_t, sb_dev) __field(int, sync_mode) __field(int, for_kupdate) __field(int, range_cyclic) __field(int, for_background) __field(int, reason) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->nr_pages = work->nr_pages; __entry->sb_dev = work->sb ? 
work->sb->s_dev : 0; __entry->sync_mode = work->sync_mode; __entry->for_kupdate = work->for_kupdate; __entry->range_cyclic = work->range_cyclic; __entry->for_background = work->for_background; __entry->reason = work->reason; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d " "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu", __entry->name, MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev), __entry->nr_pages, __entry->sync_mode, __entry->for_kupdate, __entry->range_cyclic, __entry->for_background, __print_symbolic(__entry->reason, WB_WORK_REASON), (unsigned long)__entry->cgroup_ino ) ); #define DEFINE_WRITEBACK_WORK_EVENT(name) \ DEFINE_EVENT(writeback_work_class, name, \ TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \ TP_ARGS(wb, work)) DEFINE_WRITEBACK_WORK_EVENT(writeback_queue); DEFINE_WRITEBACK_WORK_EVENT(writeback_exec); DEFINE_WRITEBACK_WORK_EVENT(writeback_start); DEFINE_WRITEBACK_WORK_EVENT(writeback_written); DEFINE_WRITEBACK_WORK_EVENT(writeback_wait); TRACE_EVENT(writeback_pages_written, TP_PROTO(long pages_written), TP_ARGS(pages_written), TP_STRUCT__entry( __field(long, pages) ), TP_fast_assign( __entry->pages = pages_written; ), TP_printk("%ld", __entry->pages) ); DECLARE_EVENT_CLASS(writeback_class, TP_PROTO(struct bdi_writeback *wb), TP_ARGS(wb), TP_STRUCT__entry( __array(char, name, 32) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: cgroup_ino=%lu", __entry->name, (unsigned long)__entry->cgroup_ino ) ); #define DEFINE_WRITEBACK_EVENT(name) \ DEFINE_EVENT(writeback_class, name, \ TP_PROTO(struct bdi_writeback *wb), \ TP_ARGS(wb)) DEFINE_WRITEBACK_EVENT(writeback_wake_background); TRACE_EVENT(writeback_bdi_register, TP_PROTO(struct backing_dev_info *bdi), TP_ARGS(bdi), TP_STRUCT__entry( __array(char, name, 32) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); ), TP_printk("bdi %s", __entry->name ) ); DECLARE_EVENT_CLASS(wbc_class, TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), TP_ARGS(wbc, bdi), TP_STRUCT__entry( __array(char, name, 32) __field(long, nr_to_write) __field(long, pages_skipped) __field(int, sync_mode) __field(int, for_kupdate) __field(int, for_background) __field(int, for_reclaim) __field(int, range_cyclic) __field(long, range_start) __field(long, range_end) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); __entry->nr_to_write = wbc->nr_to_write; __entry->pages_skipped = wbc->pages_skipped; __entry->sync_mode = wbc->sync_mode; __entry->for_kupdate = wbc->for_kupdate; __entry->for_background = wbc->for_background; __entry->for_reclaim = wbc->for_reclaim; __entry->range_cyclic = wbc->range_cyclic; __entry->range_start = (long)wbc->range_start; __entry->range_end = (long)wbc->range_end; __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); ), TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " "bgrd=%d reclm=%d cyclic=%d " "start=0x%lx end=0x%lx cgroup_ino=%lu", __entry->name, __entry->nr_to_write, __entry->pages_skipped, __entry->sync_mode, __entry->for_kupdate, __entry->for_background, __entry->for_reclaim, __entry->range_cyclic, __entry->range_start, __entry->range_end, (unsigned long)__entry->cgroup_ino ) ) #define DEFINE_WBC_EVENT(name) \ DEFINE_EVENT(wbc_class, name, \ TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), 
\ TP_ARGS(wbc, bdi)) DEFINE_WBC_EVENT(wbc_writepage); TRACE_EVENT(writeback_queue_io, TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work, unsigned long dirtied_before, int moved), TP_ARGS(wb, work, dirtied_before, moved), TP_STRUCT__entry( __array(char, name, 32) __field(unsigned long, older) __field(long, age) __field(int, moved) __field(int, reason) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->older = dirtied_before; __entry->age = (jiffies - dirtied_before) * 1000 / HZ; __entry->moved = moved; __entry->reason = work->reason; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu", __entry->name, __entry->older, /* dirtied_before in jiffies */ __entry->age, /* dirtied_before in relative milliseconds */ __entry->moved, __print_symbolic(__entry->reason, WB_WORK_REASON), (unsigned long)__entry->cgroup_ino ) ); TRACE_EVENT(global_dirty_state, TP_PROTO(unsigned long background_thresh, unsigned long dirty_thresh ), TP_ARGS(background_thresh, dirty_thresh ), TP_STRUCT__entry( __field(unsigned long, nr_dirty) __field(unsigned long, nr_writeback) __field(unsigned long, background_thresh) __field(unsigned long, dirty_thresh) __field(unsigned long, dirty_limit) __field(unsigned long, nr_dirtied) __field(unsigned long, nr_written) ), TP_fast_assign( __entry->nr_dirty = global_node_page_state(NR_FILE_DIRTY); __entry->nr_writeback = global_node_page_state(NR_WRITEBACK); __entry->nr_dirtied = global_node_page_state(NR_DIRTIED); __entry->nr_written = global_node_page_state(NR_WRITTEN); __entry->background_thresh = background_thresh; __entry->dirty_thresh = dirty_thresh; __entry->dirty_limit = global_wb_domain.dirty_limit; ), TP_printk("dirty=%lu writeback=%lu " "bg_thresh=%lu thresh=%lu limit=%lu " "dirtied=%lu written=%lu", __entry->nr_dirty, __entry->nr_writeback, __entry->background_thresh, __entry->dirty_thresh, __entry->dirty_limit, __entry->nr_dirtied, __entry->nr_written ) ); #define KBps(x) ((x) << (PAGE_SHIFT - 10)) TRACE_EVENT(bdi_dirty_ratelimit, TP_PROTO(struct bdi_writeback *wb, unsigned long dirty_rate, unsigned long task_ratelimit), TP_ARGS(wb, dirty_rate, task_ratelimit), TP_STRUCT__entry( __array(char, bdi, 32) __field(unsigned long, write_bw) __field(unsigned long, avg_write_bw) __field(unsigned long, dirty_rate) __field(unsigned long, dirty_ratelimit) __field(unsigned long, task_ratelimit) __field(unsigned long, balanced_dirty_ratelimit) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); __entry->write_bw = KBps(wb->write_bandwidth); __entry->avg_write_bw = KBps(wb->avg_write_bandwidth); __entry->dirty_rate = KBps(dirty_rate); __entry->dirty_ratelimit = KBps(wb->dirty_ratelimit); __entry->task_ratelimit = KBps(task_ratelimit); __entry->balanced_dirty_ratelimit = KBps(wb->balanced_dirty_ratelimit); __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: " "write_bw=%lu awrite_bw=%lu dirty_rate=%lu " "dirty_ratelimit=%lu task_ratelimit=%lu " "balanced_dirty_ratelimit=%lu cgroup_ino=%lu", __entry->bdi, __entry->write_bw, /* write bandwidth */ __entry->avg_write_bw, /* avg write bandwidth */ __entry->dirty_rate, /* bdi dirty rate */ __entry->dirty_ratelimit, /* base ratelimit */ __entry->task_ratelimit, /* ratelimit with position control */ __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */ (unsigned long)__entry->cgroup_ino ) ); 
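/*
 * Worked example (illustrative): KBps() converts a rate counted in pages per
 * second into KiB per second by shifting by (PAGE_SHIFT - 10). With 4 KiB
 * pages (PAGE_SHIFT == 12) that is a shift by 2, i.e. a multiply by 4, so
 * KBps(250) == 1000 and 250 pages/s is reported as 1000 KiB/s. With 64 KiB
 * pages (PAGE_SHIFT == 16) the shift is 6, i.e. a multiply by 64.
 */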
TRACE_EVENT(balance_dirty_pages, TP_PROTO(struct bdi_writeback *wb, unsigned long thresh, unsigned long bg_thresh, unsigned long dirty, unsigned long bdi_thresh, unsigned long bdi_dirty, unsigned long dirty_ratelimit, unsigned long task_ratelimit, unsigned long dirtied, unsigned long period, long pause, unsigned long start_time), TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty, dirty_ratelimit, task_ratelimit, dirtied, period, pause, start_time), TP_STRUCT__entry( __array( char, bdi, 32) __field(unsigned long, limit) __field(unsigned long, setpoint) __field(unsigned long, dirty) __field(unsigned long, bdi_setpoint) __field(unsigned long, bdi_dirty) __field(unsigned long, dirty_ratelimit) __field(unsigned long, task_ratelimit) __field(unsigned int, dirtied) __field(unsigned int, dirtied_pause) __field(unsigned long, paused) __field( long, pause) __field(unsigned long, period) __field( long, think) __field(ino_t, cgroup_ino) ), TP_fast_assign( unsigned long freerun = (thresh + bg_thresh) / 2; strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); __entry->limit = global_wb_domain.dirty_limit; __entry->setpoint = (global_wb_domain.dirty_limit + freerun) / 2; __entry->dirty = dirty; __entry->bdi_setpoint = __entry->setpoint * bdi_thresh / (thresh + 1); __entry->bdi_dirty = bdi_dirty; __entry->dirty_ratelimit = KBps(dirty_ratelimit); __entry->task_ratelimit = KBps(task_ratelimit); __entry->dirtied = dirtied; __entry->dirtied_pause = current->nr_dirtied_pause; __entry->think = current->dirty_paused_when == 0 ? 0 : (long)(jiffies - current->dirty_paused_when) * 1000/HZ; __entry->period = period * 1000 / HZ; __entry->pause = pause * 1000 / HZ; __entry->paused = (jiffies - start_time) * 1000 / HZ; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: " "limit=%lu setpoint=%lu dirty=%lu " "bdi_setpoint=%lu bdi_dirty=%lu " "dirty_ratelimit=%lu task_ratelimit=%lu " "dirtied=%u dirtied_pause=%u " "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu", __entry->bdi, __entry->limit, __entry->setpoint, __entry->dirty, __entry->bdi_setpoint, __entry->bdi_dirty, __entry->dirty_ratelimit, __entry->task_ratelimit, __entry->dirtied, __entry->dirtied_pause, __entry->paused, /* ms */ __entry->pause, /* ms */ __entry->period, /* ms */ __entry->think, /* ms */ (unsigned long)__entry->cgroup_ino ) ); TRACE_EVENT(writeback_sb_inodes_requeue, TP_PROTO(struct inode *inode), TP_ARGS(inode), TP_STRUCT__entry( __array(char, name, 32) __field(ino_t, ino) __field(unsigned long, state) __field(unsigned long, dirtied_when) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->dirtied_when = inode->dirtied_when; __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode)); ), TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu", __entry->name, (unsigned long)__entry->ino, show_inode_state(__entry->state), __entry->dirtied_when, (jiffies - __entry->dirtied_when) / HZ, (unsigned long)__entry->cgroup_ino ) ); DECLARE_EVENT_CLASS(writeback_single_inode_template, TP_PROTO(struct inode *inode, struct writeback_control *wbc, unsigned long nr_to_write ), TP_ARGS(inode, wbc, nr_to_write), TP_STRUCT__entry( __array(char, name, 32) __field(ino_t, ino) __field(unsigned long, state) __field(unsigned long, dirtied_when) __field(unsigned long, writeback_index) __field(long, nr_to_write) __field(unsigned long, wrote) __field(ino_t, cgroup_ino) ), 
TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->dirtied_when = inode->dirtied_when; __entry->writeback_index = inode->i_mapping->writeback_index; __entry->nr_to_write = nr_to_write; __entry->wrote = nr_to_write - wbc->nr_to_write; __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); ), TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu", __entry->name, (unsigned long)__entry->ino, show_inode_state(__entry->state), __entry->dirtied_when, (jiffies - __entry->dirtied_when) / HZ, __entry->writeback_index, __entry->nr_to_write, __entry->wrote, (unsigned long)__entry->cgroup_ino ) ); DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start, TP_PROTO(struct inode *inode, struct writeback_control *wbc, unsigned long nr_to_write), TP_ARGS(inode, wbc, nr_to_write) ); DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode, TP_PROTO(struct inode *inode, struct writeback_control *wbc, unsigned long nr_to_write), TP_ARGS(inode, wbc, nr_to_write) ); DECLARE_EVENT_CLASS(writeback_inode_template, TP_PROTO(struct inode *inode), TP_ARGS(inode), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) __field(unsigned long, state ) __field( __u16, mode ) __field(unsigned long, dirtied_when ) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->mode = inode->i_mode; __entry->dirtied_when = inode->dirtied_when; ), TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long)__entry->ino, __entry->dirtied_when, show_inode_state(__entry->state), __entry->mode) ); DEFINE_EVENT(writeback_inode_template, writeback_lazytime, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); /* * Inode writeback list tracking. */ DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); #endif /* _TRACE_WRITEBACK_H */ /* This part must be outside protection */ #include <trace/define_trace.h> |
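/*
 * A minimal sketch (not part of the kernel sources above) of the unit
 * conversions used by these writeback tracepoints, for anyone post-processing
 * the trace output. PAGE_SHIFT and HZ must match the traced kernel; the
 * helper names and example values here are illustrative only.
 */

/* Same conversion as the KBps() macro above: pages/s -> KiB/s. */
static inline unsigned long demo_pages_to_kibps(unsigned long pages_per_sec,
						unsigned int page_shift)
{
	return pages_per_sec << (page_shift - 10);	/* e.g. x4 for 4 KiB pages */
}

/* Same scaling used for the age/paused/pause/think fields: jiffies -> ms. */
static inline long demo_jiffies_to_ms(long delta_jiffies, unsigned int hz)
{
	return delta_jiffies * 1000 / hz;
}

/* Same arithmetic as balance_dirty_pages: the global dirty setpoint. */
static inline unsigned long demo_global_setpoint(unsigned long dirty_limit,
						 unsigned long thresh,
						 unsigned long bg_thresh)
{
	unsigned long freerun = (thresh + bg_thresh) / 2;

	return (dirty_limit + freerun) / 2;
}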
3080 3079 3083 3065 3072 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Generic Timer-queue * * Manages a simple queue of timers, ordered by expiration time. * Uses rbtrees for quick list adds and expiration. * * NOTE: All of the following functions need to be serialized * to avoid races. No locking is done by this library code. */ #include <linux/bug.h> #include <linux/timerqueue.h> #include <linux/rbtree.h> #include <linux/export.h> #define __node_2_tq(_n) \ rb_entry((_n), struct timerqueue_node, node) static inline bool __timerqueue_less(struct rb_node *a, const struct rb_node *b) { return __node_2_tq(a)->expires < __node_2_tq(b)->expires; } /** * timerqueue_add - Adds timer to timerqueue. * * @head: head of timerqueue * @node: timer node to be added * * Adds the timer node to the timerqueue, sorted by the node's expires * value. Returns true if the newly added timer is the first expiring timer in * the queue. */ bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) { /* Make sure we don't add nodes that are already added */ WARN_ON_ONCE(!RB_EMPTY_NODE(&node->node)); return rb_add_cached(&node->node, &head->rb_root, __timerqueue_less); } EXPORT_SYMBOL_GPL(timerqueue_add); /** * timerqueue_del - Removes a timer from the timerqueue. * * @head: head of timerqueue * @node: timer node to be removed * * Removes the timer node from the timerqueue. Returns true if the queue is * not empty after the remove. */ bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) { WARN_ON_ONCE(RB_EMPTY_NODE(&node->node)); rb_erase_cached(&node->node, &head->rb_root); RB_CLEAR_NODE(&node->node); return !RB_EMPTY_ROOT(&head->rb_root.rb_root); } EXPORT_SYMBOL_GPL(timerqueue_del); /** * timerqueue_iterate_next - Returns the timer after the provided timer * * @node: Pointer to a timer. * * Provides the timer that is after the given node. This is used, when * necessary, to iterate through the list of timers in a timer list * without modifying the list. */ struct timerqueue_node *timerqueue_iterate_next(struct timerqueue_node *node) { struct rb_node *next; if (!node) return NULL; next = rb_next(&node->node); if (!next) return NULL; return container_of(next, struct timerqueue_node, node); } EXPORT_SYMBOL_GPL(timerqueue_iterate_next); |
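/*
 * Minimal usage sketch for the timerqueue API above. The library does no
 * locking itself (see the NOTE at the top of the file), so every call below
 * is assumed to run under a caller-provided lock. "struct demo_timer" and the
 * demo_* helpers are illustrative, not part of the kernel API.
 */
#include <linux/ktime.h>
#include <linux/spinlock.h>
#include <linux/timerqueue.h>

struct demo_timer {
	struct timerqueue_node node;
	void (*fn)(struct demo_timer *t);
};

static DEFINE_SPINLOCK(demo_lock);
static struct timerqueue_head demo_queue;

static void demo_queue_setup(void)
{
	timerqueue_init_head(&demo_queue);
}

static void demo_timer_start(struct demo_timer *t, ktime_t expires)
{
	timerqueue_init(&t->node);
	t->node.expires = expires;

	spin_lock(&demo_lock);
	/* timerqueue_add() returns true if this is now the earliest timer. */
	if (timerqueue_add(&demo_queue, &t->node))
		pr_debug("new earliest expiry %lld\n", ktime_to_ns(expires));
	spin_unlock(&demo_lock);
}

static void demo_run_expired(ktime_t now)
{
	struct timerqueue_node *node;

	spin_lock(&demo_lock);
	while ((node = timerqueue_getnext(&demo_queue)) &&
	       node->expires <= now) {
		struct demo_timer *t = container_of(node, struct demo_timer, node);

		timerqueue_del(&demo_queue, node);
		/* A real user would usually drop the lock around the callback. */
		t->fn(t);
	}
	spin_unlock(&demo_lock);
}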
18 18 9 6 17 4 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 | /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Berkeley style UIO structures - Alan Cox 1994. */ #ifndef __LINUX_UIO_H #define __LINUX_UIO_H #include <linux/kernel.h> #include <linux/thread_info.h> #include <linux/mm_types.h> #include <uapi/linux/uio.h> struct page; struct folio_queue; typedef unsigned int __bitwise iov_iter_extraction_t; struct kvec { void *iov_base; /* and that should *never* hold a userland pointer */ size_t iov_len; }; enum iter_type { /* iter types */ ITER_UBUF, ITER_IOVEC, ITER_BVEC, ITER_KVEC, ITER_FOLIOQ, ITER_XARRAY, ITER_DISCARD, }; #define ITER_SOURCE 1 // == WRITE #define ITER_DEST 0 // == READ struct iov_iter_state { size_t iov_offset; size_t count; unsigned long nr_segs; }; struct iov_iter { u8 iter_type; bool nofault; bool data_source; size_t iov_offset; /* * Hack alert: overlay ubuf_iovec with iovec + count, so * that the members resolve correctly regardless of the type * of iterator used. This means that you can use: * * &iter->__ubuf_iovec or iter->__iov * * interchangably for the user_backed cases, hence simplifying * some of the cases that need to deal with both. */ union { /* * This really should be a const, but we cannot do that without * also modifying any of the zero-filling iter init functions. * Leave it non-const for now, but it should be treated as such. 
*/ struct iovec __ubuf_iovec; struct { union { /* use iter_iov() to get the current vec */ const struct iovec *__iov; const struct kvec *kvec; const struct bio_vec *bvec; const struct folio_queue *folioq; struct xarray *xarray; void __user *ubuf; }; size_t count; }; }; union { unsigned long nr_segs; u8 folioq_slot; loff_t xarray_start; }; }; static inline const struct iovec *iter_iov(const struct iov_iter *iter) { if (iter->iter_type == ITER_UBUF) return (const struct iovec *) &iter->__ubuf_iovec; return iter->__iov; } #define iter_iov_addr(iter) (iter_iov(iter)->iov_base + (iter)->iov_offset) #define iter_iov_len(iter) (iter_iov(iter)->iov_len - (iter)->iov_offset) static inline enum iter_type iov_iter_type(const struct iov_iter *i) { return i->iter_type; } static inline void iov_iter_save_state(struct iov_iter *iter, struct iov_iter_state *state) { state->iov_offset = iter->iov_offset; state->count = iter->count; state->nr_segs = iter->nr_segs; } static inline bool iter_is_ubuf(const struct iov_iter *i) { return iov_iter_type(i) == ITER_UBUF; } static inline bool iter_is_iovec(const struct iov_iter *i) { return iov_iter_type(i) == ITER_IOVEC; } static inline bool iov_iter_is_kvec(const struct iov_iter *i) { return iov_iter_type(i) == ITER_KVEC; } static inline bool iov_iter_is_bvec(const struct iov_iter *i) { return iov_iter_type(i) == ITER_BVEC; } static inline bool iov_iter_is_discard(const struct iov_iter *i) { return iov_iter_type(i) == ITER_DISCARD; } static inline bool iov_iter_is_folioq(const struct iov_iter *i) { return iov_iter_type(i) == ITER_FOLIOQ; } static inline bool iov_iter_is_xarray(const struct iov_iter *i) { return iov_iter_type(i) == ITER_XARRAY; } static inline unsigned char iov_iter_rw(const struct iov_iter *i) { return i->data_source ? WRITE : READ; } static inline bool user_backed_iter(const struct iov_iter *i) { return iter_is_ubuf(i) || iter_is_iovec(i); } /* * Total number of bytes covered by an iovec. * * NOTE that it is not safe to use this function until all the iovec's * segment lengths have been validated. Because the individual lengths can * overflow a size_t when added together. 
*/ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs) { unsigned long seg; size_t ret = 0; for (seg = 0; seg < nr_segs; seg++) ret += iov[seg].iov_len; return ret; } size_t copy_page_from_iter_atomic(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); void iov_iter_advance(struct iov_iter *i, size_t bytes); void iov_iter_revert(struct iov_iter *i, size_t bytes); size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes); size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes); size_t iov_iter_single_seg_count(const struct iov_iter *i); size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset, size_t bytes, struct iov_iter *i) { return copy_page_to_iter(&folio->page, offset, bytes, i); } static inline size_t copy_folio_from_iter(struct folio *folio, size_t offset, size_t bytes, struct iov_iter *i) { return copy_page_from_iter(&folio->page, offset, bytes, i); } static inline size_t copy_folio_from_iter_atomic(struct folio *folio, size_t offset, size_t bytes, struct iov_iter *i) { return copy_page_from_iter_atomic(&folio->page, offset, bytes, i); } size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes, struct iov_iter *i); static __always_inline __must_check size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { if (check_copy_size(addr, bytes, true)) return _copy_to_iter(addr, bytes, i); return 0; } static __always_inline __must_check size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { if (check_copy_size(addr, bytes, false)) return _copy_from_iter(addr, bytes, i); return 0; } static __always_inline __must_check bool copy_to_iter_full(const void *addr, size_t bytes, struct iov_iter *i) { size_t copied = copy_to_iter(addr, bytes, i); if (likely(copied == bytes)) return true; iov_iter_revert(i, copied); return false; } static __always_inline __must_check bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) { size_t copied = copy_from_iter(addr, bytes, i); if (likely(copied == bytes)) return true; iov_iter_revert(i, copied); return false; } static __always_inline __must_check size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) { if (check_copy_size(addr, bytes, false)) return _copy_from_iter_nocache(addr, bytes, i); return 0; } static __always_inline __must_check bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) { size_t copied = copy_from_iter_nocache(addr, bytes, i); if (likely(copied == bytes)) return true; iov_iter_revert(i, copied); return false; } #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE /* * Note, users like pmem that depend on the stricter semantics of * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the * destination is flushed from the cache on return. 
*/ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i); #else #define _copy_from_iter_flushcache _copy_from_iter_nocache #endif #ifdef CONFIG_ARCH_HAS_COPY_MC size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i); #else #define _copy_mc_to_iter _copy_to_iter #endif size_t iov_iter_zero(size_t bytes, struct iov_iter *); bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask, unsigned len_mask); unsigned long iov_iter_alignment(const struct iov_iter *i); unsigned long iov_iter_gap_alignment(const struct iov_iter *i); void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov, unsigned long nr_segs, size_t count); void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec, unsigned long nr_segs, size_t count); void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec, unsigned long nr_segs, size_t count); void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count); void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction, const struct folio_queue *folioq, unsigned int first_slot, unsigned int offset, size_t count); void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray, loff_t start, size_t count); ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start); ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start); int iov_iter_npages(const struct iov_iter *i, int maxpages); void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state); const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); static inline size_t iov_iter_count(const struct iov_iter *i) { return i->count; } /* * Cap the iov_iter by given limit; note that the second argument is * *not* the new size - it's upper limit for such. Passing it a value * greater than the amount of data in iov_iter is fine - it'll just do * nothing in that case. */ static inline void iov_iter_truncate(struct iov_iter *i, u64 count) { /* * count doesn't have to fit in size_t - comparison extends both * operands to u64 here and any value that would be truncated by * conversion in assignement is by definition greater than all * values of size_t, including old i->count. */ if (i->count > count) i->count = count; } /* * reexpand a previously truncated iterator; count must be no more than how much * we had shrunk it. 
*/ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { i->count = count; } static inline int iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes) { size_t shorted = 0; int npages; if (iov_iter_count(i) > max_bytes) { shorted = iov_iter_count(i) - max_bytes; iov_iter_truncate(i, max_bytes); } npages = iov_iter_npages(i, maxpages); if (shorted) iov_iter_reexpand(i, iov_iter_count(i) + shorted); return npages; } struct iovec *iovec_from_user(const struct iovec __user *uvector, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_iov, bool compat); ssize_t import_iovec(int type, const struct iovec __user *uvec, unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, struct iov_iter *i); ssize_t __import_iovec(int type, const struct iovec __user *uvec, unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, struct iov_iter *i, bool compat); int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i); static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction, void __user *buf, size_t count) { WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter) { .iter_type = ITER_UBUF, .data_source = direction, .ubuf = buf, .count = count, .nr_segs = 1 }; } /* Flags for iov_iter_get/extract_pages*() */ /* Allow P2PDMA on the extracted pages */ #define ITER_ALLOW_P2PDMA ((__force iov_iter_extraction_t)0x01) ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned int maxpages, iov_iter_extraction_t extraction_flags, size_t *offset0); /** * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained * @iter: The iterator * * Examine the iterator and indicate by returning true or false as to how, if * at all, pages extracted from the iterator will be retained by the extraction * function. * * %true indicates that the pages will have a pin placed in them that the * caller must unpin. This is must be done for DMA/async DIO to force fork() * to forcibly copy a page for the child (the parent must retain the original * page). * * %false indicates that no measures are taken and that it's up to the caller * to retain the pages. */ static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter) { return user_backed_iter(iter); } struct sg_table; ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len, struct sg_table *sgtable, unsigned int sg_max, iov_iter_extraction_t extraction_flags); #endif |
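/*
 * Minimal usage sketch for the iov_iter interfaces declared above: build a
 * kernel-memory (kvec) iterator, cap it with iov_iter_truncate(), and copy
 * data into it with copy_to_iter(). The function and buffer names are
 * illustrative only; a source-side iterator would use ITER_SOURCE and
 * copy_from_iter() instead.
 */
#include <linux/uio.h>

static size_t demo_fill_kvec(void *dst, size_t dst_len,
			     const void *src, size_t src_len)
{
	struct kvec kv = {
		.iov_base = dst,
		.iov_len  = dst_len,
	};
	struct iov_iter to;

	/* ITER_DEST: the iterator describes memory that will be written to. */
	iov_iter_kvec(&to, ITER_DEST, &kv, 1, dst_len);

	/*
	 * Cap the iterator, as a read path would cap it at the file size;
	 * here the bound is simply the amount of source data available.
	 */
	iov_iter_truncate(&to, src_len);

	/* Returns the number of bytes actually copied (<= src_len). */
	return copy_to_iter(src, src_len, &to);
}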
83 2971 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 | /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM skb #if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SKB_H #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/tracepoint.h> #undef FN #define FN(reason) TRACE_DEFINE_ENUM(SKB_DROP_REASON_##reason); DEFINE_DROP_REASON(FN, FN) #undef FN #undef FNe #define FN(reason) { SKB_DROP_REASON_##reason, #reason }, #define FNe(reason) { SKB_DROP_REASON_##reason, #reason } /* * Tracepoint for free an sk_buff: */ TRACE_EVENT(kfree_skb, TP_PROTO(struct sk_buff *skb, void *location, enum skb_drop_reason reason, struct sock *rx_sk), TP_ARGS(skb, location, reason, rx_sk), TP_STRUCT__entry( __field(void *, skbaddr) __field(void *, location) __field(void *, rx_sk) __field(unsigned short, protocol) __field(enum skb_drop_reason, reason) ), TP_fast_assign( __entry->skbaddr = skb; __entry->location = location; __entry->rx_sk = rx_sk; __entry->protocol = ntohs(skb->protocol); __entry->reason = reason; ), TP_printk("skbaddr=%p rx_sk=%p protocol=%u location=%pS reason: %s", __entry->skbaddr, __entry->rx_sk, __entry->protocol, __entry->location, __print_symbolic(__entry->reason, DEFINE_DROP_REASON(FN, FNe))) ); #undef FN #undef FNe TRACE_EVENT(consume_skb, TP_PROTO(struct sk_buff *skb, void *location), TP_ARGS(skb, location), TP_STRUCT__entry( __field( void *, skbaddr) __field( void *, location) ), TP_fast_assign( __entry->skbaddr = skb; __entry->location = location; ), TP_printk("skbaddr=%p location=%pS", __entry->skbaddr, __entry->location) ); TRACE_EVENT(skb_copy_datagram_iovec, TP_PROTO(const struct sk_buff *skb, int len), TP_ARGS(skb, len), TP_STRUCT__entry( __field( const void *, skbaddr ) __field( int, len ) ), TP_fast_assign( __entry->skbaddr = skb; __entry->len = len; ), TP_printk("skbaddr=%p len=%d", __entry->skbaddr, __entry->len) ); #endif /* _TRACE_SKB_H */ /* This part must be outside protection */ #include <trace/define_trace.h> |
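/*
 * Illustration of the FN/FNe trick used by the kfree_skb tracepoint above:
 * the list macro applies FN to every entry except the last and FNe to the
 * last, so the expansion forms a valid initializer list for
 * __print_symbolic() with no trailing comma after the final element. A
 * miniature version of the same pattern, with purely illustrative names:
 */
enum demo_reason { DEMO_FIRST, DEMO_SECOND, DEMO_LAST };

#define DEMO_REASONS(FN, FNe)	\
	FN(FIRST)		\
	FN(SECOND)		\
	FNe(LAST)

#define DEMO_FN(r)  { DEMO_##r, #r },
#define DEMO_FNe(r) { DEMO_##r, #r }

/*
 * DEMO_REASONS(DEMO_FN, DEMO_FNe) expands to:
 *
 *	{ DEMO_FIRST, "FIRST" }, { DEMO_SECOND, "SECOND" }, { DEMO_LAST, "LAST" }
 *
 * which is exactly the shape __print_symbolic(value, ...) consumes. The
 * earlier TRACE_DEFINE_ENUM() pass only registers the enum values with the
 * tracing core so the symbolic names resolve in the text output.
 */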
2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 | // SPDX-License-Identifier: GPL-2.0+ /* * Special Initializers for certain USB Mass Storage devices * * Current development and maintenance by: * (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net) * * This driver is based on the 'USB Mass Storage Class' document. This * describes in detail the protocol used to communicate with such * devices. Clearly, the designers had SCSI and ATAPI commands in * mind when they created this document. The commands are all very * similar to commands in the SCSI-II and ATAPI specifications. * * It is important to note that in a number of cases this class * exhibits class-specific exemptions from the USB specification. * Notably the usage of NAK, STALL and ACK differs from the norm, in * that they are used to communicate wait, failed and OK on commands. * * Also, for certain devices, the interrupt endpoint is used to convey * status of a command. */ #include <linux/errno.h> #include "usb.h" #include "initializers.h" #include "debug.h" #include "transport.h" /* * This places the Shuttle/SCM USB<->SCSI bridge devices in multi-target * mode */ int usb_stor_euscsi_init(struct us_data *us) { int result; usb_stor_dbg(us, "Attempting to init eUSCSI bridge...\n"); result = usb_stor_control_msg(us, us->send_ctrl_pipe, 0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR, 0x01, 0x0, NULL, 0x0, 5 * HZ); usb_stor_dbg(us, "-- result is %d\n", result); return 0; } /* * This function is required to activate all four slots on the UCR-61S2B * flash reader */ int usb_stor_ucr61s2b_init(struct us_data *us) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap*) us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap*) us->iobuf; int res; unsigned int partial; static char init_string[] = "\xec\x0a\x06\x00$PCCHIPS"; usb_stor_dbg(us, "Sending UCR-61S2B initialization packet...\n"); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->Tag = 0; bcb->DataTransferLength = cpu_to_le32(0); bcb->Flags = bcb->Lun = 0; bcb->Length = sizeof(init_string) - 1; memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, init_string, sizeof(init_string) - 1); res = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, &partial); if (res) return -EIO; usb_stor_dbg(us, "Getting status packet...\n"); res = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &partial); if (res) return -EIO; return 0; } /* This places the HUAWEI E220 devices in multi-port mode */ int usb_stor_huawei_e220_init(struct us_data *us) { int result; result = usb_stor_control_msg(us, us->send_ctrl_pipe, USB_REQ_SET_FEATURE, USB_TYPE_STANDARD | USB_RECIP_DEVICE, 0x01, 0x0, NULL, 0x0, 1 * HZ); usb_stor_dbg(us, "Huawei mode set result is %d\n", result); return 0; } |
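/*
 * These initializers are not called directly; they are referenced from the
 * device quirk table (drivers/usb/storage/unusual_devs.h), where the init
 * function is one field of an UNUSUAL_DEV() entry. A hedged sketch of what
 * such an entry looks like inside that table -- the vendor/product IDs and
 * name strings below are illustrative, not copied from the real table:
 */
UNUSUAL_DEV(0x1234, 0x5678, 0x0000, 0x9999,
		"Example Vendor",
		"Example Multi-slot Reader",
		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_ucr61s2b_init,
		0),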
2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 | // SPDX-License-Identifier: GPL-2.0-or-later /* * uvc_driver.c -- USB Video Class driver * * Copyright (C) 2005-2010 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) */ #include <linux/atomic.h> #include <linux/bits.h> #include <linux/gpio/consumer.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/quirks.h> #include <linux/usb/uvc.h> #include <linux/videodev2.h> #include <linux/vmalloc.h> #include <linux/wait.h> #include <linux/unaligned.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include "uvcvideo.h" #define DRIVER_AUTHOR "Laurent Pinchart " \ "<laurent.pinchart@ideasonboard.com>" #define DRIVER_DESC "USB Video Class driver" unsigned int uvc_clock_param = CLOCK_MONOTONIC; unsigned int uvc_hw_timestamps_param; unsigned int uvc_no_drop_param; static unsigned int uvc_quirks_param = -1; unsigned int uvc_dbg_param; unsigned int uvc_timeout_param = UVC_CTRL_STREAMING_TIMEOUT; /* ------------------------------------------------------------------------ * Utility functions */ struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts, u8 epaddr) { struct usb_host_endpoint *ep; unsigned int i; for (i = 0; i < alts->desc.bNumEndpoints; ++i) { ep = &alts->endpoint[i]; if (ep->desc.bEndpointAddress == epaddr) return ep; } return NULL; } static enum v4l2_colorspace uvc_colorspace(const u8 primaries) { static const enum v4l2_colorspace colorprimaries[] = { V4L2_COLORSPACE_SRGB, /* Unspecified */ V4L2_COLORSPACE_SRGB, V4L2_COLORSPACE_470_SYSTEM_M, V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_SMPTE240M, }; if (primaries < ARRAY_SIZE(colorprimaries)) return colorprimaries[primaries]; return V4L2_COLORSPACE_SRGB; /* Reserved */ } static enum v4l2_xfer_func uvc_xfer_func(const u8 transfer_characteristics) { /* * V4L2 does not currently have definitions for all possible values of * UVC transfer characteristics. If v4l2_xfer_func is extended with new * values, the mapping below should be updated. * * Substitutions are taken from the mapping given for * V4L2_XFER_FUNC_DEFAULT documented in videodev2.h. 
*/ static const enum v4l2_xfer_func xfer_funcs[] = { V4L2_XFER_FUNC_DEFAULT, /* Unspecified */ V4L2_XFER_FUNC_709, V4L2_XFER_FUNC_709, /* Substitution for BT.470-2 M */ V4L2_XFER_FUNC_709, /* Substitution for BT.470-2 B, G */ V4L2_XFER_FUNC_709, /* Substitution for SMPTE 170M */ V4L2_XFER_FUNC_SMPTE240M, V4L2_XFER_FUNC_NONE, V4L2_XFER_FUNC_SRGB, }; if (transfer_characteristics < ARRAY_SIZE(xfer_funcs)) return xfer_funcs[transfer_characteristics]; return V4L2_XFER_FUNC_DEFAULT; /* Reserved */ } static enum v4l2_ycbcr_encoding uvc_ycbcr_enc(const u8 matrix_coefficients) { /* * V4L2 does not currently have definitions for all possible values of * UVC matrix coefficients. If v4l2_ycbcr_encoding is extended with new * values, the mapping below should be updated. * * Substitutions are taken from the mapping given for * V4L2_YCBCR_ENC_DEFAULT documented in videodev2.h. * * FCC is assumed to be close enough to 601. */ static const enum v4l2_ycbcr_encoding ycbcr_encs[] = { V4L2_YCBCR_ENC_DEFAULT, /* Unspecified */ V4L2_YCBCR_ENC_709, V4L2_YCBCR_ENC_601, /* Substitution for FCC */ V4L2_YCBCR_ENC_601, /* Substitution for BT.470-2 B, G */ V4L2_YCBCR_ENC_601, V4L2_YCBCR_ENC_SMPTE240M, }; if (matrix_coefficients < ARRAY_SIZE(ycbcr_encs)) return ycbcr_encs[matrix_coefficients]; return V4L2_YCBCR_ENC_DEFAULT; /* Reserved */ } /* ------------------------------------------------------------------------ * Terminal and unit management */ struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id) { struct uvc_entity *entity; list_for_each_entry(entity, &dev->entities, list) { if (entity->id == id) return entity; } return NULL; } static struct uvc_entity *uvc_entity_by_reference(struct uvc_device *dev, int id, struct uvc_entity *entity) { unsigned int i; if (entity == NULL) entity = list_entry(&dev->entities, struct uvc_entity, list); list_for_each_entry_continue(entity, &dev->entities, list) { for (i = 0; i < entity->bNrInPins; ++i) if (entity->baSourceID[i] == id) return entity; } return NULL; } static struct uvc_streaming *uvc_stream_by_id(struct uvc_device *dev, int id) { struct uvc_streaming *stream; list_for_each_entry(stream, &dev->streams, list) { if (stream->header.bTerminalLink == id) return stream; } return NULL; } /* ------------------------------------------------------------------------ * Streaming Object Management */ static void uvc_stream_delete(struct uvc_streaming *stream) { if (stream->async_wq) destroy_workqueue(stream->async_wq); mutex_destroy(&stream->mutex); usb_put_intf(stream->intf); kfree(stream->formats); kfree(stream->header.bmaControls); kfree(stream); } static struct uvc_streaming *uvc_stream_new(struct uvc_device *dev, struct usb_interface *intf) { struct uvc_streaming *stream; stream = kzalloc(sizeof(*stream), GFP_KERNEL); if (stream == NULL) return NULL; mutex_init(&stream->mutex); stream->dev = dev; stream->intf = usb_get_intf(intf); stream->intfnum = intf->cur_altsetting->desc.bInterfaceNumber; /* Allocate a stream specific work queue for asynchronous tasks. 
*/ stream->async_wq = alloc_workqueue("uvcvideo", WQ_UNBOUND | WQ_HIGHPRI, 0); if (!stream->async_wq) { uvc_stream_delete(stream); return NULL; } return stream; } /* ------------------------------------------------------------------------ * Descriptors parsing */ static int uvc_parse_format(struct uvc_device *dev, struct uvc_streaming *streaming, struct uvc_format *format, struct uvc_frame *frames, u32 **intervals, const unsigned char *buffer, int buflen) { struct usb_interface *intf = streaming->intf; struct usb_host_interface *alts = intf->cur_altsetting; const struct uvc_format_desc *fmtdesc; struct uvc_frame *frame; const unsigned char *start = buffer; unsigned int width_multiplier = 1; unsigned int interval; unsigned int i, n; u8 ftype; format->type = buffer[2]; format->index = buffer[3]; format->frames = frames; switch (buffer[2]) { case UVC_VS_FORMAT_UNCOMPRESSED: case UVC_VS_FORMAT_FRAME_BASED: n = buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED ? 27 : 28; if (buflen < n) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } /* Find the format descriptor from its GUID. */ fmtdesc = uvc_format_by_guid(&buffer[5]); if (!fmtdesc) { /* * Unknown video formats are not fatal errors, the * caller will skip this descriptor. */ dev_info(&streaming->intf->dev, "Unknown video format %pUl\n", &buffer[5]); return 0; } format->fcc = fmtdesc->fcc; format->bpp = buffer[21]; /* * Some devices report a format that doesn't match what they * really send. */ if (dev->quirks & UVC_QUIRK_FORCE_Y8) { if (format->fcc == V4L2_PIX_FMT_YUYV) { format->fcc = V4L2_PIX_FMT_GREY; format->bpp = 8; width_multiplier = 2; } } /* Some devices report bpp that doesn't match the format. */ if (dev->quirks & UVC_QUIRK_FORCE_BPP) { const struct v4l2_format_info *info = v4l2_format_info(format->fcc); if (info) { unsigned int div = info->hdiv * info->vdiv; n = info->bpp[0] * div; for (i = 1; i < info->comp_planes; i++) n += info->bpp[i]; format->bpp = DIV_ROUND_UP(8 * n, div); } } if (buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED) { ftype = UVC_VS_FRAME_UNCOMPRESSED; } else { ftype = UVC_VS_FRAME_FRAME_BASED; if (buffer[27]) format->flags = UVC_FMT_FLAG_COMPRESSED; } break; case UVC_VS_FORMAT_MJPEG: if (buflen < 11) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } format->fcc = V4L2_PIX_FMT_MJPEG; format->flags = UVC_FMT_FLAG_COMPRESSED; format->bpp = 0; ftype = UVC_VS_FRAME_MJPEG; break; case UVC_VS_FORMAT_DV: if (buflen < 9) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } if ((buffer[8] & 0x7f) > 2) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d: unknown DV format %u\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buffer[8]); return -EINVAL; } format->fcc = V4L2_PIX_FMT_DV; format->flags = UVC_FMT_FLAG_COMPRESSED | UVC_FMT_FLAG_STREAM; format->bpp = 0; ftype = 0; /* Create a dummy frame descriptor. */ frame = &frames[0]; memset(frame, 0, sizeof(*frame)); frame->bFrameIntervalType = 1; frame->dwDefaultFrameInterval = 1; frame->dwFrameInterval = *intervals; *(*intervals)++ = 1; format->nframes = 1; break; case UVC_VS_FORMAT_MPEG2TS: case UVC_VS_FORMAT_STREAM_BASED: /* Not supported yet. 
*/ default: uvc_dbg(dev, DESCR, "device %d videostreaming interface %d unsupported format %u\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buffer[2]); return -EINVAL; } uvc_dbg(dev, DESCR, "Found format %p4cc", &format->fcc); buflen -= buffer[0]; buffer += buffer[0]; /* * Parse the frame descriptors. Only uncompressed, MJPEG and frame * based formats have frame descriptors. */ while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE && buffer[2] == ftype) { unsigned int maxIntervalIndex; frame = &frames[format->nframes]; if (ftype != UVC_VS_FRAME_FRAME_BASED) n = buflen > 25 ? buffer[25] : 0; else n = buflen > 21 ? buffer[21] : 0; n = n ? n : 3; if (buflen < 26 + 4*n) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FRAME error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } frame->bFrameIndex = buffer[3]; frame->bmCapabilities = buffer[4]; frame->wWidth = get_unaligned_le16(&buffer[5]) * width_multiplier; frame->wHeight = get_unaligned_le16(&buffer[7]); frame->dwMinBitRate = get_unaligned_le32(&buffer[9]); frame->dwMaxBitRate = get_unaligned_le32(&buffer[13]); if (ftype != UVC_VS_FRAME_FRAME_BASED) { frame->dwMaxVideoFrameBufferSize = get_unaligned_le32(&buffer[17]); frame->dwDefaultFrameInterval = get_unaligned_le32(&buffer[21]); frame->bFrameIntervalType = buffer[25]; } else { frame->dwMaxVideoFrameBufferSize = 0; frame->dwDefaultFrameInterval = get_unaligned_le32(&buffer[17]); frame->bFrameIntervalType = buffer[21]; } /* * Copy the frame intervals. * * Some bogus devices report dwMinFrameInterval equal to * dwMaxFrameInterval and have dwFrameIntervalStep set to * zero. Setting all null intervals to 1 fixes the problem and * some other divisions by zero that could happen. */ frame->dwFrameInterval = *intervals; for (i = 0; i < n; ++i) { interval = get_unaligned_le32(&buffer[26+4*i]); (*intervals)[i] = interval ? interval : 1; } /* * Apply more fixes, quirks and workarounds to handle incorrect * or broken descriptors. */ /* * Several UVC chipsets screw up dwMaxVideoFrameBufferSize * completely. Observed behaviours range from setting the * value to 1.1x the actual frame size to hardwiring the * 16 low bits to 0. This results in a higher than necessary * memory usage as well as a wrong image size information. For * uncompressed formats this can be fixed by computing the * value from the frame size. */ if (!(format->flags & UVC_FMT_FLAG_COMPRESSED)) frame->dwMaxVideoFrameBufferSize = format->bpp * frame->wWidth * frame->wHeight / 8; /* * Clamp the default frame interval to the boundaries. A zero * bFrameIntervalType value indicates a continuous frame * interval range, with dwFrameInterval[0] storing the minimum * value and dwFrameInterval[1] storing the maximum value. */ maxIntervalIndex = frame->bFrameIntervalType ? n - 1 : 1; frame->dwDefaultFrameInterval = clamp(frame->dwDefaultFrameInterval, frame->dwFrameInterval[0], frame->dwFrameInterval[maxIntervalIndex]); /* * Some devices report frame intervals that are not functional. * If the corresponding quirk is set, restrict operation to the * first interval only. 
*/ if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) { frame->bFrameIntervalType = 1; (*intervals)[0] = frame->dwDefaultFrameInterval; } uvc_dbg(dev, DESCR, "- %ux%u (%u.%u fps)\n", frame->wWidth, frame->wHeight, 10000000 / frame->dwDefaultFrameInterval, (100000000 / frame->dwDefaultFrameInterval) % 10); format->nframes++; *intervals += n; buflen -= buffer[0]; buffer += buffer[0]; } if (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE && buffer[2] == UVC_VS_STILL_IMAGE_FRAME) { buflen -= buffer[0]; buffer += buffer[0]; } if (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE && buffer[2] == UVC_VS_COLORFORMAT) { if (buflen < 6) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d COLORFORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } format->colorspace = uvc_colorspace(buffer[3]); format->xfer_func = uvc_xfer_func(buffer[4]); format->ycbcr_enc = uvc_ycbcr_enc(buffer[5]); buflen -= buffer[0]; buffer += buffer[0]; } else { format->colorspace = V4L2_COLORSPACE_SRGB; } return buffer - start; } static int uvc_parse_streaming(struct uvc_device *dev, struct usb_interface *intf) { struct uvc_streaming *streaming = NULL; struct uvc_format *format; struct uvc_frame *frame; struct usb_host_interface *alts = &intf->altsetting[0]; const unsigned char *_buffer, *buffer = alts->extra; int _buflen, buflen = alts->extralen; unsigned int nformats = 0, nframes = 0, nintervals = 0; unsigned int size, i, n, p; u32 *interval; u16 psize; int ret = -EINVAL; if (intf->cur_altsetting->desc.bInterfaceSubClass != UVC_SC_VIDEOSTREAMING) { uvc_dbg(dev, DESCR, "device %d interface %d isn't a video streaming interface\n", dev->udev->devnum, intf->altsetting[0].desc.bInterfaceNumber); return -EINVAL; } if (usb_driver_claim_interface(&uvc_driver.driver, intf, dev)) { uvc_dbg(dev, DESCR, "device %d interface %d is already claimed\n", dev->udev->devnum, intf->altsetting[0].desc.bInterfaceNumber); return -EINVAL; } streaming = uvc_stream_new(dev, intf); if (streaming == NULL) { usb_driver_release_interface(&uvc_driver.driver, intf); return -ENOMEM; } /* * The Pico iMage webcam has its class-specific interface descriptors * after the endpoint descriptors. */ if (buflen == 0) { for (i = 0; i < alts->desc.bNumEndpoints; ++i) { struct usb_host_endpoint *ep = &alts->endpoint[i]; if (ep->extralen == 0) continue; if (ep->extralen > 2 && ep->extra[1] == USB_DT_CS_INTERFACE) { uvc_dbg(dev, DESCR, "trying extra data from endpoint %u\n", i); buffer = alts->endpoint[i].extra; buflen = alts->endpoint[i].extralen; break; } } } /* Skip the standard interface descriptors. */ while (buflen > 2 && buffer[1] != USB_DT_CS_INTERFACE) { buflen -= buffer[0]; buffer += buffer[0]; } if (buflen <= 2) { uvc_dbg(dev, DESCR, "no class-specific streaming interface descriptors found\n"); goto error; } /* Parse the header descriptor. */ switch (buffer[2]) { case UVC_VS_OUTPUT_HEADER: streaming->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; size = 9; break; case UVC_VS_INPUT_HEADER: streaming->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; size = 13; break; default: uvc_dbg(dev, DESCR, "device %d videostreaming interface %d HEADER descriptor not found\n", dev->udev->devnum, alts->desc.bInterfaceNumber); goto error; } p = buflen >= 4 ? buffer[3] : 0; n = buflen >= size ? 
buffer[size-1] : 0; if (buflen < size + p*n) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d HEADER descriptor is invalid\n", dev->udev->devnum, alts->desc.bInterfaceNumber); goto error; } streaming->header.bNumFormats = p; streaming->header.bEndpointAddress = buffer[6]; if (buffer[2] == UVC_VS_INPUT_HEADER) { streaming->header.bmInfo = buffer[7]; streaming->header.bTerminalLink = buffer[8]; streaming->header.bStillCaptureMethod = buffer[9]; streaming->header.bTriggerSupport = buffer[10]; streaming->header.bTriggerUsage = buffer[11]; } else { streaming->header.bTerminalLink = buffer[7]; } streaming->header.bControlSize = n; streaming->header.bmaControls = kmemdup(&buffer[size], p * n, GFP_KERNEL); if (streaming->header.bmaControls == NULL) { ret = -ENOMEM; goto error; } buflen -= buffer[0]; buffer += buffer[0]; _buffer = buffer; _buflen = buflen; /* Count the format and frame descriptors. */ while (_buflen > 2 && _buffer[1] == USB_DT_CS_INTERFACE) { switch (_buffer[2]) { case UVC_VS_FORMAT_UNCOMPRESSED: case UVC_VS_FORMAT_MJPEG: case UVC_VS_FORMAT_FRAME_BASED: nformats++; break; case UVC_VS_FORMAT_DV: /* * DV format has no frame descriptor. We will create a * dummy frame descriptor with a dummy frame interval. */ nformats++; nframes++; nintervals++; break; case UVC_VS_FORMAT_MPEG2TS: case UVC_VS_FORMAT_STREAM_BASED: uvc_dbg(dev, DESCR, "device %d videostreaming interface %d FORMAT %u is not supported\n", dev->udev->devnum, alts->desc.bInterfaceNumber, _buffer[2]); break; case UVC_VS_FRAME_UNCOMPRESSED: case UVC_VS_FRAME_MJPEG: nframes++; if (_buflen > 25) nintervals += _buffer[25] ? _buffer[25] : 3; break; case UVC_VS_FRAME_FRAME_BASED: nframes++; if (_buflen > 21) nintervals += _buffer[21] ? _buffer[21] : 3; break; } _buflen -= _buffer[0]; _buffer += _buffer[0]; } if (nformats == 0) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d has no supported formats defined\n", dev->udev->devnum, alts->desc.bInterfaceNumber); goto error; } /* * Allocate memory for the formats, the frames and the intervals, * plus any required padding to guarantee that everything has the * correct alignment. */ size = nformats * sizeof(*format); size = ALIGN(size, __alignof__(*frame)) + nframes * sizeof(*frame); size = ALIGN(size, __alignof__(*interval)) + nintervals * sizeof(*interval); format = kzalloc(size, GFP_KERNEL); if (!format) { ret = -ENOMEM; goto error; } frame = (void *)format + nformats * sizeof(*format); frame = PTR_ALIGN(frame, __alignof__(*frame)); interval = (void *)frame + nframes * sizeof(*frame); interval = PTR_ALIGN(interval, __alignof__(*interval)); streaming->formats = format; streaming->nformats = 0; /* Parse the format descriptors. */ while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE) { switch (buffer[2]) { case UVC_VS_FORMAT_UNCOMPRESSED: case UVC_VS_FORMAT_MJPEG: case UVC_VS_FORMAT_DV: case UVC_VS_FORMAT_FRAME_BASED: ret = uvc_parse_format(dev, streaming, format, frame, &interval, buffer, buflen); if (ret < 0) goto error; if (!ret) break; streaming->nformats++; frame += format->nframes; format++; buflen -= ret; buffer += ret; continue; default: break; } buflen -= buffer[0]; buffer += buffer[0]; } if (buflen) uvc_dbg(dev, DESCR, "device %d videostreaming interface %d has %u bytes of trailing descriptor garbage\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buflen); /* Parse the alternate settings to find the maximum bandwidth. 
*/ for (i = 0; i < intf->num_altsetting; ++i) { struct usb_host_endpoint *ep; alts = &intf->altsetting[i]; ep = uvc_find_endpoint(alts, streaming->header.bEndpointAddress); if (ep == NULL) continue; psize = uvc_endpoint_max_bpi(dev->udev, ep); if (psize > streaming->maxpsize) streaming->maxpsize = psize; } list_add_tail(&streaming->list, &dev->streams); return 0; error: usb_driver_release_interface(&uvc_driver.driver, intf); uvc_stream_delete(streaming); return ret; } static const u8 uvc_camera_guid[16] = UVC_GUID_UVC_CAMERA; static const u8 uvc_gpio_guid[16] = UVC_GUID_EXT_GPIO_CONTROLLER; static const u8 uvc_media_transport_input_guid[16] = UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT; static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING; static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id, unsigned int num_pads, unsigned int extra_size) { struct uvc_entity *entity; unsigned int num_inputs; unsigned int size; unsigned int i; extra_size = roundup(extra_size, sizeof(*entity->pads)); if (num_pads) num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1; else num_inputs = 0; size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads + num_inputs; entity = kzalloc(size, GFP_KERNEL); if (entity == NULL) return NULL; entity->id = id; entity->type = type; /* * Set the GUID for standard entity types. For extension units, the GUID * is initialized by the caller. */ switch (type) { case UVC_EXT_GPIO_UNIT: memcpy(entity->guid, uvc_gpio_guid, 16); break; case UVC_ITT_CAMERA: memcpy(entity->guid, uvc_camera_guid, 16); break; case UVC_ITT_MEDIA_TRANSPORT_INPUT: memcpy(entity->guid, uvc_media_transport_input_guid, 16); break; case UVC_VC_PROCESSING_UNIT: memcpy(entity->guid, uvc_processing_guid, 16); break; } entity->num_links = 0; entity->num_pads = num_pads; entity->pads = ((void *)(entity + 1)) + extra_size; for (i = 0; i < num_inputs; ++i) entity->pads[i].flags = MEDIA_PAD_FL_SINK; if (!UVC_ENTITY_IS_OTERM(entity) && num_pads) entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE; entity->bNrInPins = num_inputs; entity->baSourceID = (u8 *)(&entity->pads[num_pads]); return entity; } static void uvc_entity_set_name(struct uvc_device *dev, struct uvc_entity *entity, const char *type_name, u8 string_id) { int ret; /* * First attempt to read the entity name from the device. If the entity * has no associated string, or if reading the string fails (most * likely due to a buggy firmware), fall back to default names based on * the entity type. */ if (string_id) { ret = usb_string(dev->udev, string_id, entity->name, sizeof(entity->name)); if (!ret) return; } sprintf(entity->name, "%s %u", type_name, entity->id); } /* Parse vendor-specific extensions. */ static int uvc_parse_vendor_control(struct uvc_device *dev, const unsigned char *buffer, int buflen) { struct usb_device *udev = dev->udev; struct usb_host_interface *alts = dev->intf->cur_altsetting; struct uvc_entity *unit; unsigned int n, p; int handled = 0; switch (le16_to_cpu(dev->udev->descriptor.idVendor)) { case 0x046d: /* Logitech */ if (buffer[1] != 0x41 || buffer[2] != 0x01) break; /* * Logitech implements several vendor specific functions * through vendor specific extension units (LXU). 
* * The LXU descriptors are similar to XU descriptors * (see "USB Device Video Class for Video Devices", section * 3.7.2.6 "Extension Unit Descriptor") with the following * differences: * * ---------------------------------------------------------- * 0 bLength 1 Number * Size of this descriptor, in bytes: 24+p+n*2 * ---------------------------------------------------------- * 23+p+n bmControlsType N Bitmap * Individual bits in the set are defined: * 0: Absolute * 1: Relative * * This bitset is mapped exactly the same as bmControls. * ---------------------------------------------------------- * 23+p+n*2 bReserved 1 Boolean * ---------------------------------------------------------- * 24+p+n*2 iExtension 1 Index * Index of a string descriptor that describes this * extension unit. * ---------------------------------------------------------- */ p = buflen >= 22 ? buffer[21] : 0; n = buflen >= 25 + p ? buffer[22+p] : 0; if (buflen < 25 + p + 2*n) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d EXTENSION_UNIT error\n", udev->devnum, alts->desc.bInterfaceNumber); break; } unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3], p + 1, 2*n); if (unit == NULL) return -ENOMEM; memcpy(unit->guid, &buffer[4], 16); unit->extension.bNumControls = buffer[20]; memcpy(unit->baSourceID, &buffer[22], p); unit->extension.bControlSize = buffer[22+p]; unit->extension.bmControls = (u8 *)unit + sizeof(*unit); unit->extension.bmControlsType = (u8 *)unit + sizeof(*unit) + n; memcpy(unit->extension.bmControls, &buffer[23+p], 2*n); uvc_entity_set_name(dev, unit, "Extension", buffer[24+p+2*n]); list_add_tail(&unit->list, &dev->entities); handled = 1; break; } return handled; } static int uvc_parse_standard_control(struct uvc_device *dev, const unsigned char *buffer, int buflen) { struct usb_device *udev = dev->udev; struct uvc_entity *unit, *term; struct usb_interface *intf; struct usb_host_interface *alts = dev->intf->cur_altsetting; unsigned int i, n, p, len; const char *type_name; u16 type; switch (buffer[2]) { case UVC_VC_HEADER: n = buflen >= 12 ? buffer[11] : 0; if (buflen < 12 + n) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d HEADER error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } dev->uvc_version = get_unaligned_le16(&buffer[3]); dev->clock_frequency = get_unaligned_le32(&buffer[7]); /* Parse all USB Video Streaming interfaces. */ for (i = 0; i < n; ++i) { intf = usb_ifnum_to_if(udev, buffer[12+i]); if (intf == NULL) { uvc_dbg(dev, DESCR, "device %d interface %d doesn't exist\n", udev->devnum, i); continue; } uvc_parse_streaming(dev, intf); } break; case UVC_VC_INPUT_TERMINAL: if (buflen < 8) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d INPUT_TERMINAL error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } /* * Reject invalid terminal types that would cause issues: * * - The high byte must be non-zero, otherwise it would be * confused with a unit. * * - Bit 15 must be 0, as we use it internally as a terminal * direction flag. * * Other unknown types are accepted. */ type = get_unaligned_le16(&buffer[4]); if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d INPUT_TERMINAL %d has invalid type 0x%04x, skipping\n", udev->devnum, alts->desc.bInterfaceNumber, buffer[3], type); return 0; } n = 0; p = 0; len = 8; if (type == UVC_ITT_CAMERA) { n = buflen >= 15 ? buffer[14] : 0; len = 15; } else if (type == UVC_ITT_MEDIA_TRANSPORT_INPUT) { n = buflen >= 9 ?
buffer[8] : 0; p = buflen >= 10 + n ? buffer[9+n] : 0; len = 10; } if (buflen < len + n + p) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d INPUT_TERMINAL error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3], 1, n + p); if (term == NULL) return -ENOMEM; if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) { term->camera.bControlSize = n; term->camera.bmControls = (u8 *)term + sizeof(*term); term->camera.wObjectiveFocalLengthMin = get_unaligned_le16(&buffer[8]); term->camera.wObjectiveFocalLengthMax = get_unaligned_le16(&buffer[10]); term->camera.wOcularFocalLength = get_unaligned_le16(&buffer[12]); memcpy(term->camera.bmControls, &buffer[15], n); } else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT) { term->media.bControlSize = n; term->media.bmControls = (u8 *)term + sizeof(*term); term->media.bTransportModeSize = p; term->media.bmTransportModes = (u8 *)term + sizeof(*term) + n; memcpy(term->media.bmControls, &buffer[9], n); memcpy(term->media.bmTransportModes, &buffer[10+n], p); } if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) type_name = "Camera"; else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT) type_name = "Media"; else type_name = "Input"; uvc_entity_set_name(dev, term, type_name, buffer[7]); list_add_tail(&term->list, &dev->entities); break; case UVC_VC_OUTPUT_TERMINAL: if (buflen < 9) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d OUTPUT_TERMINAL error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } /* * Make sure the terminal type MSB is not null, otherwise it * could be confused with a unit. */ type = get_unaligned_le16(&buffer[4]); if ((type & 0xff00) == 0) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d OUTPUT_TERMINAL %d has invalid type 0x%04x, skipping\n", udev->devnum, alts->desc.bInterfaceNumber, buffer[3], type); return 0; } term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3], 1, 0); if (term == NULL) return -ENOMEM; memcpy(term->baSourceID, &buffer[7], 1); uvc_entity_set_name(dev, term, "Output", buffer[8]); list_add_tail(&term->list, &dev->entities); break; case UVC_VC_SELECTOR_UNIT: p = buflen >= 5 ? buffer[4] : 0; if (buflen < 5 || buflen < 6 + p) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d SELECTOR_UNIT error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0); if (unit == NULL) return -ENOMEM; memcpy(unit->baSourceID, &buffer[5], p); uvc_entity_set_name(dev, unit, "Selector", buffer[5+p]); list_add_tail(&unit->list, &dev->entities); break; case UVC_VC_PROCESSING_UNIT: n = buflen >= 8 ? buffer[7] : 0; p = dev->uvc_version >= 0x0110 ? 10 : 9; if (buflen < p + n) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d PROCESSING_UNIT error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n); if (unit == NULL) return -ENOMEM; memcpy(unit->baSourceID, &buffer[4], 1); unit->processing.wMaxMultiplier = get_unaligned_le16(&buffer[5]); unit->processing.bControlSize = buffer[7]; unit->processing.bmControls = (u8 *)unit + sizeof(*unit); memcpy(unit->processing.bmControls, &buffer[8], n); if (dev->uvc_version >= 0x0110) unit->processing.bmVideoStandards = buffer[9+n]; uvc_entity_set_name(dev, unit, "Processing", buffer[8+n]); list_add_tail(&unit->list, &dev->entities); break; case UVC_VC_EXTENSION_UNIT: p = buflen >= 22 ? buffer[21] : 0; n = buflen >= 24 + p ? 
buffer[22+p] : 0; if (buflen < 24 + p + n) { uvc_dbg(dev, DESCR, "device %d videocontrol interface %d EXTENSION_UNIT error\n", udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n); if (unit == NULL) return -ENOMEM; memcpy(unit->guid, &buffer[4], 16); unit->extension.bNumControls = buffer[20]; memcpy(unit->baSourceID, &buffer[22], p); unit->extension.bControlSize = buffer[22+p]; unit->extension.bmControls = (u8 *)unit + sizeof(*unit); memcpy(unit->extension.bmControls, &buffer[23+p], n); uvc_entity_set_name(dev, unit, "Extension", buffer[23+p+n]); list_add_tail(&unit->list, &dev->entities); break; default: uvc_dbg(dev, DESCR, "Found an unknown CS_INTERFACE descriptor (%u)\n", buffer[2]); break; } return 0; } static int uvc_parse_control(struct uvc_device *dev) { struct usb_host_interface *alts = dev->intf->cur_altsetting; const unsigned char *buffer = alts->extra; int buflen = alts->extralen; int ret; /* * Parse the default alternate setting only, as the UVC specification * defines a single alternate setting, the default alternate setting * zero. */ while (buflen > 2) { if (uvc_parse_vendor_control(dev, buffer, buflen) || buffer[1] != USB_DT_CS_INTERFACE) goto next_descriptor; ret = uvc_parse_standard_control(dev, buffer, buflen); if (ret < 0) return ret; next_descriptor: buflen -= buffer[0]; buffer += buffer[0]; } /* * Check if the optional status endpoint is present. Built-in iSight * webcams have an interrupt endpoint but spit proprietary data that * don't conform to the UVC status endpoint messages. Don't try to * handle the interrupt endpoint for those cameras. */ if (alts->desc.bNumEndpoints == 1 && !(dev->quirks & UVC_QUIRK_BUILTIN_ISIGHT)) { struct usb_host_endpoint *ep = &alts->endpoint[0]; struct usb_endpoint_descriptor *desc = &ep->desc; if (usb_endpoint_is_int_in(desc) && le16_to_cpu(desc->wMaxPacketSize) >= 8 && desc->bInterval != 0) { uvc_dbg(dev, DESCR, "Found a Status endpoint (addr %02x)\n", desc->bEndpointAddress); dev->int_ep = ep; } } return 0; } /* ----------------------------------------------------------------------------- * Privacy GPIO */ static void uvc_gpio_event(struct uvc_device *dev) { struct uvc_entity *unit = dev->gpio_unit; struct uvc_video_chain *chain; u8 new_val; if (!unit) return; new_val = gpiod_get_value_cansleep(unit->gpio.gpio_privacy); /* GPIO entities are always on the first chain. 
*/ chain = list_first_entry(&dev->chains, struct uvc_video_chain, list); uvc_ctrl_status_event(chain, unit->controls, &new_val); } static int uvc_gpio_get_cur(struct uvc_device *dev, struct uvc_entity *entity, u8 cs, void *data, u16 size) { if (cs != UVC_CT_PRIVACY_CONTROL || size < 1) return -EINVAL; *(u8 *)data = gpiod_get_value_cansleep(entity->gpio.gpio_privacy); return 0; } static int uvc_gpio_get_info(struct uvc_device *dev, struct uvc_entity *entity, u8 cs, u8 *caps) { if (cs != UVC_CT_PRIVACY_CONTROL) return -EINVAL; *caps = UVC_CONTROL_CAP_GET | UVC_CONTROL_CAP_AUTOUPDATE; return 0; } static irqreturn_t uvc_gpio_irq(int irq, void *data) { struct uvc_device *dev = data; uvc_gpio_event(dev); return IRQ_HANDLED; } static int uvc_gpio_parse(struct uvc_device *dev) { struct uvc_entity *unit; struct gpio_desc *gpio_privacy; int irq; gpio_privacy = devm_gpiod_get_optional(&dev->udev->dev, "privacy", GPIOD_IN); if (IS_ERR_OR_NULL(gpio_privacy)) return PTR_ERR_OR_ZERO(gpio_privacy); irq = gpiod_to_irq(gpio_privacy); if (irq < 0) return dev_err_probe(&dev->udev->dev, irq, "No IRQ for privacy GPIO\n"); unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1); if (!unit) return -ENOMEM; unit->gpio.gpio_privacy = gpio_privacy; unit->gpio.irq = irq; unit->gpio.bControlSize = 1; unit->gpio.bmControls = (u8 *)unit + sizeof(*unit); unit->gpio.bmControls[0] = 1; unit->get_cur = uvc_gpio_get_cur; unit->get_info = uvc_gpio_get_info; strscpy(unit->name, "GPIO", sizeof(unit->name)); list_add_tail(&unit->list, &dev->entities); dev->gpio_unit = unit; return 0; } static int uvc_gpio_init_irq(struct uvc_device *dev) { struct uvc_entity *unit = dev->gpio_unit; if (!unit || unit->gpio.irq < 0) return 0; return devm_request_threaded_irq(&dev->udev->dev, unit->gpio.irq, NULL, uvc_gpio_irq, IRQF_ONESHOT | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, "uvc_privacy_gpio", dev); } /* ------------------------------------------------------------------------ * UVC device scan */ /* * Scan the UVC descriptors to locate a chain starting at an Output Terminal * and containing the following units: * * - one or more Output Terminals (USB Streaming or Display) * - zero or one Processing Unit * - zero, one or more single-input Selector Units * - zero or one multiple-input Selector Unit, provided all inputs are * connected to input terminals * - zero, one or more single-input Extension Units * - one or more Input Terminals (Camera, External or USB Streaming) * * The terminal and units must match one of the following structures: * * ITT_*(0) -> +---------+ +---------+ +---------+ -> TT_STREAMING(0) * ... | SU{0,1} | -> | PU{0,1} | -> | XU{0,n} | ... * ITT_*(n) -> +---------+ +---------+ +---------+ -> TT_STREAMING(n) * * +---------+ +---------+ -> OTT_*(0) * TT_STREAMING -> | PU{0,1} | -> | XU{0,n} | ... * +---------+ +---------+ -> OTT_*(n) * * The Processing Unit and Extension Units can be in any order. Additional * Extension Units connected to the main chain as single-unit branches are * also supported. Single-input Selector Units are ignored.
*/ static int uvc_scan_chain_entity(struct uvc_video_chain *chain, struct uvc_entity *entity) { switch (UVC_ENTITY_TYPE(entity)) { case UVC_VC_EXTENSION_UNIT: uvc_dbg_cont(PROBE, " <- XU %d", entity->id); if (entity->bNrInPins != 1) { uvc_dbg(chain->dev, DESCR, "Extension unit %d has more than 1 input pin\n", entity->id); return -1; } break; case UVC_VC_PROCESSING_UNIT: uvc_dbg_cont(PROBE, " <- PU %d", entity->id); if (chain->processing != NULL) { uvc_dbg(chain->dev, DESCR, "Found multiple Processing Units in chain\n"); return -1; } chain->processing = entity; break; case UVC_VC_SELECTOR_UNIT: uvc_dbg_cont(PROBE, " <- SU %d", entity->id); /* Single-input selector units are ignored. */ if (entity->bNrInPins == 1) break; if (chain->selector != NULL) { uvc_dbg(chain->dev, DESCR, "Found multiple Selector Units in chain\n"); return -1; } chain->selector = entity; break; case UVC_ITT_VENDOR_SPECIFIC: case UVC_ITT_CAMERA: case UVC_ITT_MEDIA_TRANSPORT_INPUT: uvc_dbg_cont(PROBE, " <- IT %d\n", entity->id); break; case UVC_OTT_VENDOR_SPECIFIC: case UVC_OTT_DISPLAY: case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: uvc_dbg_cont(PROBE, " OT %d", entity->id); break; case UVC_TT_STREAMING: if (UVC_ENTITY_IS_ITERM(entity)) uvc_dbg_cont(PROBE, " <- IT %d\n", entity->id); else uvc_dbg_cont(PROBE, " OT %d", entity->id); break; default: uvc_dbg(chain->dev, DESCR, "Unsupported entity type 0x%04x found in chain\n", UVC_ENTITY_TYPE(entity)); return -1; } list_add_tail(&entity->chain, &chain->entities); return 0; } static int uvc_scan_chain_forward(struct uvc_video_chain *chain, struct uvc_entity *entity, struct uvc_entity *prev) { struct uvc_entity *forward; int found; /* Forward scan */ forward = NULL; found = 0; while (1) { forward = uvc_entity_by_reference(chain->dev, entity->id, forward); if (forward == NULL) break; if (forward == prev) continue; if (forward->chain.next || forward->chain.prev) { uvc_dbg(chain->dev, DESCR, "Found reference to entity %d already in chain\n", forward->id); return -EINVAL; } switch (UVC_ENTITY_TYPE(forward)) { case UVC_VC_EXTENSION_UNIT: if (forward->bNrInPins != 1) { uvc_dbg(chain->dev, DESCR, "Extension unit %d has more than 1 input pin\n", forward->id); return -EINVAL; } /* * Some devices reference an output terminal as the * source of extension units. This is incorrect, as * output terminals only have an input pin, and thus * can't be connected to any entity in the forward * direction. The resulting topology would cause issues * when registering the media controller graph. To * avoid this problem, connect the extension unit to * the source of the output terminal instead. 
*/ if (UVC_ENTITY_IS_OTERM(entity)) { struct uvc_entity *source; source = uvc_entity_by_id(chain->dev, entity->baSourceID[0]); if (!source) { uvc_dbg(chain->dev, DESCR, "Can't connect extension unit %u in chain\n", forward->id); break; } forward->baSourceID[0] = source->id; } list_add_tail(&forward->chain, &chain->entities); if (!found) uvc_dbg_cont(PROBE, " (->"); uvc_dbg_cont(PROBE, " XU %d", forward->id); found = 1; break; case UVC_OTT_VENDOR_SPECIFIC: case UVC_OTT_DISPLAY: case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: case UVC_TT_STREAMING: if (UVC_ENTITY_IS_ITERM(forward)) { uvc_dbg(chain->dev, DESCR, "Unsupported input terminal %u\n", forward->id); return -EINVAL; } if (UVC_ENTITY_IS_OTERM(entity)) { uvc_dbg(chain->dev, DESCR, "Unsupported connection between output terminals %u and %u\n", entity->id, forward->id); break; } list_add_tail(&forward->chain, &chain->entities); if (!found) uvc_dbg_cont(PROBE, " (->"); uvc_dbg_cont(PROBE, " OT %d", forward->id); found = 1; break; } } if (found) uvc_dbg_cont(PROBE, ")"); return 0; } static int uvc_scan_chain_backward(struct uvc_video_chain *chain, struct uvc_entity **_entity) { struct uvc_entity *entity = *_entity; struct uvc_entity *term; int id = -EINVAL, i; switch (UVC_ENTITY_TYPE(entity)) { case UVC_VC_EXTENSION_UNIT: case UVC_VC_PROCESSING_UNIT: id = entity->baSourceID[0]; break; case UVC_VC_SELECTOR_UNIT: /* Single-input selector units are ignored. */ if (entity->bNrInPins == 1) { id = entity->baSourceID[0]; break; } uvc_dbg_cont(PROBE, " <- IT"); chain->selector = entity; for (i = 0; i < entity->bNrInPins; ++i) { id = entity->baSourceID[i]; term = uvc_entity_by_id(chain->dev, id); if (term == NULL || !UVC_ENTITY_IS_ITERM(term)) { uvc_dbg(chain->dev, DESCR, "Selector unit %d input %d isn't connected to an input terminal\n", entity->id, i); return -1; } if (term->chain.next || term->chain.prev) { uvc_dbg(chain->dev, DESCR, "Found reference to entity %d already in chain\n", term->id); return -EINVAL; } uvc_dbg_cont(PROBE, " %d", term->id); list_add_tail(&term->chain, &chain->entities); uvc_scan_chain_forward(chain, term, entity); } uvc_dbg_cont(PROBE, "\n"); id = 0; break; case UVC_ITT_VENDOR_SPECIFIC: case UVC_ITT_CAMERA: case UVC_ITT_MEDIA_TRANSPORT_INPUT: case UVC_OTT_VENDOR_SPECIFIC: case UVC_OTT_DISPLAY: case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: case UVC_TT_STREAMING: id = UVC_ENTITY_IS_OTERM(entity) ? 
entity->baSourceID[0] : 0; break; } if (id <= 0) { *_entity = NULL; return id; } entity = uvc_entity_by_id(chain->dev, id); if (entity == NULL) { uvc_dbg(chain->dev, DESCR, "Found reference to unknown entity %d\n", id); return -EINVAL; } *_entity = entity; return 0; } static int uvc_scan_chain(struct uvc_video_chain *chain, struct uvc_entity *term) { struct uvc_entity *entity, *prev; uvc_dbg(chain->dev, PROBE, "Scanning UVC chain:"); entity = term; prev = NULL; while (entity != NULL) { /* Entity must not be part of an existing chain */ if (entity->chain.next || entity->chain.prev) { uvc_dbg(chain->dev, DESCR, "Found reference to entity %d already in chain\n", entity->id); return -EINVAL; } /* Process entity */ if (uvc_scan_chain_entity(chain, entity) < 0) return -EINVAL; /* Forward scan */ if (uvc_scan_chain_forward(chain, entity, prev) < 0) return -EINVAL; /* Backward scan */ prev = entity; if (uvc_scan_chain_backward(chain, &entity) < 0) return -EINVAL; } return 0; } static unsigned int uvc_print_terms(struct list_head *terms, u16 dir, char *buffer) { struct uvc_entity *term; unsigned int nterms = 0; char *p = buffer; list_for_each_entry(term, terms, chain) { if (!UVC_ENTITY_IS_TERM(term) || UVC_TERM_DIRECTION(term) != dir) continue; if (nterms) p += sprintf(p, ","); if (++nterms >= 4) { p += sprintf(p, "..."); break; } p += sprintf(p, "%u", term->id); } return p - buffer; } static const char *uvc_print_chain(struct uvc_video_chain *chain) { static char buffer[43]; char *p = buffer; p += uvc_print_terms(&chain->entities, UVC_TERM_INPUT, p); p += sprintf(p, " -> "); uvc_print_terms(&chain->entities, UVC_TERM_OUTPUT, p); return buffer; } static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev) { struct uvc_video_chain *chain; chain = kzalloc(sizeof(*chain), GFP_KERNEL); if (chain == NULL) return NULL; INIT_LIST_HEAD(&chain->entities); mutex_init(&chain->ctrl_mutex); chain->dev = dev; v4l2_prio_init(&chain->prio); return chain; } /* * Fallback heuristic for devices that don't connect units and terminals in a * valid chain. * * Some devices have invalid baSourceID references, causing uvc_scan_chain() * to fail, but if we just take the entities we can find and put them together * in the most sensible chain we can think of, turns out they do work anyway. * Note: This heuristic assumes there is a single chain. * * At the time of writing, devices known to have such a broken chain are * - Acer Integrated Camera (5986:055a) * - Realtek rtl157a7 (0bda:57a7) */ static int uvc_scan_fallback(struct uvc_device *dev) { struct uvc_video_chain *chain; struct uvc_entity *iterm = NULL; struct uvc_entity *oterm = NULL; struct uvc_entity *entity; struct uvc_entity *prev; /* * Start by locating the input and output terminals. We only support * devices with exactly one of each for now. */ list_for_each_entry(entity, &dev->entities, list) { if (UVC_ENTITY_IS_ITERM(entity)) { if (iterm) return -EINVAL; iterm = entity; } if (UVC_ENTITY_IS_OTERM(entity)) { if (oterm) return -EINVAL; oterm = entity; } } if (iterm == NULL || oterm == NULL) return -EINVAL; /* Allocate the chain and fill it. */ chain = uvc_alloc_chain(dev); if (chain == NULL) return -ENOMEM; if (uvc_scan_chain_entity(chain, oterm) < 0) goto error; prev = oterm; /* * Add all Processing and Extension Units with two pads. The order * doesn't matter much, use reverse list traversal to connect units in * UVC descriptor order as we build the chain from output to input. 
This * leads to units appearing in the order meant by the manufacturer for * the cameras known to require this heuristic. */ list_for_each_entry_reverse(entity, &dev->entities, list) { if (entity->type != UVC_VC_PROCESSING_UNIT && entity->type != UVC_VC_EXTENSION_UNIT) continue; if (entity->num_pads != 2) continue; if (uvc_scan_chain_entity(chain, entity) < 0) goto error; prev->baSourceID[0] = entity->id; prev = entity; } if (uvc_scan_chain_entity(chain, iterm) < 0) goto error; prev->baSourceID[0] = iterm->id; list_add_tail(&chain->list, &dev->chains); uvc_dbg(dev, PROBE, "Found a video chain by fallback heuristic (%s)\n", uvc_print_chain(chain)); return 0; error: kfree(chain); return -EINVAL; } /* * Scan the device for video chains and register video devices. * * Chains are scanned starting at their output terminals and walked backwards. */ static int uvc_scan_device(struct uvc_device *dev) { struct uvc_video_chain *chain; struct uvc_entity *term; list_for_each_entry(term, &dev->entities, list) { if (!UVC_ENTITY_IS_OTERM(term)) continue; /* * If the terminal is already included in a chain, skip it. * This can happen for chains that have multiple output * terminals, where all output terminals beside the first one * will be inserted in the chain in forward scans. */ if (term->chain.next || term->chain.prev) continue; chain = uvc_alloc_chain(dev); if (chain == NULL) return -ENOMEM; term->flags |= UVC_ENTITY_FLAG_DEFAULT; if (uvc_scan_chain(chain, term) < 0) { kfree(chain); continue; } uvc_dbg(dev, PROBE, "Found a valid video chain (%s)\n", uvc_print_chain(chain)); list_add_tail(&chain->list, &dev->chains); } if (list_empty(&dev->chains)) uvc_scan_fallback(dev); if (list_empty(&dev->chains)) { dev_info(&dev->udev->dev, "No valid video chain found.\n"); return -1; } /* Add GPIO entity to the first chain. */ if (dev->gpio_unit) { chain = list_first_entry(&dev->chains, struct uvc_video_chain, list); list_add_tail(&dev->gpio_unit->chain, &chain->entities); } return 0; } /* ------------------------------------------------------------------------ * Video device registration and unregistration */ /* * Delete the UVC device. * * Called by the kernel when the last reference to the uvc_device structure * is released. * * As this function is called after or during disconnect(), all URBs have * already been cancelled by the USB core. There is no need to kill the * interrupt URB manually. */ static void uvc_delete(struct kref *kref) { struct uvc_device *dev = container_of(kref, struct uvc_device, ref); struct list_head *p, *n; uvc_status_cleanup(dev); uvc_ctrl_cleanup_device(dev); usb_put_intf(dev->intf); usb_put_dev(dev->udev); #ifdef CONFIG_MEDIA_CONTROLLER media_device_cleanup(&dev->mdev); #endif list_for_each_safe(p, n, &dev->chains) { struct uvc_video_chain *chain; chain = list_entry(p, struct uvc_video_chain, list); kfree(chain); } list_for_each_safe(p, n, &dev->entities) { struct uvc_entity *entity; entity = list_entry(p, struct uvc_entity, list); #ifdef CONFIG_MEDIA_CONTROLLER uvc_mc_cleanup_entity(entity); #endif kfree(entity); } list_for_each_safe(p, n, &dev->streams) { struct uvc_streaming *streaming; streaming = list_entry(p, struct uvc_streaming, list); usb_driver_release_interface(&uvc_driver.driver, streaming->intf); uvc_stream_delete(streaming); } kfree(dev); } static void uvc_release(struct video_device *vdev) { struct uvc_streaming *stream = video_get_drvdata(vdev); struct uvc_device *dev = stream->dev; kref_put(&dev->ref, uvc_delete); } /* * Unregister the video devices. 
*/ static void uvc_unregister_video(struct uvc_device *dev) { struct uvc_streaming *stream; list_for_each_entry(stream, &dev->streams, list) { if (!video_is_registered(&stream->vdev)) continue; video_unregister_device(&stream->vdev); video_unregister_device(&stream->meta.vdev); uvc_debugfs_cleanup_stream(stream); } uvc_status_unregister(dev); if (dev->vdev.dev) v4l2_device_unregister(&dev->vdev); #ifdef CONFIG_MEDIA_CONTROLLER if (media_devnode_is_registered(dev->mdev.devnode)) media_device_unregister(&dev->mdev); #endif } int uvc_register_video_device(struct uvc_device *dev, struct uvc_streaming *stream, struct video_device *vdev, struct uvc_video_queue *queue, enum v4l2_buf_type type, const struct v4l2_file_operations *fops, const struct v4l2_ioctl_ops *ioctl_ops) { int ret; /* Initialize the video buffers queue. */ ret = uvc_queue_init(queue, type, !uvc_no_drop_param); if (ret) return ret; /* Register the device with V4L. */ /* * We already hold a reference to dev->udev. The video device will be * unregistered before the reference is released, so we don't need to * get another one. */ vdev->v4l2_dev = &dev->vdev; vdev->fops = fops; vdev->ioctl_ops = ioctl_ops; vdev->release = uvc_release; vdev->prio = &stream->chain->prio; if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT) vdev->vfl_dir = VFL_DIR_TX; else vdev->vfl_dir = VFL_DIR_RX; switch (type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: default: vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; break; case V4L2_BUF_TYPE_META_CAPTURE: vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING; break; } strscpy(vdev->name, dev->name, sizeof(vdev->name)); /* * Set the driver data before calling video_register_device, otherwise * the file open() handler might race us. */ video_set_drvdata(vdev, stream); ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) { dev_err(&stream->intf->dev, "Failed to register %s device (%d).\n", v4l2_type_names[type], ret); return ret; } kref_get(&dev->ref); return 0; } static int uvc_register_video(struct uvc_device *dev, struct uvc_streaming *stream) { int ret; /* Initialize the streaming interface with default parameters. */ ret = uvc_video_init(stream); if (ret < 0) { dev_err(&stream->intf->dev, "Failed to initialize the device (%d).\n", ret); return ret; } if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) stream->chain->caps |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE; else stream->chain->caps |= V4L2_CAP_VIDEO_OUTPUT; uvc_debugfs_init_stream(stream); /* Register the device with V4L. */ return uvc_register_video_device(dev, stream, &stream->vdev, &stream->queue, stream->type, &uvc_fops, &uvc_ioctl_ops); } /* * Register all video devices in all chains. */ static int uvc_register_terms(struct uvc_device *dev, struct uvc_video_chain *chain) { struct uvc_streaming *stream; struct uvc_entity *term; int ret; list_for_each_entry(term, &chain->entities, chain) { if (UVC_ENTITY_TYPE(term) != UVC_TT_STREAMING) continue; stream = uvc_stream_by_id(dev, term->id); if (stream == NULL) { dev_info(&dev->udev->dev, "No streaming interface found for terminal %u.", term->id); continue; } stream->chain = chain; ret = uvc_register_video(dev, stream); if (ret < 0) return ret; /* * Register a metadata node, but ignore a possible failure, * complete registration of video nodes anyway. 
*/ uvc_meta_register(stream); term->vdev = &stream->vdev; } return 0; } static int uvc_register_chains(struct uvc_device *dev) { struct uvc_video_chain *chain; int ret; list_for_each_entry(chain, &dev->chains, list) { ret = uvc_register_terms(dev, chain); if (ret < 0) return ret; #ifdef CONFIG_MEDIA_CONTROLLER ret = uvc_mc_register_entities(chain); if (ret < 0) dev_info(&dev->udev->dev, "Failed to register entities (%d).\n", ret); #endif } return 0; } /* ------------------------------------------------------------------------ * USB probe, disconnect, suspend and resume */ static const struct uvc_device_info uvc_quirk_none = { 0 }; static int uvc_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct uvc_device *dev; const struct uvc_device_info *info = (const struct uvc_device_info *)id->driver_info; int function; int ret; /* Allocate memory for the device and initialize it. */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) return -ENOMEM; INIT_LIST_HEAD(&dev->entities); INIT_LIST_HEAD(&dev->chains); INIT_LIST_HEAD(&dev->streams); kref_init(&dev->ref); atomic_set(&dev->nmappings, 0); mutex_init(&dev->lock); dev->udev = usb_get_dev(udev); dev->intf = usb_get_intf(intf); dev->intfnum = intf->cur_altsetting->desc.bInterfaceNumber; dev->info = info ? info : &uvc_quirk_none; dev->quirks = uvc_quirks_param == -1 ? dev->info->quirks : uvc_quirks_param; if (id->idVendor && id->idProduct) uvc_dbg(dev, PROBE, "Probing known UVC device %s (%04x:%04x)\n", udev->devpath, id->idVendor, id->idProduct); else uvc_dbg(dev, PROBE, "Probing generic UVC device %s\n", udev->devpath); if (udev->product != NULL) strscpy(dev->name, udev->product, sizeof(dev->name)); else snprintf(dev->name, sizeof(dev->name), "UVC Camera (%04x:%04x)", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); /* * Add iFunction or iInterface to names when available as additional * distinguishers between interfaces. iFunction is prioritized over * iInterface which matches Windows behavior at the point of writing. */ if (intf->intf_assoc && intf->intf_assoc->iFunction != 0) function = intf->intf_assoc->iFunction; else function = intf->cur_altsetting->desc.iInterface; if (function != 0) { size_t len; strlcat(dev->name, ": ", sizeof(dev->name)); len = strlen(dev->name); usb_string(udev, function, dev->name + len, sizeof(dev->name) - len); } /* Initialize the media device. */ #ifdef CONFIG_MEDIA_CONTROLLER dev->mdev.dev = &intf->dev; strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model)); if (udev->serial) strscpy(dev->mdev.serial, udev->serial, sizeof(dev->mdev.serial)); usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info)); dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); media_device_init(&dev->mdev); dev->vdev.mdev = &dev->mdev; #endif /* Parse the Video Class control descriptor. */ if (uvc_parse_control(dev) < 0) { uvc_dbg(dev, PROBE, "Unable to parse UVC descriptors\n"); goto error; } /* Parse the associated GPIOs. */ if (uvc_gpio_parse(dev) < 0) { uvc_dbg(dev, PROBE, "Unable to parse UVC GPIOs\n"); goto error; } dev_info(&dev->udev->dev, "Found UVC %u.%02x device %s (%04x:%04x)\n", dev->uvc_version >> 8, dev->uvc_version & 0xff, udev->product ? 
udev->product : "<unnamed>", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); if (dev->quirks != dev->info->quirks) { dev_info(&dev->udev->dev, "Forcing device quirks to 0x%x by module parameter for testing purpose.\n", dev->quirks); dev_info(&dev->udev->dev, "Please report required quirks to the linux-media mailing list.\n"); } if (dev->info->uvc_version) { dev->uvc_version = dev->info->uvc_version; dev_info(&dev->udev->dev, "Forcing UVC version to %u.%02x\n", dev->uvc_version >> 8, dev->uvc_version & 0xff); } /* Register the V4L2 device. */ if (v4l2_device_register(&intf->dev, &dev->vdev) < 0) goto error; /* Scan the device for video chains. */ if (uvc_scan_device(dev) < 0) goto error; /* Initialize controls. */ if (uvc_ctrl_init_device(dev) < 0) goto error; /* Register video device nodes. */ if (uvc_register_chains(dev) < 0) goto error; #ifdef CONFIG_MEDIA_CONTROLLER /* Register the media device node */ if (media_device_register(&dev->mdev) < 0) goto error; #endif /* Save our data pointer in the interface data. */ usb_set_intfdata(intf, dev); /* Initialize the interrupt URB. */ ret = uvc_status_init(dev); if (ret < 0) { dev_info(&dev->udev->dev, "Unable to initialize the status endpoint (%d), status interrupt will not be supported.\n", ret); } ret = uvc_gpio_init_irq(dev); if (ret < 0) { dev_err(&dev->udev->dev, "Unable to request privacy GPIO IRQ (%d)\n", ret); goto error; } if (dev->quirks & UVC_QUIRK_NO_RESET_RESUME) udev->quirks &= ~USB_QUIRK_RESET_RESUME; if (!(dev->quirks & UVC_QUIRK_DISABLE_AUTOSUSPEND)) usb_enable_autosuspend(udev); uvc_dbg(dev, PROBE, "UVC device initialized\n"); return 0; error: uvc_unregister_video(dev); kref_put(&dev->ref, uvc_delete); return -ENODEV; } static void uvc_disconnect(struct usb_interface *intf) { struct uvc_device *dev = usb_get_intfdata(intf); /* * Set the USB interface data to NULL. This can be done outside the * lock, as there's no other reader. */ usb_set_intfdata(intf, NULL); if (intf->cur_altsetting->desc.bInterfaceSubClass == UVC_SC_VIDEOSTREAMING) return; uvc_unregister_video(dev); kref_put(&dev->ref, uvc_delete); } static int uvc_suspend(struct usb_interface *intf, pm_message_t message) { struct uvc_device *dev = usb_get_intfdata(intf); struct uvc_streaming *stream; uvc_dbg(dev, SUSPEND, "Suspending interface %u\n", intf->cur_altsetting->desc.bInterfaceNumber); /* Controls are cached on the fly so they don't need to be saved. 
*/ if (intf->cur_altsetting->desc.bInterfaceSubClass == UVC_SC_VIDEOCONTROL) { mutex_lock(&dev->lock); if (dev->users) uvc_status_stop(dev); mutex_unlock(&dev->lock); return 0; } list_for_each_entry(stream, &dev->streams, list) { if (stream->intf == intf) return uvc_video_suspend(stream); } uvc_dbg(dev, SUSPEND, "Suspend: video streaming USB interface mismatch\n"); return -EINVAL; } static int __uvc_resume(struct usb_interface *intf, int reset) { struct uvc_device *dev = usb_get_intfdata(intf); struct uvc_streaming *stream; int ret = 0; uvc_dbg(dev, SUSPEND, "Resuming interface %u\n", intf->cur_altsetting->desc.bInterfaceNumber); if (intf->cur_altsetting->desc.bInterfaceSubClass == UVC_SC_VIDEOCONTROL) { if (reset) { ret = uvc_ctrl_restore_values(dev); if (ret < 0) return ret; } mutex_lock(&dev->lock); if (dev->users) ret = uvc_status_start(dev, GFP_NOIO); mutex_unlock(&dev->lock); return ret; } list_for_each_entry(stream, &dev->streams, list) { if (stream->intf == intf) { ret = uvc_video_resume(stream, reset); if (ret < 0) uvc_queue_streamoff(&stream->queue, stream->queue.queue.type); return ret; } } uvc_dbg(dev, SUSPEND, "Resume: video streaming USB interface mismatch\n"); return -EINVAL; } static int uvc_resume(struct usb_interface *intf) { return __uvc_resume(intf, 0); } static int uvc_reset_resume(struct usb_interface *intf) { return __uvc_resume(intf, 1); } /* ------------------------------------------------------------------------ * Module parameters */ static int uvc_clock_param_get(char *buffer, const struct kernel_param *kp) { if (uvc_clock_param == CLOCK_MONOTONIC) return sprintf(buffer, "CLOCK_MONOTONIC"); else return sprintf(buffer, "CLOCK_REALTIME"); } static int uvc_clock_param_set(const char *val, const struct kernel_param *kp) { if (strncasecmp(val, "clock_", strlen("clock_")) == 0) val += strlen("clock_"); if (strcasecmp(val, "monotonic") == 0) uvc_clock_param = CLOCK_MONOTONIC; else if (strcasecmp(val, "realtime") == 0) uvc_clock_param = CLOCK_REALTIME; else return -EINVAL; return 0; } module_param_call(clock, uvc_clock_param_set, uvc_clock_param_get, &uvc_clock_param, 0644); MODULE_PARM_DESC(clock, "Video buffers timestamp clock"); module_param_named(hwtimestamps, uvc_hw_timestamps_param, uint, 0644); MODULE_PARM_DESC(hwtimestamps, "Use hardware timestamps"); module_param_named(nodrop, uvc_no_drop_param, uint, 0644); MODULE_PARM_DESC(nodrop, "Don't drop incomplete frames"); module_param_named(quirks, uvc_quirks_param, uint, 0644); MODULE_PARM_DESC(quirks, "Forced device quirks"); module_param_named(trace, uvc_dbg_param, uint, 0644); MODULE_PARM_DESC(trace, "Trace level bitmask"); module_param_named(timeout, uvc_timeout_param, uint, 0644); MODULE_PARM_DESC(timeout, "Streaming control requests timeout"); /* ------------------------------------------------------------------------ * Driver initialization and cleanup */ static const struct uvc_device_info uvc_quirk_probe_minmax = { .quirks = UVC_QUIRK_PROBE_MINMAX, }; static const struct uvc_device_info uvc_quirk_fix_bandwidth = { .quirks = UVC_QUIRK_FIX_BANDWIDTH, }; static const struct uvc_device_info uvc_quirk_probe_def = { .quirks = UVC_QUIRK_PROBE_DEF, }; static const struct uvc_device_info uvc_quirk_stream_no_fid = { .quirks = UVC_QUIRK_STREAM_NO_FID, }; static const struct uvc_device_info uvc_quirk_force_y8 = { .quirks = UVC_QUIRK_FORCE_Y8, }; #define UVC_INFO_QUIRK(q) (kernel_ulong_t)&(struct uvc_device_info){.quirks = q} #define UVC_INFO_META(m) (kernel_ulong_t)&(struct uvc_device_info) \ {.meta_format = m} /* * The 
Logitech cameras listed below have their interface class set to * VENDOR_SPEC because they don't announce themselves as UVC devices, even * though they are compliant. */ static const struct usb_device_id uvc_ids[] = { /* Quanta ACER HD User Facing */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0408, .idProduct = 0x4035, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = UVC_PC_PROTOCOL_15, .driver_info = (kernel_ulong_t)&(const struct uvc_device_info){ .uvc_version = 0x010a, } }, /* LogiLink Wireless Webcam */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0416, .idProduct = 0xa91a, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Genius eFace 2025 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0458, .idProduct = 0x706e, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Microsoft Lifecam NX-6000 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x045e, .idProduct = 0x00f8, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Microsoft Lifecam NX-3000 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x045e, .idProduct = 0x0721, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Microsoft Lifecam VX-7000 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x045e, .idProduct = 0x0723, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Logitech, Webcam C910 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x0821, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)}, /* Logitech, Webcam B910 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x0823, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)}, /* Logitech Quickcam Fusion */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08c1, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Logitech Quickcam Orbit MP */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08c2, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Logitech Quickcam Pro for Notebook */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08c3, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Logitech Quickcam Pro 5000 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08c5, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, 
.bInterfaceProtocol = 0 }, /* Logitech Quickcam OEM Dell Notebook */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08c6, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Logitech Quickcam OEM Cisco VT Camera II */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08c7, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Logitech HD Pro Webcam C920 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x082d, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_RESTORE_CTRLS_ON_INIT | UVC_QUIRK_INVALID_DEVICE_SOF) }, /* Logitech HD Pro Webcam C922 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x085c, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_INVALID_DEVICE_SOF) }, /* Logitech Rally Bar Huddle */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x087c, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) }, /* Logitech Rally Bar */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x089b, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) }, /* Logitech Rally Bar Mini */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x046d, .idProduct = 0x08d3, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) }, /* Chicony CNF7129 (Asus EEE 100HE) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x04f2, .idProduct = 0xb071, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_RESTRICT_FRAME_RATE) }, /* Alcor Micro AU3820 (Future Boy PC USB Webcam) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x058f, .idProduct = 0x3820, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Dell XPS m1530 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05a9, .idProduct = 0x2640, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Dell SP2008WFP Monitor */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05a9, .idProduct = 0x2641, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Dell Alienware X51 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05a9, .idProduct = 0x2643, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Dell Studio Hybrid 140g (OmniVision 
webcam) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05a9, .idProduct = 0x264a, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Dell XPS M1330 (OmniVision OV7670 webcam) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05a9, .idProduct = 0x7670, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Apple Built-In iSight */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05ac, .idProduct = 0x8501, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX | UVC_QUIRK_BUILTIN_ISIGHT) }, /* Apple FaceTime HD Camera (Built-In) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05ac, .idProduct = 0x8514, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Apple Built-In iSight via iBridge */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05ac, .idProduct = 0x8600, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* Foxlink ("HP Webcam" on HP Mini 5103) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05c8, .idProduct = 0x0403, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_fix_bandwidth }, /* Genesys Logic USB 2.0 PC Camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x05e3, .idProduct = 0x0505, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Hercules Classic Silver */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x06f8, .idProduct = 0x300c, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_fix_bandwidth }, /* ViMicro Vega */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0ac8, .idProduct = 0x332d, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_fix_bandwidth }, /* ViMicro - Minoru3D */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0ac8, .idProduct = 0x3410, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_fix_bandwidth }, /* ViMicro Venus - Minoru3D */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0ac8, .idProduct = 0x3420, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_fix_bandwidth }, /* Ophir Optronics - SPCAM 620U */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0bd3, .idProduct = 0x0555, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* MT6227 
*/ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x0e8d, .idProduct = 0x0004, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX | UVC_QUIRK_PROBE_DEF) }, /* IMC Networks (Medion Akoya) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x13d3, .idProduct = 0x5103, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* JMicron USB2.0 XGA WebCam */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x152d, .idProduct = 0x0310, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Syntek (HP Spartan) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x174f, .idProduct = 0x5212, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Syntek (Samsung Q310) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x174f, .idProduct = 0x5931, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Syntek (Packard Bell EasyNote MX52 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x174f, .idProduct = 0x8a12, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Syntek (Asus F9SG) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x174f, .idProduct = 0x8a31, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Syntek (Asus U3S) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x174f, .idProduct = 0x8a33, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Syntek (JAOtech Smart Terminal) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x174f, .idProduct = 0x8a34, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Miricle 307K */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x17dc, .idProduct = 0x0202, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Lenovo Thinkpad SL400/SL500 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x17ef, .idProduct = 0x480b, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_stream_no_fid }, /* Aveo Technology USB 2.0 Camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1871, .idProduct = 0x0306, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX | UVC_QUIRK_PROBE_EXTRAFIELDS) }, /* Aveo Technology USB 
2.0 Camera (Tasco USB Microscope) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1871, .idProduct = 0x0516, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Ecamm Pico iMage */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x18cd, .idProduct = 0xcafe, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_EXTRAFIELDS) }, /* Manta MM-353 Plako */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x18ec, .idProduct = 0x3188, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* FSC WebCam V30S */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x18ec, .idProduct = 0x3288, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Arkmicro unbranded */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x18ec, .idProduct = 0x3290, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_def }, /* The Imaging Source USB CCD cameras */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x199e, .idProduct = 0x8102, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, /* Bodelin ProScopeHR */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_HI | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x19ab, .idProduct = 0x1000, .bcdDevice_hi = 0x0126, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_STATUS_INTERVAL) }, /* MSI StarCam 370i */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1b3b, .idProduct = 0x2951, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Generalplus Technology Inc. 
808 Camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1b3f, .idProduct = 0x2002, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Shenzhen Aoni Electronic Co.,Ltd 2K FHD camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1bcf, .idProduct = 0x0b40, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&(const struct uvc_device_info){ .uvc_version = 0x010a, } }, /* SiGma Micro USB Web Camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1c4f, .idProduct = 0x3000, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX | UVC_QUIRK_IGNORE_SELECTOR_UNIT) }, /* Oculus VR Positional Tracker DK2 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x2833, .idProduct = 0x0201, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_force_y8 }, /* Oculus VR Rift Sensor */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x2833, .idProduct = 0x0211, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_force_y8 }, /* GEO Semiconductor GC6500 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x29fe, .idProduct = 0x4d53, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_FORCE_BPP) }, /* Insta360 Link */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x2e1a, .idProduct = 0x4c01, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_DISABLE_AUTOSUSPEND) }, /* Intel D410/ASR depth camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0ad2, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel D415/ASRC depth camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0ad3, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel D430/AWG depth camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0ad4, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel RealSense D4M */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0b03, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel D435/AWGC depth camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0b07, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* 
Intel D435i depth camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0b3a, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel D405 Depth Camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0b5b, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Intel D455 Depth Camera */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x8086, .idProduct = 0x0b5c, .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) }, /* Generic USB Video Class */ { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_UNDEFINED) }, { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_15) }, {} }; MODULE_DEVICE_TABLE(usb, uvc_ids); struct uvc_driver uvc_driver = { .driver = { .name = "uvcvideo", .probe = uvc_probe, .disconnect = uvc_disconnect, .suspend = uvc_suspend, .resume = uvc_resume, .reset_resume = uvc_reset_resume, .id_table = uvc_ids, .supports_autosuspend = 1, }, }; static int __init uvc_init(void) { int ret; uvc_debugfs_init(); ret = usb_register(&uvc_driver.driver); if (ret < 0) { uvc_debugfs_cleanup(); return ret; } return 0; } static void __exit uvc_cleanup(void) { usb_deregister(&uvc_driver.driver); uvc_debugfs_cleanup(); } module_init(uvc_init); module_exit(uvc_cleanup); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION); |
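/*
 * Editor's sketch (not from the original source): the uvc_ids[] table above
 * pairs USB vendor/product IDs with per-device quirk flags or uvc_device_info
 * structures, and MODULE_DEVICE_TABLE(usb, uvc_ids) exports it for module
 * autoloading.  A new entry for a hypothetical camera (made-up IDs
 * 0x1234:0x5678) would be added before the generic USB_INTERFACE_INFO()
 * catch-all entries and the {} terminator, following the same initializer
 * pattern used throughout the table:
 *
 *	// ACME Example Webcam (hypothetical IDs and quirk choice)
 *	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
 *				| USB_DEVICE_ID_MATCH_INT_INFO,
 *	  .idVendor		= 0x1234,
 *	  .idProduct		= 0x5678,
 *	  .bInterfaceClass	= USB_CLASS_VIDEO,
 *	  .bInterfaceSubClass	= 1,
 *	  .bInterfaceProtocol	= 0,
 *	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX) },
 */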
/* SPDX-License-Identifier: GPL-2.0 */ /* * Portions of this file * Copyright(c) 2016-2017 Intel Deutschland GmbH * Copyright (C) 2018, 2021-2024 Intel Corporation */ #ifndef __CFG80211_RDEV_OPS #define __CFG80211_RDEV_OPS #include <linux/rtnetlink.h> #include <net/cfg80211.h> #include "core.h" #include "trace.h" static inline int rdev_suspend(struct cfg80211_registered_device *rdev, struct cfg80211_wowlan *wowlan) { int ret;
trace_rdev_suspend(&rdev->wiphy, wowlan); ret = rdev->ops->suspend(&rdev->wiphy, wowlan); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_resume(struct cfg80211_registered_device *rdev) { int ret; trace_rdev_resume(&rdev->wiphy); ret = rdev->ops->resume(&rdev->wiphy); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_set_wakeup(struct cfg80211_registered_device *rdev, bool enabled) { trace_rdev_set_wakeup(&rdev->wiphy, enabled); rdev->ops->set_wakeup(&rdev->wiphy, enabled); trace_rdev_return_void(&rdev->wiphy); } static inline struct wireless_dev *rdev_add_virtual_intf(struct cfg80211_registered_device *rdev, char *name, unsigned char name_assign_type, enum nl80211_iftype type, struct vif_params *params) { struct wireless_dev *ret; trace_rdev_add_virtual_intf(&rdev->wiphy, name, type); ret = rdev->ops->add_virtual_intf(&rdev->wiphy, name, name_assign_type, type, params); trace_rdev_return_wdev(&rdev->wiphy, ret); return ret; } static inline int rdev_del_virtual_intf(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { int ret; trace_rdev_del_virtual_intf(&rdev->wiphy, wdev); ret = rdev->ops->del_virtual_intf(&rdev->wiphy, wdev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_virtual_intf(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype type, struct vif_params *params) { int ret; trace_rdev_change_virtual_intf(&rdev->wiphy, dev, type); ret = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, type, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_add_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params) { int ret; trace_rdev_add_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, mac_addr, params->mode); ret = rdev->ops->add_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, mac_addr, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback)(void *cookie, struct key_params*)) { int ret; trace_rdev_get_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, mac_addr); ret = rdev->ops->get_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, mac_addr, cookie, callback); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr) { int ret; trace_rdev_del_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, mac_addr); ret = rdev->ops->del_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, mac_addr); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_default_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, int link_id, u8 key_index, bool unicast, bool multicast) { int ret; trace_rdev_set_default_key(&rdev->wiphy, netdev, link_id, key_index, unicast, multicast); ret = rdev->ops->set_default_key(&rdev->wiphy, netdev, link_id, key_index, unicast, multicast); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_default_mgmt_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, int link_id, u8 key_index) { int ret; 
trace_rdev_set_default_mgmt_key(&rdev->wiphy, netdev, link_id, key_index); ret = rdev->ops->set_default_mgmt_key(&rdev->wiphy, netdev, link_id, key_index); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_default_beacon_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, int link_id, u8 key_index) { int ret; trace_rdev_set_default_beacon_key(&rdev->wiphy, netdev, link_id, key_index); ret = rdev->ops->set_default_beacon_key(&rdev->wiphy, netdev, link_id, key_index); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_start_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ap_settings *settings) { int ret; trace_rdev_start_ap(&rdev->wiphy, dev, settings); ret = rdev->ops->start_ap(&rdev->wiphy, dev, settings); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_beacon(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ap_update *info) { int ret; trace_rdev_change_beacon(&rdev->wiphy, dev, info); ret = rdev->ops->change_beacon(&rdev->wiphy, dev, info); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, unsigned int link_id) { int ret; trace_rdev_stop_ap(&rdev->wiphy, dev, link_id); ret = rdev->ops->stop_ap(&rdev->wiphy, dev, link_id); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_add_station(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *mac, struct station_parameters *params) { int ret; trace_rdev_add_station(&rdev->wiphy, dev, mac, params); ret = rdev->ops->add_station(&rdev->wiphy, dev, mac, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_station(struct cfg80211_registered_device *rdev, struct net_device *dev, struct station_del_parameters *params) { int ret; trace_rdev_del_station(&rdev->wiphy, dev, params); ret = rdev->ops->del_station(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_station(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *mac, struct station_parameters *params) { int ret; trace_rdev_change_station(&rdev->wiphy, dev, mac, params); ret = rdev->ops->change_station(&rdev->wiphy, dev, mac, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_station(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *mac, struct station_info *sinfo) { int ret; trace_rdev_get_station(&rdev->wiphy, dev, mac); ret = rdev->ops->get_station(&rdev->wiphy, dev, mac, sinfo); trace_rdev_return_int_station_info(&rdev->wiphy, ret, sinfo); return ret; } static inline int rdev_dump_station(struct cfg80211_registered_device *rdev, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { int ret; trace_rdev_dump_station(&rdev->wiphy, dev, idx, mac); ret = rdev->ops->dump_station(&rdev->wiphy, dev, idx, mac, sinfo); trace_rdev_return_int_station_info(&rdev->wiphy, ret, sinfo); return ret; } static inline int rdev_add_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst, u8 *next_hop) { int ret; trace_rdev_add_mpath(&rdev->wiphy, dev, dst, next_hop); ret = rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_mpath(struct cfg80211_registered_device *rdev, struct 
net_device *dev, u8 *dst) { int ret; trace_rdev_del_mpath(&rdev->wiphy, dev, dst); ret = rdev->ops->del_mpath(&rdev->wiphy, dev, dst); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst, u8 *next_hop) { int ret; trace_rdev_change_mpath(&rdev->wiphy, dev, dst, next_hop); ret = rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { int ret; trace_rdev_get_mpath(&rdev->wiphy, dev, dst, next_hop); ret = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, pinfo); trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); return ret; } static inline int rdev_get_mpp(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst, u8 *mpp, struct mpath_info *pinfo) { int ret; trace_rdev_get_mpp(&rdev->wiphy, dev, dst, mpp); ret = rdev->ops->get_mpp(&rdev->wiphy, dev, dst, mpp, pinfo); trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); return ret; } static inline int rdev_dump_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, int idx, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { int ret; trace_rdev_dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop); ret = rdev->ops->dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop, pinfo); trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); return ret; } static inline int rdev_dump_mpp(struct cfg80211_registered_device *rdev, struct net_device *dev, int idx, u8 *dst, u8 *mpp, struct mpath_info *pinfo) { int ret; trace_rdev_dump_mpp(&rdev->wiphy, dev, idx, dst, mpp); ret = rdev->ops->dump_mpp(&rdev->wiphy, dev, idx, dst, mpp, pinfo); trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); return ret; } static inline int rdev_get_mesh_config(struct cfg80211_registered_device *rdev, struct net_device *dev, struct mesh_config *conf) { int ret; trace_rdev_get_mesh_config(&rdev->wiphy, dev); ret = rdev->ops->get_mesh_config(&rdev->wiphy, dev, conf); trace_rdev_return_int_mesh_config(&rdev->wiphy, ret, conf); return ret; } static inline int rdev_update_mesh_config(struct cfg80211_registered_device *rdev, struct net_device *dev, u32 mask, const struct mesh_config *nconf) { int ret; trace_rdev_update_mesh_config(&rdev->wiphy, dev, mask, nconf); ret = rdev->ops->update_mesh_config(&rdev->wiphy, dev, mask, nconf); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_join_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev, const struct mesh_config *conf, const struct mesh_setup *setup) { int ret; trace_rdev_join_mesh(&rdev->wiphy, dev, conf, setup); ret = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_leave_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev) { int ret; trace_rdev_leave_mesh(&rdev->wiphy, dev); ret = rdev->ops->leave_mesh(&rdev->wiphy, dev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_join_ocb(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ocb_setup *setup) { int ret; trace_rdev_join_ocb(&rdev->wiphy, dev, setup); ret = rdev->ops->join_ocb(&rdev->wiphy, dev, setup); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_leave_ocb(struct 
cfg80211_registered_device *rdev, struct net_device *dev) { int ret; trace_rdev_leave_ocb(&rdev->wiphy, dev); ret = rdev->ops->leave_ocb(&rdev->wiphy, dev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_bss(struct cfg80211_registered_device *rdev, struct net_device *dev, struct bss_parameters *params) { int ret; trace_rdev_change_bss(&rdev->wiphy, dev, params); ret = rdev->ops->change_bss(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_inform_bss(struct cfg80211_registered_device *rdev, struct cfg80211_bss *bss, const struct cfg80211_bss_ies *ies, void *drv_data) { trace_rdev_inform_bss(&rdev->wiphy, bss); if (rdev->ops->inform_bss) rdev->ops->inform_bss(&rdev->wiphy, bss, ies, drv_data); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_set_txq_params(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_txq_params *params) { int ret; trace_rdev_set_txq_params(&rdev->wiphy, dev, params); ret = rdev->ops->set_txq_params(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_libertas_set_mesh_channel(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan) { int ret; trace_rdev_libertas_set_mesh_channel(&rdev->wiphy, dev, chan); ret = rdev->ops->libertas_set_mesh_channel(&rdev->wiphy, dev, chan); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_monitor_channel(struct cfg80211_registered_device *rdev, struct cfg80211_chan_def *chandef) { int ret; trace_rdev_set_monitor_channel(&rdev->wiphy, chandef); ret = rdev->ops->set_monitor_channel(&rdev->wiphy, chandef); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_scan(struct cfg80211_registered_device *rdev, struct cfg80211_scan_request *request) { int ret; if (WARN_ON_ONCE(!request->n_ssids && request->ssids)) return -EINVAL; trace_rdev_scan(&rdev->wiphy, request); ret = rdev->ops->scan(&rdev->wiphy, request); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_abort_scan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { trace_rdev_abort_scan(&rdev->wiphy, wdev); rdev->ops->abort_scan(&rdev->wiphy, wdev); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_auth(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_auth_request *req) { int ret; trace_rdev_auth(&rdev->wiphy, dev, req); ret = rdev->ops->auth(&rdev->wiphy, dev, req); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_assoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_assoc_request *req) { int ret; trace_rdev_assoc(&rdev->wiphy, dev, req); ret = rdev->ops->assoc(&rdev->wiphy, dev, req); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_deauth(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_deauth_request *req) { int ret; trace_rdev_deauth(&rdev->wiphy, dev, req); ret = rdev->ops->deauth(&rdev->wiphy, dev, req); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_disassoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_disassoc_request *req) { int ret; trace_rdev_disassoc(&rdev->wiphy, dev, req); ret = rdev->ops->disassoc(&rdev->wiphy, dev, req); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int 
rdev_connect(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *sme) { int ret; trace_rdev_connect(&rdev->wiphy, dev, sme); ret = rdev->ops->connect(&rdev->wiphy, dev, sme); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_update_connect_params(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *sme, u32 changed) { int ret; trace_rdev_update_connect_params(&rdev->wiphy, dev, sme, changed); ret = rdev->ops->update_connect_params(&rdev->wiphy, dev, sme, changed); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason_code) { int ret; trace_rdev_disconnect(&rdev->wiphy, dev, reason_code); ret = rdev->ops->disconnect(&rdev->wiphy, dev, reason_code); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_join_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ibss_params *params) { int ret; trace_rdev_join_ibss(&rdev->wiphy, dev, params); ret = rdev->ops->join_ibss(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_leave_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev) { int ret; trace_rdev_leave_ibss(&rdev->wiphy, dev); ret = rdev->ops->leave_ibss(&rdev->wiphy, dev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed) { int ret = -EOPNOTSUPP; trace_rdev_set_wiphy_params(&rdev->wiphy, changed); if (rdev->ops->set_wiphy_params) ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_tx_power(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, int mbm) { int ret; trace_rdev_set_tx_power(&rdev->wiphy, wdev, type, mbm); ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, type, mbm); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_tx_power(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, int *dbm) { int ret; trace_rdev_get_tx_power(&rdev->wiphy, wdev); ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, dbm); trace_rdev_return_int_int(&rdev->wiphy, ret, *dbm); return ret; } static inline int rdev_set_multicast_to_unicast(struct cfg80211_registered_device *rdev, struct net_device *dev, const bool enabled) { int ret; trace_rdev_set_multicast_to_unicast(&rdev->wiphy, dev, enabled); ret = rdev->ops->set_multicast_to_unicast(&rdev->wiphy, dev, enabled); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_txq_stats(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_txq_stats *txqstats) { int ret; trace_rdev_get_txq_stats(&rdev->wiphy, wdev); ret = rdev->ops->get_txq_stats(&rdev->wiphy, wdev, txqstats); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_rfkill_poll(struct cfg80211_registered_device *rdev) { trace_rdev_rfkill_poll(&rdev->wiphy); rdev->ops->rfkill_poll(&rdev->wiphy); trace_rdev_return_void(&rdev->wiphy); } #ifdef CONFIG_NL80211_TESTMODE static inline int rdev_testmode_cmd(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, void *data, int len) { int ret; trace_rdev_testmode_cmd(&rdev->wiphy, wdev); ret = rdev->ops->testmode_cmd(&rdev->wiphy, 
wdev, data, len); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_testmode_dump(struct cfg80211_registered_device *rdev, struct sk_buff *skb, struct netlink_callback *cb, void *data, int len) { int ret; trace_rdev_testmode_dump(&rdev->wiphy); ret = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb, data, len); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } #endif static inline int rdev_set_bitrate_mask(struct cfg80211_registered_device *rdev, struct net_device *dev, unsigned int link_id, const u8 *peer, const struct cfg80211_bitrate_mask *mask) { int ret; trace_rdev_set_bitrate_mask(&rdev->wiphy, dev, link_id, peer, mask); ret = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, link_id, peer, mask); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_dump_survey(struct cfg80211_registered_device *rdev, struct net_device *netdev, int idx, struct survey_info *info) { int ret; trace_rdev_dump_survey(&rdev->wiphy, netdev, idx); ret = rdev->ops->dump_survey(&rdev->wiphy, netdev, idx, info); if (ret < 0) trace_rdev_return_int(&rdev->wiphy, ret); else trace_rdev_return_int_survey_info(&rdev->wiphy, ret, info); return ret; } static inline int rdev_set_pmksa(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_pmksa *pmksa) { int ret; trace_rdev_set_pmksa(&rdev->wiphy, netdev, pmksa); ret = rdev->ops->set_pmksa(&rdev->wiphy, netdev, pmksa); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_pmksa(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_pmksa *pmksa) { int ret; trace_rdev_del_pmksa(&rdev->wiphy, netdev, pmksa); ret = rdev->ops->del_pmksa(&rdev->wiphy, netdev, pmksa); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_flush_pmksa(struct cfg80211_registered_device *rdev, struct net_device *netdev) { int ret; trace_rdev_flush_pmksa(&rdev->wiphy, netdev); ret = rdev->ops->flush_pmksa(&rdev->wiphy, netdev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_remain_on_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct ieee80211_channel *chan, unsigned int duration, u64 *cookie) { int ret; trace_rdev_remain_on_channel(&rdev->wiphy, wdev, chan, duration); ret = rdev->ops->remain_on_channel(&rdev->wiphy, wdev, chan, duration, cookie); trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); return ret; } static inline int rdev_cancel_remain_on_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u64 cookie) { int ret; trace_rdev_cancel_remain_on_channel(&rdev->wiphy, wdev, cookie); ret = rdev->ops->cancel_remain_on_channel(&rdev->wiphy, wdev, cookie); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_mgmt_tx(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie) { int ret; trace_rdev_mgmt_tx(&rdev->wiphy, wdev, params); ret = rdev->ops->mgmt_tx(&rdev->wiphy, wdev, params, cookie); trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); return ret; } static inline int rdev_tx_control_port(struct cfg80211_registered_device *rdev, struct net_device *dev, const void *buf, size_t len, const u8 *dest, __be16 proto, const bool noencrypt, int link, u64 *cookie) { int ret; trace_rdev_tx_control_port(&rdev->wiphy, dev, buf, len, dest, proto, noencrypt, link); ret = rdev->ops->tx_control_port(&rdev->wiphy, dev, buf, len, dest, proto, noencrypt, 
link, cookie); if (cookie) trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); else trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_mgmt_tx_cancel_wait(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u64 cookie) { int ret; trace_rdev_mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie); ret = rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_power_mgmt(struct cfg80211_registered_device *rdev, struct net_device *dev, bool enabled, int timeout) { int ret; trace_rdev_set_power_mgmt(&rdev->wiphy, dev, enabled, timeout); ret = rdev->ops->set_power_mgmt(&rdev->wiphy, dev, enabled, timeout); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_cqm_rssi_config(struct cfg80211_registered_device *rdev, struct net_device *dev, s32 rssi_thold, u32 rssi_hyst) { int ret; trace_rdev_set_cqm_rssi_config(&rdev->wiphy, dev, rssi_thold, rssi_hyst); ret = rdev->ops->set_cqm_rssi_config(&rdev->wiphy, dev, rssi_thold, rssi_hyst); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_cqm_rssi_range_config(struct cfg80211_registered_device *rdev, struct net_device *dev, s32 low, s32 high) { int ret; trace_rdev_set_cqm_rssi_range_config(&rdev->wiphy, dev, low, high); ret = rdev->ops->set_cqm_rssi_range_config(&rdev->wiphy, dev, low, high); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_cqm_txe_config(struct cfg80211_registered_device *rdev, struct net_device *dev, u32 rate, u32 pkts, u32 intvl) { int ret; trace_rdev_set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts, intvl); ret = rdev->ops->set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts, intvl); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_update_mgmt_frame_registrations(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct mgmt_frame_regs *upd) { might_sleep(); trace_rdev_update_mgmt_frame_registrations(&rdev->wiphy, wdev, upd); if (rdev->ops->update_mgmt_frame_registrations) rdev->ops->update_mgmt_frame_registrations(&rdev->wiphy, wdev, upd); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_set_antenna(struct cfg80211_registered_device *rdev, u32 tx_ant, u32 rx_ant) { int ret; trace_rdev_set_antenna(&rdev->wiphy, tx_ant, rx_ant); ret = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_antenna(struct cfg80211_registered_device *rdev, u32 *tx_ant, u32 *rx_ant) { int ret; trace_rdev_get_antenna(&rdev->wiphy); ret = rdev->ops->get_antenna(&rdev->wiphy, tx_ant, rx_ant); if (ret) trace_rdev_return_int(&rdev->wiphy, ret); else trace_rdev_return_int_tx_rx(&rdev->wiphy, ret, *tx_ant, *rx_ant); return ret; } static inline int rdev_sched_scan_start(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_sched_scan_request *request) { int ret; trace_rdev_sched_scan_start(&rdev->wiphy, dev, request->reqid); ret = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_sched_scan_stop(struct cfg80211_registered_device *rdev, struct net_device *dev, u64 reqid) { int ret; trace_rdev_sched_scan_stop(&rdev->wiphy, dev, reqid); ret = rdev->ops->sched_scan_stop(&rdev->wiphy, dev, reqid); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_rekey_data(struct 
cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_gtk_rekey_data *data) { int ret; trace_rdev_set_rekey_data(&rdev->wiphy, dev); ret = rdev->ops->set_rekey_data(&rdev->wiphy, dev, data); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_tdls_mgmt(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *peer, int link_id, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *buf, size_t len) { int ret; trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, link_id, action_code, dialog_token, status_code, peer_capability, initiator, buf, len); ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, link_id, action_code, dialog_token, status_code, peer_capability, initiator, buf, len); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_tdls_oper(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *peer, enum nl80211_tdls_operation oper) { int ret; trace_rdev_tdls_oper(&rdev->wiphy, dev, peer, oper); ret = rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, oper); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_probe_client(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *peer, u64 *cookie) { int ret; trace_rdev_probe_client(&rdev->wiphy, dev, peer); ret = rdev->ops->probe_client(&rdev->wiphy, dev, peer, cookie); trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); return ret; } static inline int rdev_set_noack_map(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 noack_map) { int ret; trace_rdev_set_noack_map(&rdev->wiphy, dev, noack_map); ret = rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, unsigned int link_id, struct cfg80211_chan_def *chandef) { int ret; trace_rdev_get_channel(&rdev->wiphy, wdev, link_id); ret = rdev->ops->get_channel(&rdev->wiphy, wdev, link_id, chandef); trace_rdev_return_chandef(&rdev->wiphy, ret, chandef); return ret; } static inline int rdev_start_p2p_device(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { int ret; trace_rdev_start_p2p_device(&rdev->wiphy, wdev); ret = rdev->ops->start_p2p_device(&rdev->wiphy, wdev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_stop_p2p_device(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { trace_rdev_stop_p2p_device(&rdev->wiphy, wdev); rdev->ops->stop_p2p_device(&rdev->wiphy, wdev); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_start_nan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf) { int ret; trace_rdev_start_nan(&rdev->wiphy, wdev, conf); ret = rdev->ops->start_nan(&rdev->wiphy, wdev, conf); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_stop_nan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { trace_rdev_stop_nan(&rdev->wiphy, wdev); rdev->ops->stop_nan(&rdev->wiphy, wdev); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_add_nan_func(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_nan_func *nan_func) { int ret; trace_rdev_add_nan_func(&rdev->wiphy, wdev, nan_func); ret = rdev->ops->add_nan_func(&rdev->wiphy, wdev, nan_func); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void 
rdev_del_nan_func(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u64 cookie) { trace_rdev_del_nan_func(&rdev->wiphy, wdev, cookie); rdev->ops->del_nan_func(&rdev->wiphy, wdev, cookie); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_nan_change_conf(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf, u32 changes) { int ret; trace_rdev_nan_change_conf(&rdev->wiphy, wdev, conf, changes); if (rdev->ops->nan_change_conf) ret = rdev->ops->nan_change_conf(&rdev->wiphy, wdev, conf, changes); else ret = -EOPNOTSUPP; trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_mac_acl(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_acl_data *params) { int ret; trace_rdev_set_mac_acl(&rdev->wiphy, dev, params); ret = rdev->ops->set_mac_acl(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_update_ft_ies(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_update_ft_ies_params *ftie) { int ret; trace_rdev_update_ft_ies(&rdev->wiphy, dev, ftie); ret = rdev->ops->update_ft_ies(&rdev->wiphy, dev, ftie); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_crit_proto_start(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, enum nl80211_crit_proto_id protocol, u16 duration) { int ret; trace_rdev_crit_proto_start(&rdev->wiphy, wdev, protocol, duration); ret = rdev->ops->crit_proto_start(&rdev->wiphy, wdev, protocol, duration); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_crit_proto_stop(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { trace_rdev_crit_proto_stop(&rdev->wiphy, wdev); rdev->ops->crit_proto_stop(&rdev->wiphy, wdev); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_channel_switch(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_csa_settings *params) { int ret; trace_rdev_channel_switch(&rdev->wiphy, dev, params); ret = rdev->ops->channel_switch(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_qos_map(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_qos_map *qos_map) { int ret = -EOPNOTSUPP; if (rdev->ops->set_qos_map) { trace_rdev_set_qos_map(&rdev->wiphy, dev, qos_map); ret = rdev->ops->set_qos_map(&rdev->wiphy, dev, qos_map); trace_rdev_return_int(&rdev->wiphy, ret); } return ret; } static inline int rdev_set_ap_chanwidth(struct cfg80211_registered_device *rdev, struct net_device *dev, unsigned int link_id, struct cfg80211_chan_def *chandef) { int ret; trace_rdev_set_ap_chanwidth(&rdev->wiphy, dev, link_id, chandef); ret = rdev->ops->set_ap_chanwidth(&rdev->wiphy, dev, link_id, chandef); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_add_tx_ts(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 tsid, const u8 *peer, u8 user_prio, u16 admitted_time) { int ret = -EOPNOTSUPP; trace_rdev_add_tx_ts(&rdev->wiphy, dev, tsid, peer, user_prio, admitted_time); if (rdev->ops->add_tx_ts) ret = rdev->ops->add_tx_ts(&rdev->wiphy, dev, tsid, peer, user_prio, admitted_time); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_tx_ts(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 tsid, const u8 *peer) { int ret = -EOPNOTSUPP; trace_rdev_del_tx_ts(&rdev->wiphy, 
dev, tsid, peer); if (rdev->ops->del_tx_ts) ret = rdev->ops->del_tx_ts(&rdev->wiphy, dev, tsid, peer); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_tdls_channel_switch(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *addr, u8 oper_class, struct cfg80211_chan_def *chandef) { int ret; trace_rdev_tdls_channel_switch(&rdev->wiphy, dev, addr, oper_class, chandef); ret = rdev->ops->tdls_channel_switch(&rdev->wiphy, dev, addr, oper_class, chandef); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_tdls_cancel_channel_switch(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *addr) { trace_rdev_tdls_cancel_channel_switch(&rdev->wiphy, dev, addr); rdev->ops->tdls_cancel_channel_switch(&rdev->wiphy, dev, addr); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_start_radar_detection(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_chan_def *chandef, u32 cac_time_ms, int link_id) { int ret = -EOPNOTSUPP; trace_rdev_start_radar_detection(&rdev->wiphy, dev, chandef, cac_time_ms, link_id); if (rdev->ops->start_radar_detection) ret = rdev->ops->start_radar_detection(&rdev->wiphy, dev, chandef, cac_time_ms, link_id); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_end_cac(struct cfg80211_registered_device *rdev, struct net_device *dev, unsigned int link_id) { trace_rdev_end_cac(&rdev->wiphy, dev, link_id); if (rdev->ops->end_cac) rdev->ops->end_cac(&rdev->wiphy, dev, link_id); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_set_mcast_rate(struct cfg80211_registered_device *rdev, struct net_device *dev, int mcast_rate[NUM_NL80211_BANDS]) { int ret = -EOPNOTSUPP; trace_rdev_set_mcast_rate(&rdev->wiphy, dev, mcast_rate); if (rdev->ops->set_mcast_rate) ret = rdev->ops->set_mcast_rate(&rdev->wiphy, dev, mcast_rate); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_coalesce(struct cfg80211_registered_device *rdev, struct cfg80211_coalesce *coalesce) { int ret = -EOPNOTSUPP; trace_rdev_set_coalesce(&rdev->wiphy, coalesce); if (rdev->ops->set_coalesce) ret = rdev->ops->set_coalesce(&rdev->wiphy, coalesce); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_pmk(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_pmk_conf *pmk_conf) { int ret = -EOPNOTSUPP; trace_rdev_set_pmk(&rdev->wiphy, dev, pmk_conf); if (rdev->ops->set_pmk) ret = rdev->ops->set_pmk(&rdev->wiphy, dev, pmk_conf); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_pmk(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *aa) { int ret = -EOPNOTSUPP; trace_rdev_del_pmk(&rdev->wiphy, dev, aa); if (rdev->ops->del_pmk) ret = rdev->ops->del_pmk(&rdev->wiphy, dev, aa); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_external_auth(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_external_auth_params *params) { int ret = -EOPNOTSUPP; trace_rdev_external_auth(&rdev->wiphy, dev, params); if (rdev->ops->external_auth) ret = rdev->ops->external_auth(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_ftm_responder_stats(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ftm_responder_stats *ftm_stats) { int ret = -EOPNOTSUPP; 
trace_rdev_get_ftm_responder_stats(&rdev->wiphy, dev, ftm_stats); if (rdev->ops->get_ftm_responder_stats) ret = rdev->ops->get_ftm_responder_stats(&rdev->wiphy, dev, ftm_stats); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_start_pmsr(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_pmsr_request *request) { int ret = -EOPNOTSUPP; trace_rdev_start_pmsr(&rdev->wiphy, wdev, request->cookie); if (rdev->ops->start_pmsr) ret = rdev->ops->start_pmsr(&rdev->wiphy, wdev, request); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_abort_pmsr(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_pmsr_request *request) { trace_rdev_abort_pmsr(&rdev->wiphy, wdev, request->cookie); if (rdev->ops->abort_pmsr) rdev->ops->abort_pmsr(&rdev->wiphy, wdev, request); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_update_owe_info(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_update_owe_info *oweinfo) { int ret = -EOPNOTSUPP; trace_rdev_update_owe_info(&rdev->wiphy, dev, oweinfo); if (rdev->ops->update_owe_info) ret = rdev->ops->update_owe_info(&rdev->wiphy, dev, oweinfo); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_probe_mesh_link(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *dest, const void *buf, size_t len) { int ret; trace_rdev_probe_mesh_link(&rdev->wiphy, dev, dest, buf, len); ret = rdev->ops->probe_mesh_link(&rdev->wiphy, dev, buf, len); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_tid_config(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_tid_config *tid_conf) { int ret; trace_rdev_set_tid_config(&rdev->wiphy, dev, tid_conf); ret = rdev->ops->set_tid_config(&rdev->wiphy, dev, tid_conf); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_reset_tid_config(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *peer, u8 tids) { int ret; trace_rdev_reset_tid_config(&rdev->wiphy, dev, peer, tids); ret = rdev->ops->reset_tid_config(&rdev->wiphy, dev, peer, tids); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_sar_specs(struct cfg80211_registered_device *rdev, struct cfg80211_sar_specs *sar) { int ret; trace_rdev_set_sar_specs(&rdev->wiphy, sar); ret = rdev->ops->set_sar_specs(&rdev->wiphy, sar); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_color_change(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_color_change_settings *params) { int ret; trace_rdev_color_change(&rdev->wiphy, dev, params); ret = rdev->ops->color_change(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_fils_aad(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_fils_aad *fils_aad) { int ret = -EOPNOTSUPP; trace_rdev_set_fils_aad(&rdev->wiphy, dev, fils_aad); if (rdev->ops->set_fils_aad) ret = rdev->ops->set_fils_aad(&rdev->wiphy, dev, fils_aad); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_radar_background(struct cfg80211_registered_device *rdev, struct cfg80211_chan_def *chandef) { struct wiphy *wiphy = &rdev->wiphy; int ret = -EOPNOTSUPP; trace_rdev_set_radar_background(wiphy, chandef); if (rdev->ops->set_radar_background) ret = 
rdev->ops->set_radar_background(wiphy, chandef); trace_rdev_return_int(wiphy, ret); return ret; } static inline int rdev_add_intf_link(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, unsigned int link_id) { int ret = 0; trace_rdev_add_intf_link(&rdev->wiphy, wdev, link_id); if (rdev->ops->add_intf_link) ret = rdev->ops->add_intf_link(&rdev->wiphy, wdev, link_id); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_del_intf_link(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, unsigned int link_id) { trace_rdev_del_intf_link(&rdev->wiphy, wdev, link_id); if (rdev->ops->del_intf_link) rdev->ops->del_intf_link(&rdev->wiphy, wdev, link_id); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_add_link_station(struct cfg80211_registered_device *rdev, struct net_device *dev, struct link_station_parameters *params) { int ret = -EOPNOTSUPP; trace_rdev_add_link_station(&rdev->wiphy, dev, params); if (rdev->ops->add_link_station) ret = rdev->ops->add_link_station(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_mod_link_station(struct cfg80211_registered_device *rdev, struct net_device *dev, struct link_station_parameters *params) { int ret = -EOPNOTSUPP; trace_rdev_mod_link_station(&rdev->wiphy, dev, params); if (rdev->ops->mod_link_station) ret = rdev->ops->mod_link_station(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_link_station(struct cfg80211_registered_device *rdev, struct net_device *dev, struct link_station_del_parameters *params) { int ret = -EOPNOTSUPP; trace_rdev_del_link_station(&rdev->wiphy, dev, params); if (rdev->ops->del_link_station) ret = rdev->ops->del_link_station(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_hw_timestamp(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_set_hw_timestamp *hwts) { struct wiphy *wiphy = &rdev->wiphy; int ret = -EOPNOTSUPP; trace_rdev_set_hw_timestamp(wiphy, dev, hwts); if (rdev->ops->set_hw_timestamp) ret = rdev->ops->set_hw_timestamp(wiphy, dev, hwts); trace_rdev_return_int(wiphy, ret); return ret; } static inline int rdev_set_ttlm(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ttlm_params *params) { struct wiphy *wiphy = &rdev->wiphy; int ret = -EOPNOTSUPP; trace_rdev_set_ttlm(wiphy, dev, params); if (rdev->ops->set_ttlm) ret = rdev->ops->set_ttlm(wiphy, dev, params); trace_rdev_return_int(wiphy, ret); return ret; } static inline u32 rdev_get_radio_mask(struct cfg80211_registered_device *rdev, struct net_device *dev) { struct wiphy *wiphy = &rdev->wiphy; if (!rdev->ops->get_radio_mask) return 0; return rdev->ops->get_radio_mask(wiphy, dev); } #endif /* __CFG80211_RDEV_OPS */ |
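/*
 * Editor's sketch (not part of rdev-ops.h): every wrapper in this header
 * follows the same shape - emit an entry trace event, call the driver op
 * through rdev->ops, emit a return trace event, and hand the result back.
 * Ops that drivers may legitimately leave unimplemented are guarded by a
 * NULL check and default to -EOPNOTSUPP.  The op (set_foo), its argument
 * and the entry trace event below are hypothetical; only the pattern is
 * taken from the functions above.
 */
static inline int rdev_set_foo(struct cfg80211_registered_device *rdev,
			       struct net_device *dev, u32 foo)
{
	int ret = -EOPNOTSUPP;

	trace_rdev_set_foo(&rdev->wiphy, dev, foo);	/* hypothetical trace event */
	if (rdev->ops->set_foo)				/* hypothetical, optional op */
		ret = rdev->ops->set_foo(&rdev->wiphy, dev, foo);
	trace_rdev_return_int(&rdev->wiphy, ret);
	return ret;
}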
// SPDX-License-Identifier: GPL-2.0 /* * linux/drivers/char/misc.c * * Generic misc open routine by Johan Myreen * * Based on code from Linus * * Teemu Rantanen's Microsoft Busmouse support and Derrick Cole's * changes incorporated into 0.97pl4 * by Peter Cervasio (pete%q106fm.uucp@wupost.wustl.edu) (08SEP92) * See busmouse.c for particulars. * * Made things a lot more modular - easy to compile in just one or two * of the misc drivers, as they are now completely independent. Linus. * * Support for loadable modules. 8-Sep-95 Philip Blundell <pjb27@cam.ac.uk> * * Fixed a failing symbol register to free the device registration * Alan Cox <alan@lxorguk.ukuu.org.uk> 21-Jan-96 * * Dynamic minors and /proc/mice by Alessandro Rubini. 26-Mar-96 * * Renamed to misc and miscdevice to be more accurate. Alan Cox 26-Mar-96 * * Handling of mouse minor numbers for kerneld: * Idea by Jacques Gelinas <jack@solucorp.qc.ca>, * adapted by Bjorn Ekwall <bj0rn@blox.se> * corrected by Alan Cox <alan@lxorguk.ukuu.org.uk> * * Changes for kmod (from kerneld): * Cyrus Durgin <cider@speakeasy.org> * * Added devfs support.
Richard Gooch <rgooch@atnf.csiro.au> 10-Jan-1998 */ #include <linux/module.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/miscdevice.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/device.h> #include <linux/tty.h> #include <linux/kmod.h> #include <linux/gfp.h> /* * Head entry for the doubly linked miscdevice list */ static LIST_HEAD(misc_list); static DEFINE_MUTEX(misc_mtx); /* * Assigned numbers, used for dynamic minors */ #define DYNAMIC_MINORS 128 /* like dynamic majors */ static DEFINE_IDA(misc_minors_ida); static int misc_minor_alloc(void) { int ret; ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL); if (ret >= 0) { ret = DYNAMIC_MINORS - ret - 1; } else { ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1, MINORMASK, GFP_KERNEL); } return ret; } static void misc_minor_free(int minor) { if (minor < DYNAMIC_MINORS) ida_free(&misc_minors_ida, DYNAMIC_MINORS - minor - 1); else if (minor > MISC_DYNAMIC_MINOR) ida_free(&misc_minors_ida, minor); } #ifdef CONFIG_PROC_FS static void *misc_seq_start(struct seq_file *seq, loff_t *pos) { mutex_lock(&misc_mtx); return seq_list_start(&misc_list, *pos); } static void *misc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_list_next(v, &misc_list, pos); } static void misc_seq_stop(struct seq_file *seq, void *v) { mutex_unlock(&misc_mtx); } static int misc_seq_show(struct seq_file *seq, void *v) { const struct miscdevice *p = list_entry(v, struct miscdevice, list); seq_printf(seq, "%3i %s\n", p->minor, p->name ? p->name : ""); return 0; } static const struct seq_operations misc_seq_ops = { .start = misc_seq_start, .next = misc_seq_next, .stop = misc_seq_stop, .show = misc_seq_show, }; #endif static int misc_open(struct inode *inode, struct file *file) { int minor = iminor(inode); struct miscdevice *c = NULL, *iter; int err = -ENODEV; const struct file_operations *new_fops = NULL; mutex_lock(&misc_mtx); list_for_each_entry(iter, &misc_list, list) { if (iter->minor != minor) continue; c = iter; new_fops = fops_get(iter->fops); break; } if (!new_fops) { mutex_unlock(&misc_mtx); request_module("char-major-%d-%d", MISC_MAJOR, minor); mutex_lock(&misc_mtx); list_for_each_entry(iter, &misc_list, list) { if (iter->minor != minor) continue; c = iter; new_fops = fops_get(iter->fops); break; } if (!new_fops) goto fail; } /* * Place the miscdevice in the file's * private_data so it can be used by the * file operations, including f_op->open below */ file->private_data = c; err = 0; replace_fops(file, new_fops); if (file->f_op->open) err = file->f_op->open(inode, file); fail: mutex_unlock(&misc_mtx); return err; } static char *misc_devnode(const struct device *dev, umode_t *mode) { const struct miscdevice *c = dev_get_drvdata(dev); if (mode && c->mode) *mode = c->mode; if (c->nodename) return kstrdup(c->nodename, GFP_KERNEL); return NULL; } static const struct class misc_class = { .name = "misc", .devnode = misc_devnode, }; static const struct file_operations misc_fops = { .owner = THIS_MODULE, .open = misc_open, .llseek = noop_llseek, }; /** * misc_register - register a miscellaneous device * @misc: device structure * * Register a miscellaneous device with the kernel. If the minor * number is set to %MISC_DYNAMIC_MINOR a minor number is assigned * and placed in the minor field of the structure. For other cases * the minor number requested is used. 
* * The structure passed is linked into the kernel and may not be * destroyed until it has been unregistered. By default, an open() * syscall to the device sets file->private_data to point to the * structure. Drivers don't need open in fops for this. * * A zero is returned on success and a negative errno code for * failure. */ int misc_register(struct miscdevice *misc) { dev_t dev; int err = 0; bool is_dynamic = (misc->minor == MISC_DYNAMIC_MINOR); INIT_LIST_HEAD(&misc->list); mutex_lock(&misc_mtx); if (is_dynamic) { int i = misc_minor_alloc(); if (i < 0) { err = -EBUSY; goto out; } misc->minor = i; } else { struct miscdevice *c; list_for_each_entry(c, &misc_list, list) { if (c->minor == misc->minor) { err = -EBUSY; goto out; } } } dev = MKDEV(MISC_MAJOR, misc->minor); misc->this_device = device_create_with_groups(&misc_class, misc->parent, dev, misc, misc->groups, "%s", misc->name); if (IS_ERR(misc->this_device)) { if (is_dynamic) { misc_minor_free(misc->minor); misc->minor = MISC_DYNAMIC_MINOR; } err = PTR_ERR(misc->this_device); goto out; } /* * Add it to the front, so that later devices can "override" * earlier defaults */ list_add(&misc->list, &misc_list); out: mutex_unlock(&misc_mtx); return err; } EXPORT_SYMBOL(misc_register); /** * misc_deregister - unregister a miscellaneous device * @misc: device to unregister * * Unregister a miscellaneous device that was previously * successfully registered with misc_register(). */ void misc_deregister(struct miscdevice *misc) { if (WARN_ON(list_empty(&misc->list))) return; mutex_lock(&misc_mtx); list_del(&misc->list); device_destroy(&misc_class, MKDEV(MISC_MAJOR, misc->minor)); misc_minor_free(misc->minor); mutex_unlock(&misc_mtx); } EXPORT_SYMBOL(misc_deregister); static int __init misc_init(void) { int err; struct proc_dir_entry *ret; ret = proc_create_seq("misc", 0, NULL, &misc_seq_ops); err = class_register(&misc_class); if (err) goto fail_remove; err = -EIO; if (register_chrdev(MISC_MAJOR, "misc", &misc_fops)) goto fail_printk; return 0; fail_printk: pr_err("unable to get major %d for misc devices\n", MISC_MAJOR); class_unregister(&misc_class); fail_remove: if (ret) remove_proc_entry("misc", NULL); return err; } subsys_initcall(misc_init); |
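/*
 * Illustrative sketch (not part of misc.c): a minimal client of the
 * misc_register()/misc_deregister() API documented above. All of the
 * hello_* identifiers and the "hello" device name are made up for this
 * example; only the API calls and the MISC_DYNAMIC_MINOR behaviour come
 * from the code above.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>

static ssize_t hello_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	/* misc_open() stored the struct miscdevice in file->private_data */
	struct miscdevice *misc = file->private_data;
	char msg[64];
	int len;

	len = scnprintf(msg, sizeof(msg), "hello from minor %d\n", misc->minor);
	return simple_read_from_buffer(buf, count, ppos, msg, len);
}

static const struct file_operations hello_fops = {
	.owner = THIS_MODULE,
	.read = hello_read,
};

static struct miscdevice hello_misc = {
	.minor = MISC_DYNAMIC_MINOR,	/* ask misc_register() to pick a free minor */
	.name = "hello",		/* device node appears as /dev/hello */
	.fops = &hello_fops,
};

static int __init hello_init(void)
{
	/* On success the assigned minor is written back into hello_misc.minor. */
	return misc_register(&hello_misc);
}

static void __exit hello_exit(void)
{
	misc_deregister(&hello_misc);
}

module_init(hello_init);
module_exit(hello_exit);
MODULE_LICENSE("GPL");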
// SPDX-License-Identifier: GPL-2.0-only /* * RTL8XXXU mac80211 USB driver - 8188f specific subdriver * * Copyright (c) 2022 Bitterblue Smith <rtl8821cerfe2@gmail.com> * * Portions copied from existing rtl8xxxu code: * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com> * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. */ #include "regs.h" #include "rtl8xxxu.h" static const struct rtl8xxxu_reg8val rtl8188f_mac_init_table[] = { {0x024, 0xDF}, {0x025, 0x07}, {0x02B, 0x1C}, {0x283, 0x20}, {0x421, 0x0F}, {0x428, 0x0A}, {0x429, 0x10}, {0x430, 0x00}, {0x431, 0x00}, {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05}, {0x436, 0x07}, {0x437, 0x08}, {0x43C, 0x04}, {0x43D, 0x05}, {0x43E, 0x07}, {0x43F, 0x08}, {0x440, 0x5D}, {0x441, 0x01}, {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00}, {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xF0}, {0x44A, 0x0F}, {0x44B, 0x3E}, {0x44C, 0x10}, {0x44D, 0x00}, {0x44E, 0x00}, {0x44F, 0x00}, {0x450, 0x00}, {0x451, 0xF0}, {0x452, 0x0F}, {0x453, 0x00}, {0x456, 0x5E}, {0x460, 0x44}, {0x461, 0x44}, {0x4BC, 0xC0}, {0x4C8, 0xFF}, {0x4C9, 0x08}, {0x4CC, 0xFF}, {0x4CD, 0xFF}, {0x4CE, 0x01}, {0x500, 0x26}, {0x501, 0xA2}, {0x502, 0x2F}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xA3}, {0x506, 0x5E}, {0x507, 0x00}, {0x508, 0x2B}, {0x509, 0xA4}, {0x50A, 0x5E}, {0x50B, 0x00}, {0x50C, 0x4F}, {0x50D, 0xA4}, {0x50E, 0x00}, {0x50F, 0x00}, {0x512, 0x1C}, {0x514, 0x0A}, {0x516, 0x0A}, {0x525, 0x4F}, {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55C, 0x28}, {0x55D, 0xFF}, {0x605, 0x30}, {0x608, 0x0E}, {0x609, 0x2A}, {0x620, 0xFF}, {0x621, 0xFF}, {0x622, 0xFF}, {0x623, 0xFF}, {0x624, 0xFF}, {0x625, 0xFF}, {0x626, 0xFF}, {0x627, 0xFF}, {0x638, 0x28}, {0x63C, 0x0A}, {0x63D, 0x0A}, {0x63E, 0x0E}, {0x63F, 0x0E}, {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00}, {0x652, 0xC8}, {0x66E, 0x05}, {0x700, 0x21}, {0x701, 0x43}, {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, {0x70A, 0x65}, {0x70B, 0x87}, {0xffff, 0xff}, }; static const struct rtl8xxxu_reg32val rtl8188fu_phy_init_table[] = { {0x800, 0x80045700}, {0x804, 0x00000001}, {0x808, 0x0000FC00}, {0x80C, 0x0000000A}, {0x810, 0x10001331}, {0x814, 0x020C3D10}, {0x818, 0x00200385}, {0x81C, 0x00000000}, {0x820, 0x01000100}, {0x824, 0x00390204}, {0x828, 0x00000000}, {0x82C, 0x00000000}, {0x830, 0x00000000}, {0x834, 0x00000000}, {0x838, 0x00000000}, {0x83C, 0x00000000}, {0x840, 0x00010000}, {0x844, 0x00000000}, {0x848, 0x00000000}, {0x84C, 0x00000000}, {0x850, 0x00030000}, {0x854, 0x00000000}, {0x858, 0x569A569A}, {0x85C, 0x569A569A}, {0x860, 0x00000130}, {0x864, 0x00000000}, {0x868, 0x00000000}, {0x86C, 0x27272700}, {0x870, 0x00000000}, {0x874, 0x25004000}, {0x878, 0x00000808}, {0x87C, 0x004F0201}, {0x880, 0xB0000B1E}, {0x884, 0x00000007}, {0x888, 0x00000000}, {0x88C, 0xCCC000C0}, {0x890, 0x00000800}, {0x894, 0xFFFFFFFE}, {0x898, 0x40302010}, {0x89C,
0x00706050}, {0x900, 0x00000000}, {0x904, 0x00000023}, {0x908, 0x00000000}, {0x90C, 0x81121111}, {0x910, 0x00000002}, {0x914, 0x00000201}, {0x948, 0x99000000}, {0x94C, 0x00000010}, {0x950, 0x20003000}, {0x954, 0x4A880000}, {0x958, 0x4BC5D87A}, {0x95C, 0x04EB9B79}, {0x96C, 0x00000003}, {0xA00, 0x00D047C8}, {0xA04, 0x80FF800C}, {0xA08, 0x8C898300}, {0xA0C, 0x2E7F120F}, {0xA10, 0x9500BB78}, {0xA14, 0x1114D028}, {0xA18, 0x00881117}, {0xA1C, 0x89140F00}, {0xA20, 0xD1D80000}, {0xA24, 0x5A7DA0BD}, {0xA28, 0x0000223B}, {0xA2C, 0x00D30000}, {0xA70, 0x101FBF00}, {0xA74, 0x00000007}, {0xA78, 0x00000900}, {0xA7C, 0x225B0606}, {0xA80, 0x218075B1}, {0xA84, 0x00120000}, {0xA88, 0x040C0000}, {0xA8C, 0x12345678}, {0xA90, 0xABCDEF00}, {0xA94, 0x001B1B89}, {0xA98, 0x05100000}, {0xA9C, 0x3F000000}, {0xAA0, 0x00000000}, {0xB2C, 0x00000000}, {0xC00, 0x48071D40}, {0xC04, 0x03A05611}, {0xC08, 0x000000E4}, {0xC0C, 0x6C6C6C6C}, {0xC10, 0x18800000}, {0xC14, 0x40000100}, {0xC18, 0x08800000}, {0xC1C, 0x40000100}, {0xC20, 0x00000000}, {0xC24, 0x00000000}, {0xC28, 0x00000000}, {0xC2C, 0x00000000}, {0xC30, 0x69E9CC4A}, {0xC34, 0x31000040}, {0xC38, 0x21688080}, {0xC3C, 0x00001714}, {0xC40, 0x1F78403F}, {0xC44, 0x00010036}, {0xC48, 0xEC020107}, {0xC4C, 0x007F037F}, {0xC50, 0x69553420}, {0xC54, 0x43BC0094}, {0xC58, 0x00013169}, {0xC5C, 0x00250492}, {0xC60, 0x00000000}, {0xC64, 0x7112848B}, {0xC68, 0x47C07BFF}, {0xC6C, 0x00000036}, {0xC70, 0x2C7F000D}, {0xC74, 0x020600DB}, {0xC78, 0x0000001F}, {0xC7C, 0x00B91612}, {0xC80, 0x390000E4}, {0xC84, 0x11F60000}, {0xC88, 0x40000100}, {0xC8C, 0x20200000}, {0xC90, 0x00091521}, {0xC94, 0x00000000}, {0xC98, 0x00121820}, {0xC9C, 0x00007F7F}, {0xCA0, 0x00000000}, {0xCA4, 0x000300A0}, {0xCA8, 0x00000000}, {0xCAC, 0x00000000}, {0xCB0, 0x00000000}, {0xCB4, 0x00000000}, {0xCB8, 0x00000000}, {0xCBC, 0x28000000}, {0xCC0, 0x00000000}, {0xCC4, 0x00000000}, {0xCC8, 0x00000000}, {0xCCC, 0x00000000}, {0xCD0, 0x00000000}, {0xCD4, 0x00000000}, {0xCD8, 0x64B22427}, {0xCDC, 0x00766932}, {0xCE0, 0x00222222}, {0xCE4, 0x10000000}, {0xCE8, 0x37644302}, {0xCEC, 0x2F97D40C}, {0xD00, 0x04030740}, {0xD04, 0x40020401}, {0xD08, 0x0000907F}, {0xD0C, 0x20010201}, {0xD10, 0xA0633333}, {0xD14, 0x3333BC53}, {0xD18, 0x7A8F5B6F}, {0xD2C, 0xCB979975}, {0xD30, 0x00000000}, {0xD34, 0x80608000}, {0xD38, 0x98000000}, {0xD3C, 0x40127353}, {0xD40, 0x00000000}, {0xD44, 0x00000000}, {0xD48, 0x00000000}, {0xD4C, 0x00000000}, {0xD50, 0x6437140A}, {0xD54, 0x00000000}, {0xD58, 0x00000282}, {0xD5C, 0x30032064}, {0xD60, 0x4653DE68}, {0xD64, 0x04518A3C}, {0xD68, 0x00002101}, {0xD6C, 0x2A201C16}, {0xD70, 0x1812362E}, {0xD74, 0x322C2220}, {0xD78, 0x000E3C24}, {0xE00, 0x2D2D2D2D}, {0xE04, 0x2D2D2D2D}, {0xE08, 0x0390272D}, {0xE10, 0x2D2D2D2D}, {0xE14, 0x2D2D2D2D}, {0xE18, 0x2D2D2D2D}, {0xE1C, 0x2D2D2D2D}, {0xE28, 0x00000000}, {0xE30, 0x1000DC1F}, {0xE34, 0x10008C1F}, {0xE38, 0x02140102}, {0xE3C, 0x681604C2}, {0xE40, 0x01007C00}, {0xE44, 0x01004800}, {0xE48, 0xFB000000}, {0xE4C, 0x000028D1}, {0xE50, 0x1000DC1F}, {0xE54, 0x10008C1F}, {0xE58, 0x02140102}, {0xE5C, 0x28160D05}, {0xE60, 0x00000008}, {0xE60, 0x021400A0}, {0xE64, 0x281600A0}, {0xE6C, 0x01C00010}, {0xE70, 0x01C00010}, {0xE74, 0x02000010}, {0xE78, 0x02000010}, {0xE7C, 0x02000010}, {0xE80, 0x02000010}, {0xE84, 0x01C00010}, {0xE88, 0x02000010}, {0xE8C, 0x01C00010}, {0xED0, 0x01C00010}, {0xED4, 0x01C00010}, {0xED8, 0x01C00010}, {0xEDC, 0x00000010}, {0xEE0, 0x00000010}, {0xEEC, 0x03C00010}, {0xF14, 0x00000003}, {0xF4C, 0x00000000}, {0xF00, 0x00000300}, {0xffff, 0xffffffff}, }; static 
const struct rtl8xxxu_reg32val rtl8188f_agc_table[] = { {0xC78, 0xFC000001}, {0xC78, 0xFB010001}, {0xC78, 0xFA020001}, {0xC78, 0xF9030001}, {0xC78, 0xF8040001}, {0xC78, 0xF7050001}, {0xC78, 0xF6060001}, {0xC78, 0xF5070001}, {0xC78, 0xF4080001}, {0xC78, 0xF3090001}, {0xC78, 0xF20A0001}, {0xC78, 0xF10B0001}, {0xC78, 0xF00C0001}, {0xC78, 0xEF0D0001}, {0xC78, 0xEE0E0001}, {0xC78, 0xED0F0001}, {0xC78, 0xEC100001}, {0xC78, 0xEB110001}, {0xC78, 0xEA120001}, {0xC78, 0xE9130001}, {0xC78, 0xE8140001}, {0xC78, 0xE7150001}, {0xC78, 0xE6160001}, {0xC78, 0xE5170001}, {0xC78, 0xE4180001}, {0xC78, 0xE3190001}, {0xC78, 0xE21A0001}, {0xC78, 0xE11B0001}, {0xC78, 0xE01C0001}, {0xC78, 0xC21D0001}, {0xC78, 0xC11E0001}, {0xC78, 0xC01F0001}, {0xC78, 0xA5200001}, {0xC78, 0xA4210001}, {0xC78, 0xA3220001}, {0xC78, 0xA2230001}, {0xC78, 0xA1240001}, {0xC78, 0xA0250001}, {0xC78, 0x65260001}, {0xC78, 0x64270001}, {0xC78, 0x63280001}, {0xC78, 0x62290001}, {0xC78, 0x612A0001}, {0xC78, 0x442B0001}, {0xC78, 0x432C0001}, {0xC78, 0x422D0001}, {0xC78, 0x412E0001}, {0xC78, 0x402F0001}, {0xC78, 0x21300001}, {0xC78, 0x20310001}, {0xC78, 0x05320001}, {0xC78, 0x04330001}, {0xC78, 0x03340001}, {0xC78, 0x02350001}, {0xC78, 0x01360001}, {0xC78, 0x00370001}, {0xC78, 0x00380001}, {0xC78, 0x00390001}, {0xC78, 0x003A0001}, {0xC78, 0x003B0001}, {0xC78, 0x003C0001}, {0xC78, 0x003D0001}, {0xC78, 0x003E0001}, {0xC78, 0x003F0001}, {0xC50, 0x69553422}, {0xC50, 0x69553420}, {0xffff, 0xffffffff} }; static const struct rtl8xxxu_rfregval rtl8188fu_radioa_init_table[] = { {0x00, 0x00030000}, {0x08, 0x00008400}, {0x18, 0x00000407}, {0x19, 0x00000012}, {0x1B, 0x00001C6C}, {0x1E, 0x00080009}, {0x1F, 0x00000880}, {0x2F, 0x0001A060}, {0x3F, 0x00028000}, {0x42, 0x000060C0}, {0x57, 0x000D0000}, {0x58, 0x000C0160}, {0x67, 0x00001552}, {0x83, 0x00000000}, {0xB0, 0x000FF9F0}, {0xB1, 0x00022218}, {0xB2, 0x00034C00}, {0xB4, 0x0004484B}, {0xB5, 0x0000112A}, {0xB6, 0x0000053E}, {0xB7, 0x00010408}, {0xB8, 0x00010200}, {0xB9, 0x00080001}, {0xBA, 0x00040001}, {0xBB, 0x00000400}, {0xBF, 0x000C0000}, {0xC2, 0x00002400}, {0xC3, 0x00000009}, {0xC4, 0x00040C91}, {0xC5, 0x00099999}, {0xC6, 0x000000A3}, {0xC7, 0x0008F820}, {0xC8, 0x00076C06}, {0xC9, 0x00000000}, {0xCA, 0x00080000}, {0xDF, 0x00000180}, {0xEF, 0x000001A0}, {0x51, 0x000E8333}, {0x52, 0x000FAC2C}, {0x53, 0x00000103}, {0x56, 0x000517F0}, {0x35, 0x00000099}, {0x35, 0x00000199}, {0x35, 0x00000299}, {0x36, 0x00000064}, {0x36, 0x00008064}, {0x36, 0x00010064}, {0x36, 0x00018064}, {0x18, 0x00000C07}, {0x5A, 0x00048000}, {0x19, 0x000739D0}, {0x34, 0x0000ADD6}, {0x34, 0x00009DD3}, {0x34, 0x00008CF4}, {0x34, 0x00007CF1}, {0x34, 0x00006CEE}, {0x34, 0x00005CEB}, {0x34, 0x00004CCE}, {0x34, 0x00003CCB}, {0x34, 0x00002CC8}, {0x34, 0x00001C4B}, {0x34, 0x00000C48}, {0x00, 0x00030159}, {0x84, 0x00048000}, {0x86, 0x0000002A}, {0x87, 0x00000025}, {0x8E, 0x00065540}, {0x8F, 0x00088000}, {0xEF, 0x000020A0}, {0x3B, 0x000F0F00}, {0x3B, 0x000E0B00}, {0x3B, 0x000D0900}, {0x3B, 0x000C0700}, {0x3B, 0x000B0600}, {0x3B, 0x000A0400}, {0x3B, 0x00090200}, {0x3B, 0x00080000}, {0x3B, 0x0007BF00}, {0x3B, 0x00060B00}, {0x3B, 0x0005C900}, {0x3B, 0x00040700}, {0x3B, 0x00030600}, {0x3B, 0x0002D500}, {0x3B, 0x00010200}, {0x3B, 0x0000E000}, {0xEF, 0x000000A0}, {0xEF, 0x00000010}, {0x3B, 0x0000C0A8}, {0x3B, 0x00010400}, {0xEF, 0x00000000}, {0xEF, 0x00080000}, {0x30, 0x00010000}, {0x31, 0x0000000F}, {0x32, 0x00007EFE}, {0xEF, 0x00000000}, {0x00, 0x00010159}, {0x18, 0x0000FC07}, {0xFE, 0x00000000}, {0xFE, 0x00000000}, {0x1F, 0x00080003}, {0xFE, 
0x00000000}, {0xFE, 0x00000000}, {0x1E, 0x00000001}, {0x1F, 0x00080000}, {0x00, 0x00033D95}, {0xff, 0xffffffff} }; static const struct rtl8xxxu_rfregval rtl8188fu_cut_b_radioa_init_table[] = { {0x00, 0x00030000}, {0x08, 0x00008400}, {0x18, 0x00000407}, {0x19, 0x00000012}, {0x1B, 0x00001C6C}, {0x1E, 0x00080009}, {0x1F, 0x00000880}, {0x2F, 0x0001A060}, {0x3F, 0x00028000}, {0x42, 0x000060C0}, {0x57, 0x000D0000}, {0x58, 0x000C0160}, {0x67, 0x00001552}, {0x83, 0x00000000}, {0xB0, 0x000FF9F0}, {0xB1, 0x00022218}, {0xB2, 0x00034C00}, {0xB4, 0x0004484B}, {0xB5, 0x0000112A}, {0xB6, 0x0000053E}, {0xB7, 0x00010408}, {0xB8, 0x00010200}, {0xB9, 0x00080001}, {0xBA, 0x00040001}, {0xBB, 0x00000400}, {0xBF, 0x000C0000}, {0xC2, 0x00002400}, {0xC3, 0x00000009}, {0xC4, 0x00040C91}, {0xC5, 0x00099999}, {0xC6, 0x000000A3}, {0xC7, 0x0008F820}, {0xC8, 0x00076C06}, {0xC9, 0x00000000}, {0xCA, 0x00080000}, {0xDF, 0x00000180}, {0xEF, 0x000001A0}, {0x51, 0x000E8231}, {0x52, 0x000FAC2C}, {0x53, 0x00000141}, {0x56, 0x000517F0}, {0x35, 0x00000090}, {0x35, 0x00000190}, {0x35, 0x00000290}, {0x36, 0x00001064}, {0x36, 0x00009064}, {0x36, 0x00011064}, {0x36, 0x00019064}, {0x18, 0x00000C07}, {0x5A, 0x00048000}, {0x19, 0x000739D0}, {0x34, 0x0000ADD2}, {0x34, 0x00009DD0}, {0x34, 0x00008CF3}, {0x34, 0x00007CF0}, {0x34, 0x00006CED}, {0x34, 0x00005CD2}, {0x34, 0x00004CCF}, {0x34, 0x00003CCC}, {0x34, 0x00002CC9}, {0x34, 0x00001C4C}, {0x34, 0x00000C49}, {0x00, 0x00030159}, {0x84, 0x00048000}, {0x86, 0x0000002A}, {0x87, 0x00000025}, {0x8E, 0x00065540}, {0x8F, 0x00088000}, {0xEF, 0x000020A0}, {0x3B, 0x000F0F00}, {0x3B, 0x000E0B00}, {0x3B, 0x000D0900}, {0x3B, 0x000C0700}, {0x3B, 0x000B0600}, {0x3B, 0x000A0400}, {0x3B, 0x00090200}, {0x3B, 0x00080000}, {0x3B, 0x0007BF00}, {0x3B, 0x00060B00}, {0x3B, 0x0005C900}, {0x3B, 0x00040700}, {0x3B, 0x00030600}, {0x3B, 0x0002D500}, {0x3B, 0x00010200}, {0x3B, 0x0000E000}, {0xEF, 0x000000A0}, {0xEF, 0x00000010}, {0x3B, 0x0000C0A8}, {0x3B, 0x00010400}, {0xEF, 0x00000000}, {0xEF, 0x00080000}, {0x30, 0x00010000}, {0x31, 0x0000000F}, {0x32, 0x00007EFE}, {0xEF, 0x00000000}, {0x00, 0x00010159}, {0x18, 0x0000FC07}, {0xFE, 0x00000000}, {0xFE, 0x00000000}, {0x1F, 0x00080003}, {0xFE, 0x00000000}, {0xFE, 0x00000000}, {0x1E, 0x00000001}, {0x1F, 0x00080000}, {0x00, 0x00033D95}, {0xff, 0xffffffff} }; static int rtl8188fu_identify_chip(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; u32 sys_cfg, vendor; int ret = 0; strscpy(priv->chip_name, "8188FU", sizeof(priv->chip_name)); priv->rtl_chip = RTL8188F; priv->rf_paths = 1; priv->rx_paths = 1; priv->tx_paths = 1; priv->has_wifi = 1; sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG); priv->chip_cut = u32_get_bits(sys_cfg, SYS_CFG_CHIP_VERSION_MASK); if (sys_cfg & SYS_CFG_TRP_VAUX_EN) { dev_info(dev, "Unsupported test chip\n"); ret = -ENOTSUPP; goto out; } vendor = sys_cfg & SYS_CFG_VENDOR_EXT_MASK; rtl8xxxu_identify_vendor_2bits(priv, vendor); ret = rtl8xxxu_config_endpoints_no_sie(priv); out: return ret; } void rtl8188f_channel_to_group(int channel, int *group, int *cck_group) { if (channel < 3) *group = 0; else if (channel < 6) *group = 1; else if (channel < 9) *group = 2; else if (channel < 12) *group = 3; else *group = 4; if (channel == 14) *cck_group = 5; else *cck_group = *group; } void rtl8188f_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) { u32 val32, ofdm, mcs; u8 cck, ofdmbase, mcsbase; int group, cck_group; rtl8188f_channel_to_group(channel, &group, &cck_group); cck = priv->cck_tx_power_index_A[cck_group]; val32 = 
rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32); val32 &= 0xffff00ff; val32 |= (cck << 8); rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32); val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11); val32 &= 0xff; val32 |= ((cck << 8) | (cck << 16) | (cck << 24)); rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32); ofdmbase = priv->ht40_1s_tx_power_index_A[group]; ofdmbase += priv->ofdm_tx_power_diff[0].a; ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24; rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm); rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm); mcsbase = priv->ht40_1s_tx_power_index_A[group]; if (ht40) /* This diff is always 0 - not used in 8188FU. */ mcsbase += priv->ht40_tx_power_diff[0].a; else mcsbase += priv->ht20_tx_power_diff[0].a; mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24; rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs); rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs); rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs); rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs); } /* A workaround to eliminate the 2400MHz, 2440MHz, 2480MHz spur of 8188F. */ static void rtl8188f_spur_calibration(struct rtl8xxxu_priv *priv, u8 channel) { static const u32 frequencies[14 + 1] = { [5] = 0xFCCD, [6] = 0xFC4D, [7] = 0xFFCD, [8] = 0xFF4D, [11] = 0xFDCD, [13] = 0xFCCD, [14] = 0xFF9A }; static const u32 reg_d40[14 + 1] = { [5] = 0x06000000, [6] = 0x00000600, [13] = 0x06000000 }; static const u32 reg_d44[14 + 1] = { [11] = 0x04000000 }; static const u32 reg_d4c[14 + 1] = { [7] = 0x06000000, [8] = 0x00000380, [14] = 0x00180000 }; const u8 threshold = 0x16; bool do_notch, hw_ctrl, sw_ctrl, hw_ctrl_s1 = 0, sw_ctrl_s1 = 0; u32 val32, initial_gain, reg948; val32 = rtl8xxxu_read32(priv, REG_OFDM0_RX_D_SYNC_PATH); val32 |= GENMASK(28, 24); rtl8xxxu_write32(priv, REG_OFDM0_RX_D_SYNC_PATH, val32); /* enable notch filter */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_RX_D_SYNC_PATH); val32 |= BIT(9); rtl8xxxu_write32(priv, REG_OFDM0_RX_D_SYNC_PATH, val32); if (channel <= 14 && frequencies[channel] > 0) { reg948 = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); hw_ctrl = reg948 & BIT(6); sw_ctrl = !hw_ctrl; if (hw_ctrl) { val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE); val32 &= GENMASK(5, 3); hw_ctrl_s1 = val32 == BIT(3); } else if (sw_ctrl) { sw_ctrl_s1 = !(reg948 & BIT(9)); } if (hw_ctrl_s1 || sw_ctrl_s1) { initial_gain = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); /* Disable CCK block */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); val32 &= ~FPGA_RF_MODE_CCK; rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); val32 = initial_gain & ~OFDM0_X_AGC_CORE1_IGI_MASK; val32 |= 0x30; rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32); /* disable 3-wire */ rtl8xxxu_write32(priv, REG_FPGA0_ANALOG4, 0xccf000c0); /* Setup PSD */ rtl8xxxu_write32(priv, REG_FPGA0_PSD_FUNC, frequencies[channel]); /* Start PSD */ rtl8xxxu_write32(priv, REG_FPGA0_PSD_FUNC, 0x400000 | frequencies[channel]); msleep(30); do_notch = rtl8xxxu_read32(priv, REG_FPGA0_PSD_REPORT) >= threshold; /* turn off PSD */ rtl8xxxu_write32(priv, REG_FPGA0_PSD_FUNC, frequencies[channel]); /* enable 3-wire */ rtl8xxxu_write32(priv, REG_FPGA0_ANALOG4, 0xccc000c0); /* Enable CCK block */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); val32 |= FPGA_RF_MODE_CCK; rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, initial_gain); if (do_notch) { rtl8xxxu_write32(priv, REG_OFDM1_CSI_FIX_MASK1, reg_d40[channel]); 
rtl8xxxu_write32(priv, REG_OFDM1_CSI_FIX_MASK2, reg_d44[channel]); rtl8xxxu_write32(priv, 0xd48, 0x0); rtl8xxxu_write32(priv, 0xd4c, reg_d4c[channel]); /* enable CSI mask */ val32 = rtl8xxxu_read32(priv, REG_OFDM1_CFO_TRACKING); val32 |= BIT(28); rtl8xxxu_write32(priv, REG_OFDM1_CFO_TRACKING, val32); return; } } } /* disable CSI mask function */ val32 = rtl8xxxu_read32(priv, REG_OFDM1_CFO_TRACKING); val32 &= ~BIT(28); rtl8xxxu_write32(priv, REG_OFDM1_CFO_TRACKING, val32); } static void rtl8188fu_config_channel(struct ieee80211_hw *hw) { struct rtl8xxxu_priv *priv = hw->priv; u32 val32; u8 channel, subchannel; bool sec_ch_above; channel = (u8)hw->conf.chandef.chan->hw_value; /* Set channel */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG); val32 &= ~MODE_AG_CHANNEL_MASK; val32 |= channel; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32); /* Spur calibration */ rtl8188f_spur_calibration(priv, channel); /* Set bandwidth mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); val32 &= ~FPGA_RF_MODE; val32 |= hw->conf.chandef.width == NL80211_CHAN_WIDTH_40; rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE); val32 &= ~FPGA_RF_MODE; val32 |= hw->conf.chandef.width == NL80211_CHAN_WIDTH_40; rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32); /* RXADC CLK */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); val32 |= GENMASK(10, 8); rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); /* TXDAC CLK */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); val32 |= BIT(14) | BIT(12); val32 &= ~BIT(13); rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); /* small BW */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT); val32 &= ~GENMASK(31, 30); rtl8xxxu_write32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT, val32); /* adc buffer clk */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT); val32 &= ~BIT(29); val32 |= BIT(28); rtl8xxxu_write32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT, val32); /* adc buffer clk */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_AFE); val32 &= ~BIT(29); val32 |= BIT(28); rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_AFE, val32); val32 = rtl8xxxu_read32(priv, REG_OFDM_RX_DFIR); val32 &= ~BIT(19); rtl8xxxu_write32(priv, REG_OFDM_RX_DFIR, val32); val32 = rtl8xxxu_read32(priv, REG_OFDM_RX_DFIR); val32 &= ~GENMASK(23, 20); val32 |= BIT(21); if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_20 || hw->conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) val32 |= BIT(20); else if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40) val32 |= BIT(22); rtl8xxxu_write32(priv, REG_OFDM_RX_DFIR, val32); if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40) { if (hw->conf.chandef.center_freq1 > hw->conf.chandef.chan->center_freq) { sec_ch_above = 1; channel += 2; } else { sec_ch_above = 0; channel -= 2; } /* Set Control channel to upper or lower. 
*/ val32 = rtl8xxxu_read32(priv, REG_CCK0_SYSTEM); val32 &= ~CCK0_SIDEBAND; if (!sec_ch_above) val32 |= CCK0_SIDEBAND; rtl8xxxu_write32(priv, REG_CCK0_SYSTEM, val32); val32 = rtl8xxxu_read32(priv, REG_DATA_SUBCHANNEL); val32 &= ~GENMASK(3, 0); if (sec_ch_above) subchannel = 2; else subchannel = 1; val32 |= subchannel; rtl8xxxu_write32(priv, REG_DATA_SUBCHANNEL, val32); val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET); val32 &= ~RSR_RSC_BANDWIDTH_40M; rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32); } /* RF TRX_BW */ val32 = channel; if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_20 || hw->conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) val32 |= MODE_AG_BW_20MHZ_8723B; else if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40) val32 |= MODE_AG_BW_40MHZ_8723B; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32); /* FILTER BW&RC Corner (ACPR) */ if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_20 || hw->conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) val32 = 0x00065; else if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40) val32 = 0x00025; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RXG_MIX_SWBW, val32); if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_20 || hw->conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) val32 = 0x0; else if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40) val32 = 0x01000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RX_BB2, val32); /* RC Corner */ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, 0x00140); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RX_G2, 0x01c6c); } static void rtl8188fu_init_aggregation(struct rtl8xxxu_priv *priv) { u8 agg_ctrl, rxdma_mode, usb_tx_agg_desc_num = 6; u32 agg_rx, val32; /* TX aggregation */ val32 = rtl8xxxu_read32(priv, REG_DWBCN0_CTRL_8188F); val32 &= ~(0xf << 4); val32 |= usb_tx_agg_desc_num << 4; rtl8xxxu_write32(priv, REG_DWBCN0_CTRL_8188F, val32); rtl8xxxu_write8(priv, REG_DWBCN1_CTRL_8723B, usb_tx_agg_desc_num << 1); /* RX aggregation */ agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL); agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN; agg_rx = rtl8xxxu_read32(priv, REG_RXDMA_AGG_PG_TH); agg_rx &= ~RXDMA_USB_AGG_ENABLE; agg_rx &= ~0xFF0F; /* reset agg size and timeout */ rxdma_mode = rtl8xxxu_read8(priv, REG_RXDMA_PRO_8723B); rxdma_mode &= ~BIT(1); rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl); rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, agg_rx); rtl8xxxu_write8(priv, REG_RXDMA_PRO_8723B, rxdma_mode); } static void rtl8188fu_init_statistics(struct rtl8xxxu_priv *priv) { u32 val32; /* Time duration for NHM unit: 4us, 0xc350=200ms */ rtl8xxxu_write16(priv, REG_NHM_TIMER_8723B + 2, 0xc350); rtl8xxxu_write16(priv, REG_NHM_TH9_TH10_8723B + 2, 0xffff); rtl8xxxu_write32(priv, REG_NHM_TH3_TO_TH0_8723B, 0xffffff50); rtl8xxxu_write32(priv, REG_NHM_TH7_TO_TH4_8723B, 0xffffffff); /* TH8 */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 |= 0xff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* Enable CCK */ val32 = rtl8xxxu_read32(priv, REG_NHM_TH9_TH10_8723B); val32 &= ~(BIT(8) | BIT(9) | BIT(10)); val32 |= BIT(8); rtl8xxxu_write32(priv, REG_NHM_TH9_TH10_8723B, val32); /* Max power amongst all RX antennas */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_FA_RSTC); val32 |= BIT(7); rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32); } #define TX_POWER_INDEX_MAX 0x3F #define TX_POWER_INDEX_DEFAULT_CCK 0x22 #define TX_POWER_INDEX_DEFAULT_HT40 0x27 static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv) { struct rtl8188fu_efuse *efuse = &priv->efuse_wifi.efuse8188fu; int i; if (efuse->rtl_id != cpu_to_le16(0x8129)) return 
-EINVAL; ether_addr_copy(priv->mac_addr, efuse->mac_addr); memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base, sizeof(efuse->tx_power_index_A.cck_base)); memcpy(priv->ht40_1s_tx_power_index_A, efuse->tx_power_index_A.ht40_base, sizeof(efuse->tx_power_index_A.ht40_base)); for (i = 0; i < ARRAY_SIZE(priv->cck_tx_power_index_A); i++) { if (priv->cck_tx_power_index_A[i] > TX_POWER_INDEX_MAX) priv->cck_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_CCK; } for (i = 0; i < ARRAY_SIZE(priv->ht40_1s_tx_power_index_A); i++) { if (priv->ht40_1s_tx_power_index_A[i] > TX_POWER_INDEX_MAX) priv->ht40_1s_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_HT40; } priv->ofdm_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.a; priv->ht20_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.b; priv->default_crystal_cap = efuse->xtal_k & 0x3f; return 0; } static int rtl8188fu_load_firmware(struct rtl8xxxu_priv *priv) { const char *fw_name; int ret; fw_name = "rtlwifi/rtl8188fufw.bin"; ret = rtl8xxxu_load_firmware(priv, fw_name); return ret; } static void rtl8188fu_init_phy_bb(struct rtl8xxxu_priv *priv) { u8 val8; u16 val16; /* Enable BB and RF */ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF; rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); /* * Per vendor driver, run power sequence before init of RF */ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; rtl8xxxu_write8(priv, REG_RF_CTRL, val8); usleep_range(10, 20); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_IQADJ_G1, 0x780); val8 = SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_USBA | SYS_FUNC_USBD; rtl8xxxu_write8(priv, REG_SYS_FUNC, val8); rtl8xxxu_init_phy_regs(priv, rtl8188fu_phy_init_table); rtl8xxxu_init_phy_regs(priv, rtl8188f_agc_table); } static int rtl8188fu_init_phy_rf(struct rtl8xxxu_priv *priv) { int ret; if (priv->chip_cut == 1) ret = rtl8xxxu_init_phy_rf(priv, rtl8188fu_cut_b_radioa_init_table, RF_A); else ret = rtl8xxxu_init_phy_rf(priv, rtl8188fu_radioa_init_table, RF_A); return ret; } void rtl8188f_phy_lc_calibrate(struct rtl8xxxu_priv *priv) { u32 val32; u32 rf_amode, lstf; int i; /* Check continuous TX and Packet TX */ lstf = rtl8xxxu_read32(priv, REG_OFDM1_LSTF); if (lstf & OFDM_LSTF_MASK) { /* Disable all continuous TX */ val32 = lstf & ~OFDM_LSTF_MASK; rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32); } else { /* Deal with Packet TX case */ /* block all queues */ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); } /* Read original RF mode Path A */ rf_amode = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG); /* Start LC calibration */ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, rf_amode | 0x08000); for (i = 0; i < 100; i++) { if ((rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG) & 0x08000) == 0) break; msleep(10); } if (i == 100) dev_warn(&priv->udev->dev, "LC calibration timed out.\n"); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, rf_amode); /* Restore original parameters */ if (lstf & OFDM_LSTF_MASK) rtl8xxxu_write32(priv, REG_OFDM1_LSTF, lstf); else /* Deal with Packet TX case */ rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00); } static int rtl8188fu_iqk_path_a(struct rtl8xxxu_priv *priv, u32 *lok_result) { u32 reg_eac, reg_e94, reg_e9c, val32; int result = 0; /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * Enable path A PA in TX IQK mode */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, 
RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x20000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0x07ff7); /* PA,PAD gain adjust */ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, 0x980); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_PAD_TXG, 0x5102a); /* enter IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; val32 |= 0x80800000; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x821403ff); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160000); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); mdelay(25); /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, 0x180); /* save LOK result */ *lok_result = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_TXM_IDAC); /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); if (!(reg_eac & BIT(28)) && ((reg_e94 & 0x03ff0000) != 0x01420000) && ((reg_e9c & 0x03ff0000) != 0x00420000)) result |= 0x01; return result; } static int rtl8188fu_rx_iqk_path_a(struct rtl8xxxu_priv *priv, u32 lok_result) { u32 reg_ea4, reg_eac, reg_e94, reg_e9c, val32; int result = 0; /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * Enable path A PA in TX IQK mode */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf1173); /* PA,PAD gain adjust */ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, 0x980); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_PAD_TXG, 0x5102a); /* * Enter IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; val32 |= 0x80800000; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * Tx IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x10008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x30008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160fff); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160000); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); mdelay(25); /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, 0x180); /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); reg_e9c = rtl8xxxu_read32(priv, 
REG_TX_POWER_AFTER_IQK_A); if (!(reg_eac & BIT(28)) && ((reg_e94 & 0x03ff0000) != 0x01420000) && ((reg_e9c & 0x03ff0000) != 0x00420000)) result |= 0x01; else /* If TX not OK, ignore RX */ goto out; val32 = 0x80007c00 | (reg_e94 & 0x3ff0000) | ((reg_e9c & 0x3ff0000) >> 16); rtl8xxxu_write32(priv, REG_TX_IQK, val32); /* * Modify RX IQK mode table */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); val32 |= 0x80000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7ff2); /* * PA, PAD setting */ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, 0x980); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_PAD_TXG, 0x51000); /* * Enter IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; val32 |= 0x80800000; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); /* * RX IQK setting */ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x30008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x10008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160000); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x281613ff); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); mdelay(25); /* * Leave IQK mode */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0x000000ff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, 0x180); /* reload LOK value */ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXM_IDAC, lok_result); /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2); if (!(reg_eac & BIT(27)) && ((reg_ea4 & 0x03ff0000) != 0x01320000) && ((reg_eac & 0x03ff0000) != 0x00360000)) result |= 0x02; out: return result; } static void rtl8188fu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, int result[][8], int t) { struct device *dev = &priv->udev->dev; u32 i, val32, rx_initial_gain, lok_result; u32 path_sel_bb, path_sel_rf; int path_a_ok; int retry = 2; static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = { REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH, REG_RX_WAIT_CCA, REG_TX_CCK_RFON, REG_TX_CCK_BBON, REG_TX_OFDM_RFON, REG_TX_OFDM_BBON, REG_TX_TO_RX, REG_TX_TO_TX, REG_RX_CCK, REG_RX_OFDM, REG_RX_WAIT_RIFS, REG_RX_TO_RX, REG_STANDBY, REG_SLEEP, REG_PMPD_ANAEN }; static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = { REG_TXPAUSE, REG_BEACON_CTRL, REG_BEACON_CTRL_1, REG_GPIO_MUXCFG }; static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = { REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR, REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B, REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE, REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE }; /* * Note: IQ calibration must be performed after loading * PHY_REG.txt , and radio_a, radio_b.txt */ rx_initial_gain = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); if (t == 0) { /* Save ADDA parameters, turn Path A ADDA on */ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup, RTL8XXXU_ADDA_REGS); rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup); rtl8xxxu_save_regs(priv, iqk_bb_regs, 
priv->bb_backup, RTL8XXXU_BB_REGS); } rtl8xxxu_path_adda_on(priv, adda_regs, true); if (t == 0) { val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1); priv->pi_enabled = u32_get_bits(val32, FPGA0_HSSI_PARM1_PI); } /* save RF path */ path_sel_bb = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); path_sel_rf = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_S0S1); /* BB setting */ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600); rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x25204000); /* MAC settings */ val32 = rtl8xxxu_read32(priv, REG_TX_PTCL_CTRL); val32 |= 0x00ff0000; rtl8xxxu_write32(priv, REG_TX_PTCL_CTRL, val32); /* IQ calibration setting */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0xff; val32 |= 0x80800000; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); for (i = 0; i < retry; i++) { path_a_ok = rtl8188fu_iqk_path_a(priv, &lok_result); if (path_a_ok == 0x01) { val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0xff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); result[t][0] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); result[t][1] = (val32 >> 16) & 0x3ff; break; } } for (i = 0; i < retry; i++) { path_a_ok = rtl8188fu_rx_iqk_path_a(priv, lok_result); if (path_a_ok == 0x03) { val32 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2); result[t][2] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); result[t][3] = (val32 >> 16) & 0x3ff; break; } } if (!path_a_ok) dev_dbg(dev, "%s: Path A IQK failed!\n", __func__); /* Back to BB mode, load original value */ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); val32 &= 0xff; rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); if (t == 0) return; if (!priv->pi_enabled) { /* * Switch back BB to SI mode after finishing * IQ Calibration */ val32 = 0x01000000; rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, val32); rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, val32); } /* Reload ADDA power saving parameters */ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup, RTL8XXXU_ADDA_REGS); /* Reload MAC parameters */ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup); /* Reload BB parameters */ rtl8xxxu_restore_regs(priv, iqk_bb_regs, priv->bb_backup, RTL8XXXU_BB_REGS); /* Reload RF path */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel_bb); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_S0S1, path_sel_rf); /* Restore RX initial gain */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); val32 &= 0xffffff00; val32 |= 0x50; rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32); val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); val32 &= 0xffffff00; val32 |= rx_initial_gain & 0xff; rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32); /* Load 0xe30 IQC default value */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00); } static void rtl8188fu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; int result[4][8]; /* last is final result */ int i, candidate; bool path_a_ok; u32 reg_e94, reg_e9c, reg_ea4, reg_eac; u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc; s32 reg_tmp = 0; bool simu; u32 path_sel_bb, path_sel_rf; /* Save RF path */ path_sel_bb = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); path_sel_rf = rtl8xxxu_read_rfreg(priv, RF_A, 
RF6052_REG_S0S1); memset(result, 0, sizeof(result)); candidate = -1; path_a_ok = false; for (i = 0; i < 3; i++) { rtl8188fu_phy_iqcalibrate(priv, result, i); if (i == 1) { simu = rtl8xxxu_gen2_simularity_compare(priv, result, 0, 1); if (simu) { candidate = 0; break; } } if (i == 2) { simu = rtl8xxxu_gen2_simularity_compare(priv, result, 0, 2); if (simu) { candidate = 0; break; } simu = rtl8xxxu_gen2_simularity_compare(priv, result, 1, 2); if (simu) { candidate = 1; } else { for (i = 0; i < 8; i++) reg_tmp += result[3][i]; if (reg_tmp) candidate = 3; else candidate = -1; } } } for (i = 0; i < 4; i++) { reg_e94 = result[i][0]; reg_e9c = result[i][1]; reg_ea4 = result[i][2]; reg_eac = result[i][3]; reg_eb4 = result[i][4]; reg_ebc = result[i][5]; reg_ec4 = result[i][6]; reg_ecc = result[i][7]; } if (candidate >= 0) { reg_e94 = result[candidate][0]; priv->rege94 = reg_e94; reg_e9c = result[candidate][1]; priv->rege9c = reg_e9c; reg_ea4 = result[candidate][2]; reg_eac = result[candidate][3]; reg_eb4 = result[candidate][4]; priv->regeb4 = reg_eb4; reg_ebc = result[candidate][5]; priv->regebc = reg_ebc; reg_ec4 = result[candidate][6]; reg_ecc = result[candidate][7]; dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); dev_dbg(dev, "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n", __func__, reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); path_a_ok = true; } else { reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100; reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0; } if (reg_e94 && candidate >= 0) rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result, candidate, (reg_ea4 == 0)); rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg, priv->bb_recovery_backup, RTL8XXXU_BB_REGS); rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel_bb); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_S0S1, path_sel_rf); } static void rtl8188f_disabled_to_emu(struct rtl8xxxu_priv *priv) { u16 val8; /* 0x04[12:11] = 2b'01enable WL suspend */ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); val8 &= ~((APS_FSMCO_PCIE | APS_FSMCO_HW_SUSPEND) >> 8); rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); /* 0xC4[4] <= 1, turn off USB APHY LDO under suspend mode */ val8 = rtl8xxxu_read8(priv, 0xc4); val8 &= ~BIT(4); rtl8xxxu_write8(priv, 0xc4, val8); } static int rtl8188f_emu_to_active(struct rtl8xxxu_priv *priv) { u8 val8; u32 val32; int count, ret = 0; /* Disable SW LPS */ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); val8 &= ~(APS_FSMCO_SW_LPS >> 8); rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); /* wait till 0x04[17] = 1 power ready */ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); if (val32 & BIT(17)) break; udelay(10); } if (!count) { ret = -EBUSY; goto exit; } /* Disable HWPDN */ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); val8 &= ~(APS_FSMCO_HW_POWERDOWN >> 8); rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); /* Disable WL suspend */ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); val8 &= ~(APS_FSMCO_HW_SUSPEND >> 8); rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); /* set, then poll until 0 */ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); val8 |= APS_FSMCO_MAC_ENABLE >> 8; rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) { ret = 0; break; } udelay(10); } if (!count) { ret = -EBUSY; goto exit; } /* 0x27<=35 to reduce RF noise */ val8 = rtl8xxxu_write8(priv, 0x27, 0x35); exit: 
return ret; } static int rtl8188fu_active_to_emu(struct rtl8xxxu_priv *priv) { u8 val8; u32 val32; int count, ret = 0; /* Turn off RF */ rtl8xxxu_write8(priv, REG_RF_CTRL, 0); /* 0x4C[23] = 0x4E[7] = 0, switch DPDT_SEL_P output from register 0x65[2] */ val8 = rtl8xxxu_read8(priv, 0x4e); val8 &= ~BIT(7); rtl8xxxu_write8(priv, 0x4e, val8); /* 0x27 <= 34, xtal_qsel = 0 to xtal bring up */ rtl8xxxu_write8(priv, 0x27, 0x34); /* 0x04[9] = 1 turn off MAC by HW state machine */ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); val8 |= APS_FSMCO_MAC_OFF >> 8; rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); if ((val32 & APS_FSMCO_MAC_OFF) == 0) { ret = 0; break; } udelay(10); } if (!count) { ret = -EBUSY; goto exit; } exit: return ret; } static int rtl8188fu_emu_to_disabled(struct rtl8xxxu_priv *priv) { u8 val8; /* 0x04[12:11] = 2b'01 enable WL suspend */ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); val8 &= ~((APS_FSMCO_PCIE | APS_FSMCO_HW_SUSPEND) >> 8); val8 |= APS_FSMCO_HW_SUSPEND >> 8; rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); /* 0xC4[4] <= 1, turn off USB APHY LDO under suspend mode */ val8 = rtl8xxxu_read8(priv, 0xc4); val8 |= BIT(4); rtl8xxxu_write8(priv, 0xc4, val8); return 0; } static int rtl8188fu_active_to_lps(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; u8 val8; u16 val16; u32 val32; int retry, retval; /* set RPWM IMR */ val8 = rtl8xxxu_read8(priv, REG_FTIMR + 1); val8 |= IMR0_CPWM >> 8; rtl8xxxu_write8(priv, REG_FTIMR + 1, val8); /* Tx Pause */ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); retry = 100; retval = -EBUSY; /* * Poll 32 bit wide REG_SCH_TX_CMD for 0x00000000 to ensure no TX is pending. */ do { val32 = rtl8xxxu_read32(priv, REG_SCH_TX_CMD); if (!val32) { retval = 0; break; } } while (retry--); if (!retry) { dev_warn(dev, "Failed to flush TX queue\n"); retval = -EBUSY; goto out; } /* Disable CCK and OFDM, clock gated */ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC); val8 &= ~SYS_FUNC_BBRSTB; rtl8xxxu_write8(priv, REG_SYS_FUNC, val8); udelay(2); /* Whole BB is reset */ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC); val8 &= ~SYS_FUNC_BB_GLB_RSTN; rtl8xxxu_write8(priv, REG_SYS_FUNC, val8); /* Reset MAC TRX */ val16 = rtl8xxxu_read16(priv, REG_CR); val16 |= 0x3f; val16 &= ~(CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE | CR_SECURITY_ENABLE); rtl8xxxu_write16(priv, REG_CR, val16); /* Respond TxOK to scheduler */ val8 = rtl8xxxu_read8(priv, REG_DUAL_TSF_RST); val8 |= DUAL_TSF_TX_OK; rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, val8); out: return retval; } static int rtl8188fu_power_on(struct rtl8xxxu_priv *priv) { u16 val16; int ret; rtl8188f_disabled_to_emu(priv); ret = rtl8188f_emu_to_active(priv); if (ret) goto exit; rtl8xxxu_write8(priv, REG_CR, 0); val16 = rtl8xxxu_read16(priv, REG_CR); val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE | CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE | CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE); rtl8xxxu_write16(priv, REG_CR, val16); exit: return ret; } static void rtl8188fu_power_off(struct rtl8xxxu_priv *priv) { u8 val8; u16 val16; rtl8xxxu_flush_fifo(priv); val16 = rtl8xxxu_read16(priv, REG_GPIO_MUXCFG); val16 &= ~BIT(12); rtl8xxxu_write16(priv, REG_GPIO_MUXCFG, val16); rtl8xxxu_write32(priv, REG_HISR0, 0xFFFFFFFF); rtl8xxxu_write32(priv, REG_HISR1, 0xFFFFFFFF); /* Stop Tx Report Timer. 
0x4EC[Bit1]=b'0 */ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL); val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE; rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8); /* Turn off RF */ rtl8xxxu_write8(priv, REG_RF_CTRL, 0x00); /* Reset Firmware if running in RAM */ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL) rtl8xxxu_firmware_self_reset(priv); rtl8188fu_active_to_lps(priv); /* Reset MCU */ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); val16 &= ~SYS_FUNC_CPU_ENABLE; rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); /* Reset MCU ready status */ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00); rtl8188fu_active_to_emu(priv); rtl8188fu_emu_to_disabled(priv); } #define PPG_BB_GAIN_2G_TXA_OFFSET_8188F 0xee #define PPG_BB_GAIN_2G_TX_OFFSET_MASK 0x0f static void rtl8188f_enable_rf(struct rtl8xxxu_priv *priv) { u32 val32; u8 pg_pwrtrim = 0xff, val8; s8 bb_gain; /* Somehow this is not found in the efuse we read earlier. */ rtl8xxxu_read_efuse8(priv, PPG_BB_GAIN_2G_TXA_OFFSET_8188F, &pg_pwrtrim); if (pg_pwrtrim != 0xff) { bb_gain = pg_pwrtrim & PPG_BB_GAIN_2G_TX_OFFSET_MASK; if (bb_gain == PPG_BB_GAIN_2G_TX_OFFSET_MASK) bb_gain = 0; else if (bb_gain & 1) bb_gain = bb_gain >> 1; else bb_gain = -(bb_gain >> 1); val8 = abs(bb_gain); if (bb_gain > 0) val8 |= BIT(5); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_55); val32 &= ~0xfc000; val32 |= val8 << 14; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_55, val32); } rtl8xxxu_write8(priv, REG_RF_CTRL, RF_ENABLE | RF_RSTB | RF_SDMRSTB); val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE); val32 &= ~(OFDM_RF_PATH_RX_MASK | OFDM_RF_PATH_TX_MASK); val32 |= OFDM_RF_PATH_RX_A | OFDM_RF_PATH_TX_A; rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32); rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00); } static void rtl8188f_disable_rf(struct rtl8xxxu_priv *priv) { u32 val32; val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE); val32 &= ~OFDM_RF_PATH_TX_MASK; rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32); /* Power down RF module */ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0); } static void rtl8188f_usb_quirks(struct rtl8xxxu_priv *priv) { u16 val16; u32 val32; val16 = rtl8xxxu_read16(priv, REG_CR); val16 |= (CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE); rtl8xxxu_write16(priv, REG_CR, val16); val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK); val32 |= TXDMA_OFFSET_DROP_DATA_EN; rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32); } #define XTAL1 GENMASK(22, 17) #define XTAL0 GENMASK(16, 11) void rtl8188f_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap) { struct rtl8xxxu_cfo_tracking *cfo = &priv->cfo_tracking; u32 val32; if (crystal_cap == cfo->crystal_cap) return; val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL); dev_dbg(&priv->udev->dev, "%s: Adjusting crystal cap from 0x%x (actually 0x%lx 0x%lx) to 0x%x\n", __func__, cfo->crystal_cap, FIELD_GET(XTAL1, val32), FIELD_GET(XTAL0, val32), crystal_cap); val32 &= ~(XTAL1 | XTAL0); val32 |= FIELD_PREP(XTAL1, crystal_cap) | FIELD_PREP(XTAL0, crystal_cap); rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32); cfo->crystal_cap = crystal_cap; } static s8 rtl8188f_cck_rssi(struct rtl8xxxu_priv *priv, struct rtl8723au_phy_stats *phy_stats) { u8 cck_agc_rpt = phy_stats->cck_agc_rpt_ofdm_cfosho_a; s8 rx_pwr_all = 0x00; u8 vga_idx, lna_idx; lna_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_LNA_IDX_MASK); vga_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_VGA_IDX_MASK); switch (lna_idx) { case 7: if (vga_idx <= 27) rx_pwr_all = -100 + 2 * (27 - vga_idx); else rx_pwr_all = -100; break; case 5: rx_pwr_all = 
-74 + 2 * (21 - vga_idx); break; case 3: rx_pwr_all = -60 + 2 * (20 - vga_idx); break; case 1: rx_pwr_all = -44 + 2 * (19 - vga_idx); break; default: break; } return rx_pwr_all; } struct rtl8xxxu_fileops rtl8188fu_fops = { .identify_chip = rtl8188fu_identify_chip, .parse_efuse = rtl8188fu_parse_efuse, .load_firmware = rtl8188fu_load_firmware, .power_on = rtl8188fu_power_on, .power_off = rtl8188fu_power_off, .read_efuse = rtl8xxxu_read_efuse, .reset_8051 = rtl8xxxu_reset_8051, .llt_init = rtl8xxxu_auto_llt_table, .init_phy_bb = rtl8188fu_init_phy_bb, .init_phy_rf = rtl8188fu_init_phy_rf, .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .phy_lc_calibrate = rtl8188f_phy_lc_calibrate, .phy_iq_calibrate = rtl8188fu_phy_iq_calibrate, .config_channel = rtl8188fu_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc24, .parse_phystats = rtl8723au_rx_parse_phystats, .init_aggregation = rtl8188fu_init_aggregation, .init_statistics = rtl8188fu_init_statistics, .init_burst = rtl8xxxu_init_burst, .enable_rf = rtl8188f_enable_rf, .disable_rf = rtl8188f_disable_rf, .usb_quirks = rtl8188f_usb_quirks, .set_tx_power = rtl8188f_set_tx_power, .update_rate_mask = rtl8xxxu_gen2_update_rate_mask, .report_connect = rtl8xxxu_gen2_report_connect, .report_rssi = rtl8xxxu_gen2_report_rssi, .fill_txdesc = rtl8xxxu_fill_txdesc_v2, .set_crystal_cap = rtl8188f_set_crystal_cap, .cck_rssi = rtl8188f_cck_rssi, .writeN_block_size = 128, .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24), .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40), .has_s0s1 = 1, .has_tx_report = 1, .gen2_thermal_meter = 1, .needs_full_init = 1, .init_reg_rxfltmap = 1, .init_reg_pkt_life_time = 1, .init_reg_hmtfr = 1, .ampdu_max_time = 0x70, .ustime_tsf_edca = 0x28, .max_aggr_num = 0x0c14, .supports_ap = 1, .max_macid_num = 16, .max_sec_cam_num = 16, .supports_concurrent = 1, .adda_1t_init = 0x03c00014, .adda_1t_path_on = 0x03c00014, .trxff_boundary = 0x3f7f, .pbp_rx = PBP_PAGE_SIZE_256, .pbp_tx = PBP_PAGE_SIZE_256, .mactable = rtl8188f_mac_init_table, .total_page_num = TX_TOTAL_PAGE_NUM_8188F, .page_num_hi = TX_PAGE_NUM_HI_PQ_8188F, .page_num_lo = TX_PAGE_NUM_LO_PQ_8188F, .page_num_norm = TX_PAGE_NUM_NORM_PQ_8188F, }; |
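/*
 * Illustrative sketch (not part of the rtl8xxxu driver above): a small,
 * self-contained model of how rtl8188f_enable_rf() turns the efuse 2G TX
 * power-trim nibble into a signed BB gain and packs it for the RF register.
 * The helper names and the main() harness are hypothetical; only the
 * decode/encode rules are taken from the code above.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* 0xf means "no trim"; otherwise bit 0 is the sign (1 = positive) and the
 * remaining bits of the nibble hold the magnitude, as in the driver.
 */
static int decode_bb_gain(uint8_t pwrtrim_nibble)
{
	if (pwrtrim_nibble == 0x0f)
		return 0;
	if (pwrtrim_nibble & 1)
		return pwrtrim_nibble >> 1;	/* positive trim */
	return -(pwrtrim_nibble >> 1);		/* negative trim */
}

/* Pack the trim into the register field: bit 5 flags a positive offset,
 * the low bits carry the magnitude (mirrors val8 in rtl8188f_enable_rf).
 */
static uint8_t pack_bb_gain(int bb_gain)
{
	uint8_t val = abs(bb_gain);

	if (bb_gain > 0)
		val |= 1 << 5;
	return val;
}

int main(void)
{
	uint8_t nibble;

	for (nibble = 0; nibble <= 0x0f; nibble++)
		printf("nibble 0x%x -> gain %+d -> field 0x%02x\n",
		       nibble, decode_bb_gain(nibble),
		       pack_bb_gain(decode_bb_gain(nibble)));
	return 0;
}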
// SPDX-License-Identifier: GPL-2.0-or-later
/* Linux driver for Philips webcam
   Decompression for chipset version 2 and 3

   (C) 2004-2006 Luc Saillard (luc@saillard.org)

   NOTE: this version of pwc is an unofficial (modified) release of the pwc &
   pcwx driver and thus may have bugs that are not present in the original
   version. Please send bug reports and support requests to
   <luc@saillard.org>.

   The decompression routines have been implemented by reverse-engineering the
   Nemosoft binary pwcx module. Caveat emptor.
*/ #include "pwc-timon.h" #include "pwc-kiara.h" #include "pwc-dec23.h" #include <linux/string.h> #include <linux/slab.h> /* * USE_LOOKUP_TABLE_TO_CLAMP * 0: use a C version of this tests: { a<0?0:(a>255?255:a) } * 1: use a faster lookup table for cpu with a big cache (intel) */ #define USE_LOOKUP_TABLE_TO_CLAMP 1 /* * UNROLL_LOOP_FOR_COPYING_BLOCK * 0: use a loop for a smaller code (but little slower) * 1: when unrolling the loop, gcc produces some faster code (perhaps only * valid for intel processor class). Activating this option, automatically * activate USE_LOOKUP_TABLE_TO_CLAMP */ #define UNROLL_LOOP_FOR_COPY 1 #if UNROLL_LOOP_FOR_COPY # undef USE_LOOKUP_TABLE_TO_CLAMP # define USE_LOOKUP_TABLE_TO_CLAMP 1 #endif static void build_subblock_pattern(struct pwc_dec23_private *pdec) { static const unsigned int initial_values[12] = { -0x526500, -0x221200, 0x221200, 0x526500, -0x3de200, 0x3de200, -0x6db480, -0x2d5d00, 0x2d5d00, 0x6db480, -0x12c200, 0x12c200 }; static const unsigned int values_derivated[12] = { 0xa4ca, 0x4424, -0x4424, -0xa4ca, 0x7bc4, -0x7bc4, 0xdb69, 0x5aba, -0x5aba, -0xdb69, 0x2584, -0x2584 }; unsigned int temp_values[12]; int i, j; memcpy(temp_values, initial_values, sizeof(initial_values)); for (i = 0; i < 256; i++) { for (j = 0; j < 12; j++) { pdec->table_subblock[i][j] = temp_values[j]; temp_values[j] += values_derivated[j]; } } } static void build_bit_powermask_table(struct pwc_dec23_private *pdec) { unsigned char *p; unsigned int bit, byte, mask, val; unsigned int bitpower = 1; for (bit = 0; bit < 8; bit++) { mask = bitpower - 1; p = pdec->table_bitpowermask[bit]; for (byte = 0; byte < 256; byte++) { val = (byte & mask); if (byte & bitpower) val = -val; *p++ = val; } bitpower<<=1; } } static void build_table_color(const unsigned int romtable[16][8], unsigned char p0004[16][1024], unsigned char p8004[16][256]) { int compression_mode, j, k, bit, pw; unsigned char *p0, *p8; const unsigned int *r; /* We have 16 compressions tables */ for (compression_mode = 0; compression_mode < 16; compression_mode++) { p0 = p0004[compression_mode]; p8 = p8004[compression_mode]; r = romtable[compression_mode]; for (j = 0; j < 8; j++, r++, p0 += 128) { for (k = 0; k < 16; k++) { if (k == 0) bit = 1; else if (k >= 1 && k < 3) bit = (r[0] >> 15) & 7; else if (k >= 3 && k < 6) bit = (r[0] >> 12) & 7; else if (k >= 6 && k < 10) bit = (r[0] >> 9) & 7; else if (k >= 10 && k < 13) bit = (r[0] >> 6) & 7; else if (k >= 13 && k < 15) bit = (r[0] >> 3) & 7; else bit = (r[0]) & 7; if (k == 0) *p8++ = 8; else *p8++ = j - bit; *p8++ = bit; pw = 1 << bit; p0[k + 0x00] = (1 * pw) + 0x80; p0[k + 0x10] = (2 * pw) + 0x80; p0[k + 0x20] = (3 * pw) + 0x80; p0[k + 0x30] = (4 * pw) + 0x80; p0[k + 0x40] = (-1 * pw) + 0x80; p0[k + 0x50] = (-2 * pw) + 0x80; p0[k + 0x60] = (-3 * pw) + 0x80; p0[k + 0x70] = (-4 * pw) + 0x80; } /* end of for (k=0; k<16; k++, p8++) */ } /* end of for (j=0; j<8; j++ , table++) */ } /* end of foreach compression_mode */ } /* * */ static void fill_table_dc00_d800(struct pwc_dec23_private *pdec) { #define SCALEBITS 15 #define ONE_HALF (1UL << (SCALEBITS - 1)) int i; unsigned int offset1 = ONE_HALF; unsigned int offset2 = 0x0000; for (i=0; i<256; i++) { pdec->table_dc00[i] = offset1 & ~(ONE_HALF); pdec->table_d800[i] = offset2; offset1 += 0x7bc4; offset2 += 0x7bc4; } } /* * To decode the stream: * if look_bits(2) == 0: # op == 2 in the lookup table * skip_bits(2) * end of the stream * elif look_bits(3) == 7: # op == 1 in the lookup table * skip_bits(3) * yyyy = get_bits(4) * xxxx = 
get_bits(8) * else: # op == 0 in the lookup table * skip_bits(x) * * For speedup processing, we build a lookup table and we takes the first 6 bits. * * struct { * unsigned char op; // operation to execute * unsigned char bits; // bits use to perform operation * unsigned char offset1; // offset to add to access in the table_0004 % 16 * unsigned char offset2; // offset to add to access in the table_0004 * } * * How to build this table ? * op == 2 when (i%4)==0 * op == 1 when (i%8)==7 * op == 0 otherwise * */ static const unsigned char hash_table_ops[64*4] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x04, 0x01, 0x10, 0x00, 0x06, 0x01, 0x30, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x40, 0x00, 0x05, 0x01, 0x20, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x04, 0x01, 0x50, 0x00, 0x05, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x40, 0x00, 0x05, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x04, 0x01, 0x10, 0x00, 0x06, 0x02, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x40, 0x00, 0x05, 0x01, 0x60, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x04, 0x01, 0x50, 0x00, 0x05, 0x02, 0x40, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x40, 0x00, 0x05, 0x03, 0x40, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x04, 0x01, 0x10, 0x00, 0x06, 0x01, 0x70, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x40, 0x00, 0x05, 0x01, 0x20, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x04, 0x01, 0x50, 0x00, 0x05, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x40, 0x00, 0x05, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x04, 0x01, 0x10, 0x00, 0x06, 0x02, 0x50, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x40, 0x00, 0x05, 0x01, 0x60, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x04, 0x01, 0x50, 0x00, 0x05, 0x02, 0x40, 0x02, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x40, 0x00, 0x05, 0x03, 0x40, 0x01, 0x00, 0x00, 0x00 }; /* * */ static const unsigned int MulIdx[16][16] = { {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,}, {0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3,}, {4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4,}, {6, 7, 8, 9, 7, 10, 11, 8, 8, 11, 10, 7, 9, 8, 7, 6,}, {4, 5, 5, 4, 4, 5, 5, 4, 4, 5, 5, 4, 4, 5, 5, 4,}, {1, 3, 0, 2, 1, 3, 0, 2, 1, 3, 0, 2, 1, 3, 0, 2,}, {0, 3, 3, 0, 1, 2, 2, 1, 2, 1, 1, 2, 3, 0, 0, 3,}, {0, 1, 2, 3, 3, 2, 1, 0, 3, 2, 1, 0, 0, 1, 2, 3,}, {1, 1, 1, 1, 3, 3, 3, 3, 0, 0, 0, 0, 2, 2, 2, 2,}, {7, 10, 11, 8, 9, 8, 7, 6, 6, 7, 8, 9, 8, 11, 10, 7,}, {4, 5, 5, 4, 5, 4, 4, 5, 5, 4, 4, 5, 4, 5, 5, 4,}, {7, 9, 6, 8, 10, 8, 7, 11, 11, 7, 8, 10, 8, 6, 9, 7,}, {1, 3, 0, 2, 2, 0, 3, 1, 2, 0, 3, 1, 1, 3, 0, 2,}, {1, 2, 2, 1, 3, 0, 0, 3, 0, 3, 3, 0, 2, 1, 1, 2,}, {10, 8, 7, 11, 8, 6, 9, 7, 7, 9, 6, 8, 11, 7, 8, 10} }; #if USE_LOOKUP_TABLE_TO_CLAMP #define MAX_OUTER_CROP_VALUE (512) static unsigned char pwc_crop_table[256 + 2*MAX_OUTER_CROP_VALUE]; #define CLAMP(x) (pwc_crop_table[MAX_OUTER_CROP_VALUE+(x)]) #else #define CLAMP(x) ((x)>255?255:((x)<0?0:x)) #endif /* If the type or the command change, we rebuild the lookup table */ void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd) { int flags, version, shift, i; struct pwc_dec23_private *pdec = &pdev->dec23; mutex_init(&pdec->lock); if (pdec->last_cmd_valid && pdec->last_cmd == cmd[2]) return; if 
(DEVICE_USE_CODEC3(pdev->type)) { flags = cmd[2] & 0x18; if (flags == 8) pdec->nbits = 7; /* More bits, mean more bits to encode the stream, but better quality */ else if (flags == 0x10) pdec->nbits = 8; else pdec->nbits = 6; version = cmd[2] >> 5; build_table_color(KiaraRomTable[version][0], pdec->table_0004_pass1, pdec->table_8004_pass1); build_table_color(KiaraRomTable[version][1], pdec->table_0004_pass2, pdec->table_8004_pass2); } else { flags = cmd[2] & 6; if (flags == 2) pdec->nbits = 7; else if (flags == 4) pdec->nbits = 8; else pdec->nbits = 6; version = cmd[2] >> 3; build_table_color(TimonRomTable[version][0], pdec->table_0004_pass1, pdec->table_8004_pass1); build_table_color(TimonRomTable[version][1], pdec->table_0004_pass2, pdec->table_8004_pass2); } /* Information can be coded on a variable number of bits but never less than 8 */ shift = 8 - pdec->nbits; pdec->scalebits = SCALEBITS - shift; pdec->nbitsmask = 0xFF >> shift; fill_table_dc00_d800(pdec); build_subblock_pattern(pdec); build_bit_powermask_table(pdec); #if USE_LOOKUP_TABLE_TO_CLAMP /* Build the static table to clamp value [0-255] */ for (i=0;i<MAX_OUTER_CROP_VALUE;i++) pwc_crop_table[i] = 0; for (i=0; i<256; i++) pwc_crop_table[MAX_OUTER_CROP_VALUE+i] = i; for (i=0; i<MAX_OUTER_CROP_VALUE; i++) pwc_crop_table[MAX_OUTER_CROP_VALUE+256+i] = 255; #endif pdec->last_cmd = cmd[2]; pdec->last_cmd_valid = 1; } /* * Copy the 4x4 image block to Y plane buffer */ static void copy_image_block_Y(const int *src, unsigned char *dst, unsigned int bytes_per_line, unsigned int scalebits) { #if UNROLL_LOOP_FOR_COPY const unsigned char *cm = pwc_crop_table+MAX_OUTER_CROP_VALUE; const int *c = src; unsigned char *d = dst; *d++ = cm[c[0] >> scalebits]; *d++ = cm[c[1] >> scalebits]; *d++ = cm[c[2] >> scalebits]; *d++ = cm[c[3] >> scalebits]; d = dst + bytes_per_line; *d++ = cm[c[4] >> scalebits]; *d++ = cm[c[5] >> scalebits]; *d++ = cm[c[6] >> scalebits]; *d++ = cm[c[7] >> scalebits]; d = dst + bytes_per_line*2; *d++ = cm[c[8] >> scalebits]; *d++ = cm[c[9] >> scalebits]; *d++ = cm[c[10] >> scalebits]; *d++ = cm[c[11] >> scalebits]; d = dst + bytes_per_line*3; *d++ = cm[c[12] >> scalebits]; *d++ = cm[c[13] >> scalebits]; *d++ = cm[c[14] >> scalebits]; *d++ = cm[c[15] >> scalebits]; #else int i; const int *c = src; unsigned char *d = dst; for (i = 0; i < 4; i++, c++) *d++ = CLAMP((*c) >> scalebits); d = dst + bytes_per_line; for (i = 0; i < 4; i++, c++) *d++ = CLAMP((*c) >> scalebits); d = dst + bytes_per_line*2; for (i = 0; i < 4; i++, c++) *d++ = CLAMP((*c) >> scalebits); d = dst + bytes_per_line*3; for (i = 0; i < 4; i++, c++) *d++ = CLAMP((*c) >> scalebits); #endif } /* * Copy the 4x4 image block to a CrCb plane buffer * */ static void copy_image_block_CrCb(const int *src, unsigned char *dst, unsigned int bytes_per_line, unsigned int scalebits) { #if UNROLL_LOOP_FOR_COPY /* Unroll all loops */ const unsigned char *cm = pwc_crop_table+MAX_OUTER_CROP_VALUE; const int *c = src; unsigned char *d = dst; *d++ = cm[c[0] >> scalebits]; *d++ = cm[c[4] >> scalebits]; *d++ = cm[c[1] >> scalebits]; *d++ = cm[c[5] >> scalebits]; *d++ = cm[c[2] >> scalebits]; *d++ = cm[c[6] >> scalebits]; *d++ = cm[c[3] >> scalebits]; *d++ = cm[c[7] >> scalebits]; d = dst + bytes_per_line; *d++ = cm[c[12] >> scalebits]; *d++ = cm[c[8] >> scalebits]; *d++ = cm[c[13] >> scalebits]; *d++ = cm[c[9] >> scalebits]; *d++ = cm[c[14] >> scalebits]; *d++ = cm[c[10] >> scalebits]; *d++ = cm[c[15] >> scalebits]; *d++ = cm[c[11] >> scalebits]; #else int i; const int *c1 = src; 
const int *c2 = src + 4; unsigned char *d = dst; for (i = 0; i < 4; i++, c1++, c2++) { *d++ = CLAMP((*c1) >> scalebits); *d++ = CLAMP((*c2) >> scalebits); } c1 = src + 12; d = dst + bytes_per_line; for (i = 0; i < 4; i++, c1++, c2++) { *d++ = CLAMP((*c1) >> scalebits); *d++ = CLAMP((*c2) >> scalebits); } #endif } /* * To manage the stream, we keep bits in a 32 bits register. * fill_nbits(n): fill the reservoir with at least n bits * skip_bits(n): discard n bits from the reservoir * get_bits(n): fill the reservoir, returns the first n bits and discard the * bits from the reservoir. * __get_nbits(n): faster version of get_bits(n), but asumes that the reservoir * contains at least n bits. bits returned is discarded. */ #define fill_nbits(pdec, nbits_wanted) do { \ while (pdec->nbits_in_reservoir<(nbits_wanted)) \ { \ pdec->reservoir |= (*(pdec->stream)++) << (pdec->nbits_in_reservoir); \ pdec->nbits_in_reservoir += 8; \ } \ } while(0); #define skip_nbits(pdec, nbits_to_skip) do { \ pdec->reservoir >>= (nbits_to_skip); \ pdec->nbits_in_reservoir -= (nbits_to_skip); \ } while(0); #define get_nbits(pdec, nbits_wanted, result) do { \ fill_nbits(pdec, nbits_wanted); \ result = (pdec->reservoir) & ((1U<<(nbits_wanted))-1); \ skip_nbits(pdec, nbits_wanted); \ } while(0); #define __get_nbits(pdec, nbits_wanted, result) do { \ result = (pdec->reservoir) & ((1U<<(nbits_wanted))-1); \ skip_nbits(pdec, nbits_wanted); \ } while(0); #define look_nbits(pdec, nbits_wanted) \ ((pdec->reservoir) & ((1U<<(nbits_wanted))-1)) /* * Decode a 4x4 pixel block */ static void decode_block(struct pwc_dec23_private *pdec, const unsigned char *ptable0004, const unsigned char *ptable8004) { unsigned int primary_color; unsigned int channel_v, offset1, op; int i; fill_nbits(pdec, 16); __get_nbits(pdec, pdec->nbits, primary_color); if (look_nbits(pdec,2) == 0) { skip_nbits(pdec, 2); /* Very simple, the color is the same for all pixels of the square */ for (i = 0; i < 16; i++) pdec->temp_colors[i] = pdec->table_dc00[primary_color]; return; } /* This block is encoded with small pattern */ for (i = 0; i < 16; i++) pdec->temp_colors[i] = pdec->table_d800[primary_color]; __get_nbits(pdec, 3, channel_v); channel_v = ((channel_v & 1) << 2) | (channel_v & 2) | ((channel_v & 4) >> 2); ptable0004 += (channel_v * 128); ptable8004 += (channel_v * 32); offset1 = 0; do { unsigned int htable_idx, rows = 0; const unsigned int *block; /* [ zzzz y x x ] * xx == 00 :=> end of the block def, remove the two bits from the stream * yxx == 111 * yxx == any other value * */ fill_nbits(pdec, 16); htable_idx = look_nbits(pdec, 6); op = hash_table_ops[htable_idx * 4]; if (op == 2) { skip_nbits(pdec, 2); } else if (op == 1) { /* 15bits [ xxxx xxxx yyyy 111 ] * yyy => offset in the table8004 * xxx => offset in the tabled004 (tree) */ unsigned int mask, shift; unsigned int nbits, col1; unsigned int yyyy; skip_nbits(pdec, 3); /* offset1 += yyyy */ __get_nbits(pdec, 4, yyyy); offset1 += 1 + yyyy; offset1 &= 0x0F; nbits = ptable8004[offset1 * 2]; /* col1 = xxxx xxxx */ __get_nbits(pdec, nbits+1, col1); /* Bit mask table */ mask = pdec->table_bitpowermask[nbits][col1]; shift = ptable8004[offset1 * 2 + 1]; rows = ((mask << shift) + 0x80) & 0xFF; block = pdec->table_subblock[rows]; for (i = 0; i < 16; i++) pdec->temp_colors[i] += block[MulIdx[offset1][i]]; } else { /* op == 0 * offset1 is coded on 3 bits */ unsigned int shift; offset1 += hash_table_ops [htable_idx * 4 + 2]; offset1 &= 0x0F; rows = ptable0004[offset1 + hash_table_ops [htable_idx * 4 + 3]]; block 
= pdec->table_subblock[rows]; for (i = 0; i < 16; i++) pdec->temp_colors[i] += block[MulIdx[offset1][i]]; shift = hash_table_ops[htable_idx * 4 + 1]; skip_nbits(pdec, shift); } } while (op != 2); } static void DecompressBand23(struct pwc_dec23_private *pdec, const unsigned char *rawyuv, unsigned char *planar_y, unsigned char *planar_u, unsigned char *planar_v, unsigned int compressed_image_width, unsigned int real_image_width) { int compression_index, nblocks; const unsigned char *ptable0004; const unsigned char *ptable8004; pdec->reservoir = 0; pdec->nbits_in_reservoir = 0; pdec->stream = rawyuv + 1; /* The first byte of the stream is skipped */ get_nbits(pdec, 4, compression_index); /* pass 1: uncompress Y component */ nblocks = compressed_image_width / 4; ptable0004 = pdec->table_0004_pass1[compression_index]; ptable8004 = pdec->table_8004_pass1[compression_index]; /* Each block decode a square of 4x4 */ while (nblocks) { decode_block(pdec, ptable0004, ptable8004); copy_image_block_Y(pdec->temp_colors, planar_y, real_image_width, pdec->scalebits); planar_y += 4; nblocks--; } /* pass 2: uncompress UV component */ nblocks = compressed_image_width / 8; ptable0004 = pdec->table_0004_pass2[compression_index]; ptable8004 = pdec->table_8004_pass2[compression_index]; /* Each block decode a square of 4x4 */ while (nblocks) { decode_block(pdec, ptable0004, ptable8004); copy_image_block_CrCb(pdec->temp_colors, planar_u, real_image_width/2, pdec->scalebits); decode_block(pdec, ptable0004, ptable8004); copy_image_block_CrCb(pdec->temp_colors, planar_v, real_image_width/2, pdec->scalebits); planar_v += 8; planar_u += 8; nblocks -= 2; } } /** * pwc_dec23_decompress - Uncompress a pwc23 buffer. * @pdev: pointer to pwc device's internal struct * @src: raw data * @dst: image output */ void pwc_dec23_decompress(struct pwc_device *pdev, const void *src, void *dst) { int bandlines_left, bytes_per_block; struct pwc_dec23_private *pdec = &pdev->dec23; /* YUV420P image format */ unsigned char *pout_planar_y; unsigned char *pout_planar_u; unsigned char *pout_planar_v; unsigned int plane_size; mutex_lock(&pdec->lock); bandlines_left = pdev->height / 4; bytes_per_block = pdev->width * 4; plane_size = pdev->height * pdev->width; pout_planar_y = dst; pout_planar_u = dst + plane_size; pout_planar_v = dst + plane_size + plane_size / 4; while (bandlines_left--) { DecompressBand23(pdec, src, pout_planar_y, pout_planar_u, pout_planar_v, pdev->width, pdev->width); src += pdev->vbandlength; pout_planar_y += bytes_per_block; pout_planar_u += pdev->width; pout_planar_v += pdev->width; } mutex_unlock(&pdec->lock); } |
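/*
 * Illustrative sketch (not part of pwc-dec23.c): a minimal user-space model
 * of the bit reservoir used by the fill_nbits()/skip_nbits()/get_nbits()
 * macros above. Bytes are appended above the bits already queued, and
 * get_bits() consumes from the low end (LSB first). The struct and function
 * names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct bitreader {
	const uint8_t *stream;
	uint32_t reservoir;
	unsigned int nbits_in_reservoir;
};

static void fill_bits(struct bitreader *br, unsigned int wanted)
{
	while (br->nbits_in_reservoir < wanted) {
		br->reservoir |= (uint32_t)(*br->stream++) << br->nbits_in_reservoir;
		br->nbits_in_reservoir += 8;
	}
}

static unsigned int get_bits(struct bitreader *br, unsigned int wanted)
{
	unsigned int result;

	fill_bits(br, wanted);
	result = br->reservoir & ((1U << wanted) - 1);
	br->reservoir >>= wanted;
	br->nbits_in_reservoir -= wanted;
	return result;
}

int main(void)
{
	/* 0xb4 = 1011 0100b, 0x3c = 0011 1100b; bits come out LSB first. */
	static const uint8_t data[] = { 0xb4, 0x3c };
	struct bitreader br = { .stream = data };

	printf("4 bits: 0x%x\n", get_bits(&br, 4));	/* prints 0x4 */
	printf("6 bits: 0x%x\n", get_bits(&br, 6));	/* prints 0xb */
	printf("3 bits: 0x%x\n", get_bits(&br, 3));	/* prints 0x7 */
	return 0;
}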
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OF helpers for the MDIO (Ethernet PHY) API
 *
 * Copyright (c) 2009 Secret Lab Technologies, Ltd.
 *
 * This file provides helper functions for extracting PHY device information
 * out of the OpenFirmware device tree and using it to populate an mii_bus.
*/ #include <linux/device.h> #include <linux/err.h> #include <linux/fwnode_mdio.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/phy_fixed.h> #define DEFAULT_GPIO_RESET_DELAY 10 /* in microseconds */ MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OpenFirmware MDIO bus (Ethernet PHY) accessors"); /* Extract the clause 22 phy ID from the compatible string of the form * ethernet-phy-idAAAA.BBBB */ static int of_get_phy_id(struct device_node *device, u32 *phy_id) { return fwnode_get_phy_id(of_fwnode_handle(device), phy_id); } int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy, struct device_node *child, u32 addr) { return fwnode_mdiobus_phy_device_register(mdio, phy, of_fwnode_handle(child), addr); } EXPORT_SYMBOL(of_mdiobus_phy_device_register); static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child, u32 addr) { return fwnode_mdiobus_register_phy(mdio, of_fwnode_handle(child), addr); } static int of_mdiobus_register_device(struct mii_bus *mdio, struct device_node *child, u32 addr) { struct fwnode_handle *fwnode = of_fwnode_handle(child); struct mdio_device *mdiodev; int rc; mdiodev = mdio_device_create(mdio, addr); if (IS_ERR(mdiodev)) return PTR_ERR(mdiodev); /* Associate the OF node with the device structure so it * can be looked up later. */ fwnode_handle_get(fwnode); device_set_node(&mdiodev->dev, fwnode); /* All data is now stored in the mdiodev struct; register it. */ rc = mdio_device_register(mdiodev); if (rc) { device_set_node(&mdiodev->dev, NULL); fwnode_handle_put(fwnode); mdio_device_free(mdiodev); return rc; } dev_dbg(&mdio->dev, "registered mdio device %pOFn at address %i\n", child, addr); return 0; } /* The following is a list of PHY compatible strings which appear in * some DTBs. The compatible string is never matched against a PHY * driver, so is pointless. We only expect devices which are not PHYs * to have a compatible string, so they can be matched to an MDIO * driver. Encourage users to upgrade their DT blobs to remove these. */ static const struct of_device_id whitelist_phys[] = { { .compatible = "brcm,40nm-ephy" }, { .compatible = "broadcom,bcm5241" }, { .compatible = "marvell,88E1111", }, { .compatible = "marvell,88e1116", }, { .compatible = "marvell,88e1118", }, { .compatible = "marvell,88e1145", }, { .compatible = "marvell,88e1149r", }, { .compatible = "marvell,88e1310", }, { .compatible = "marvell,88E1510", }, { .compatible = "marvell,88E1514", }, { .compatible = "moxa,moxart-rtl8201cp", }, {} }; /* * Return true if the child node is for a phy. It must either: * o Compatible string of "ethernet-phy-idX.X" * o Compatible string of "ethernet-phy-ieee802.3-c45" * o Compatible string of "ethernet-phy-ieee802.3-c22" * o In the white list above (and issue a warning) * o No compatibility string * * A device which is not a phy is expected to have a compatible string * indicating what sort of device it is. */ bool of_mdiobus_child_is_phy(struct device_node *child) { u32 phy_id; if (of_get_phy_id(child, &phy_id) != -EINVAL) return true; if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c45")) return true; if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c22")) return true; if (of_match_node(whitelist_phys, child)) { pr_warn(FW_WARN "%pOF: Whitelisted compatible string. 
Please remove\n", child); return true; } if (!of_property_present(child, "compatible")) return true; return false; } EXPORT_SYMBOL(of_mdiobus_child_is_phy); static int __of_mdiobus_parse_phys(struct mii_bus *mdio, struct device_node *np, bool *scanphys) { struct device_node *child; int addr, rc = 0; /* Loop over the child nodes and register a phy_device for each phy */ for_each_available_child_of_node(np, child) { if (of_node_name_eq(child, "ethernet-phy-package")) { /* Ignore invalid ethernet-phy-package node */ if (!of_property_present(child, "reg")) continue; rc = __of_mdiobus_parse_phys(mdio, child, NULL); if (rc && rc != -ENODEV) goto exit; continue; } addr = of_mdio_parse_addr(&mdio->dev, child); if (addr < 0) { /* Skip scanning for invalid ethernet-phy-package node */ if (scanphys) *scanphys = true; continue; } if (of_mdiobus_child_is_phy(child)) rc = of_mdiobus_register_phy(mdio, child, addr); else rc = of_mdiobus_register_device(mdio, child, addr); if (rc == -ENODEV) dev_err(&mdio->dev, "MDIO device at address %d is missing.\n", addr); else if (rc) goto exit; } return 0; exit: of_node_put(child); return rc; } /** * __of_mdiobus_register - Register mii_bus and create PHYs from the device tree * @mdio: pointer to mii_bus structure * @np: pointer to device_node of MDIO bus. * @owner: module owning the @mdio object. * * This function registers the mii_bus structure and registers a phy_device * for each child node of @np. */ int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np, struct module *owner) { struct device_node *child; bool scanphys = false; int addr, rc; if (!np) return __mdiobus_register(mdio, owner); /* Do not continue if the node is disabled */ if (!of_device_is_available(np)) return -ENODEV; /* Mask out all PHYs from auto probing. Instead the PHYs listed in * the device tree are populated after the bus has been registered */ mdio->phy_mask = ~0; device_set_node(&mdio->dev, of_fwnode_handle(np)); /* Get bus level PHY reset GPIO details */ mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY; of_property_read_u32(np, "reset-delay-us", &mdio->reset_delay_us); mdio->reset_post_delay_us = 0; of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us); /* Register the MDIO bus */ rc = __mdiobus_register(mdio, owner); if (rc) return rc; /* Loop over the child nodes and register a phy_device for each phy */ rc = __of_mdiobus_parse_phys(mdio, np, &scanphys); if (rc) goto unregister; if (!scanphys) return 0; /* auto scan for PHYs with empty reg property */ for_each_available_child_of_node(np, child) { /* Skip PHYs with reg property set or ethernet-phy-package node */ if (of_property_present(child, "reg") || of_node_name_eq(child, "ethernet-phy-package")) continue; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { /* skip already registered PHYs */ if (mdiobus_is_registered_device(mdio, addr)) continue; /* be noisy to encourage people to set reg property */ dev_info(&mdio->dev, "scan phy %pOFn at address %i\n", child, addr); if (of_mdiobus_child_is_phy(child)) { /* -ENODEV is the return code that PHYLIB has * standardized on to indicate that bus * scanning should continue. 
*/ rc = of_mdiobus_register_phy(mdio, child, addr); if (!rc) break; if (rc != -ENODEV) goto put_unregister; } } } return 0; put_unregister: of_node_put(child); unregister: mdiobus_unregister(mdio); return rc; } EXPORT_SYMBOL(__of_mdiobus_register); /** * of_mdio_find_device - Given a device tree node, find the mdio_device * @np: pointer to the mdio_device's device tree node * * If successful, returns a pointer to the mdio_device with the embedded * struct device refcount incremented by one, or NULL on failure. * The caller should call put_device() on the mdio_device after its use */ struct mdio_device *of_mdio_find_device(struct device_node *np) { return fwnode_mdio_find_device(of_fwnode_handle(np)); } EXPORT_SYMBOL(of_mdio_find_device); /** * of_phy_find_device - Give a PHY node, find the phy_device * @phy_np: Pointer to the phy's device tree node * * If successful, returns a pointer to the phy_device with the embedded * struct device refcount incremented by one, or NULL on failure. */ struct phy_device *of_phy_find_device(struct device_node *phy_np) { return fwnode_phy_find_device(of_fwnode_handle(phy_np)); } EXPORT_SYMBOL(of_phy_find_device); /** * of_phy_connect - Connect to the phy described in the device tree * @dev: pointer to net_device claiming the phy * @phy_np: Pointer to device tree node for the PHY * @hndlr: Link state callback for the network device * @flags: flags to pass to the PHY * @iface: PHY data interface type * * If successful, returns a pointer to the phy_device with the embedded * struct device refcount incremented by one, or NULL on failure. The * refcount must be dropped by calling phy_disconnect() or phy_detach(). */ struct phy_device *of_phy_connect(struct net_device *dev, struct device_node *phy_np, void (*hndlr)(struct net_device *), u32 flags, phy_interface_t iface) { struct phy_device *phy = of_phy_find_device(phy_np); int ret; if (!phy) return NULL; phy->dev_flags |= flags; ret = phy_connect_direct(dev, phy, hndlr, iface); /* refcount is held by phy_connect_direct() on success */ put_device(&phy->mdio.dev); return ret ? NULL : phy; } EXPORT_SYMBOL(of_phy_connect); /** * of_phy_get_and_connect * - Get phy node and connect to the phy described in the device tree * @dev: pointer to net_device claiming the phy * @np: Pointer to device tree node for the net_device claiming the phy * @hndlr: Link state callback for the network device * * If successful, returns a pointer to the phy_device with the embedded * struct device refcount incremented by one, or NULL on failure. The * refcount must be dropped by calling phy_disconnect() or phy_detach(). 
*/ struct phy_device *of_phy_get_and_connect(struct net_device *dev, struct device_node *np, void (*hndlr)(struct net_device *)) { phy_interface_t iface; struct device_node *phy_np; struct phy_device *phy; int ret; ret = of_get_phy_mode(np, &iface); if (ret) return NULL; if (of_phy_is_fixed_link(np)) { ret = of_phy_register_fixed_link(np); if (ret < 0) { netdev_err(dev, "broken fixed-link specification\n"); return NULL; } phy_np = of_node_get(np); } else { phy_np = of_parse_phandle(np, "phy-handle", 0); if (!phy_np) return NULL; } phy = of_phy_connect(dev, phy_np, hndlr, 0, iface); of_node_put(phy_np); return phy; } EXPORT_SYMBOL(of_phy_get_and_connect); /* * of_phy_is_fixed_link() and of_phy_register_fixed_link() must * support two DT bindings: * - the old DT binding, where 'fixed-link' was a property with 5 * cells encoding various information about the fixed PHY * - the new DT binding, where 'fixed-link' is a sub-node of the * Ethernet device. */ bool of_phy_is_fixed_link(struct device_node *np) { struct device_node *dn; int err; const char *managed; /* New binding */ dn = of_get_child_by_name(np, "fixed-link"); if (dn) { of_node_put(dn); return true; } err = of_property_read_string(np, "managed", &managed); if (err == 0 && strcmp(managed, "auto") != 0) return true; /* Old binding */ if (of_property_count_u32_elems(np, "fixed-link") == 5) return true; return false; } EXPORT_SYMBOL(of_phy_is_fixed_link); int of_phy_register_fixed_link(struct device_node *np) { struct fixed_phy_status status = {}; struct device_node *fixed_link_node; u32 fixed_link_prop[5]; const char *managed; if (of_property_read_string(np, "managed", &managed) == 0 && strcmp(managed, "in-band-status") == 0) { /* status is zeroed, namely its .link member */ goto register_phy; } /* New binding */ fixed_link_node = of_get_child_by_name(np, "fixed-link"); if (fixed_link_node) { status.link = 1; status.duplex = of_property_read_bool(fixed_link_node, "full-duplex"); if (of_property_read_u32(fixed_link_node, "speed", &status.speed)) { of_node_put(fixed_link_node); return -EINVAL; } status.pause = of_property_read_bool(fixed_link_node, "pause"); status.asym_pause = of_property_read_bool(fixed_link_node, "asym-pause"); of_node_put(fixed_link_node); goto register_phy; } /* Old binding */ if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop, ARRAY_SIZE(fixed_link_prop)) == 0) { status.link = 1; status.duplex = fixed_link_prop[1]; status.speed = fixed_link_prop[2]; status.pause = fixed_link_prop[3]; status.asym_pause = fixed_link_prop[4]; goto register_phy; } return -ENODEV; register_phy: return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, np)); } EXPORT_SYMBOL(of_phy_register_fixed_link); void of_phy_deregister_fixed_link(struct device_node *np) { struct phy_device *phydev; phydev = of_phy_find_device(np); if (!phydev) return; fixed_phy_unregister(phydev); put_device(&phydev->mdio.dev); /* of_phy_find_device() */ phy_device_free(phydev); /* fixed_phy_register() */ } EXPORT_SYMBOL(of_phy_deregister_fixed_link); |
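/*
 * Illustrative sketch (not part of of_mdio.c): how an Ethernet MAC driver
 * might use the helpers above to attach to the PHY described in its device
 * tree node. foo_adjust_link(), foo_connect_phy() and foo_disconnect_phy()
 * are hypothetical names; only the of_phy_*() calls are taken from the
 * helpers above.
 */
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static void foo_adjust_link(struct net_device *ndev)
{
	/* react to link, speed and duplex changes reported by phylib */
}

static int foo_connect_phy(struct net_device *ndev, struct device_node *np)
{
	struct phy_device *phydev;

	/* Handles both a phy-handle reference and a fixed-link description,
	 * registering a fixed PHY first when one is needed.
	 */
	phydev = of_phy_get_and_connect(ndev, np, foo_adjust_link);
	if (!phydev)
		return -ENODEV;

	return 0;
}

static void foo_disconnect_phy(struct net_device *ndev, struct device_node *np)
{
	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	/* undo the fixed-link registration done on the connect path */
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
}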
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"

#include <linux/skbuff.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include "debug.h"
#include "target.h"

struct ath6kl_fwlog_slot {
	__le32 timestamp;
	__le32 length;

	/* max ATH6KL_FWLOG_PAYLOAD_SIZE bytes */
	u8 payload[];
};

#define ATH6KL_FWLOG_MAX_ENTRIES 20

#define ATH6KL_FWLOG_VALID_MASK 0x1ffff

void ath6kl_printk(const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk("%sath6kl: %pV", level, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ath6kl_printk);

void ath6kl_info(const char *fmt, ...)
{
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list args;

	va_start(args, fmt);
	vaf.va = &args;
	ath6kl_printk(KERN_INFO, "%pV", &vaf);
	trace_ath6kl_log_info(&vaf);
	va_end(args);
}
EXPORT_SYMBOL(ath6kl_info);

void ath6kl_err(const char *fmt, ...)
{
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list args;

	va_start(args, fmt);
	vaf.va = &args;
	ath6kl_printk(KERN_ERR, "%pV", &vaf);
	trace_ath6kl_log_err(&vaf);
	va_end(args);
}
EXPORT_SYMBOL(ath6kl_err);

void ath6kl_warn(const char *fmt, ...)
{ struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; ath6kl_printk(KERN_WARNING, "%pV", &vaf); trace_ath6kl_log_warn(&vaf); va_end(args); } EXPORT_SYMBOL(ath6kl_warn); int ath6kl_read_tgt_stats(struct ath6kl *ar, struct ath6kl_vif *vif) { long left; if (down_interruptible(&ar->sem)) return -EBUSY; set_bit(STATS_UPDATE_PEND, &vif->flags); if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) { up(&ar->sem); return -EIO; } left = wait_event_interruptible_timeout(ar->event_wq, !test_bit(STATS_UPDATE_PEND, &vif->flags), WMI_TIMEOUT); up(&ar->sem); if (left <= 0) return -ETIMEDOUT; return 0; } EXPORT_SYMBOL(ath6kl_read_tgt_stats); #ifdef CONFIG_ATH6KL_DEBUG void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (debug_mask & mask) ath6kl_printk(KERN_DEBUG, "%pV", &vaf); trace_ath6kl_log_dbg(mask, &vaf); va_end(args); } EXPORT_SYMBOL(ath6kl_dbg); void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask, const char *msg, const char *prefix, const void *buf, size_t len) { if (debug_mask & mask) { if (msg) ath6kl_dbg(mask, "%s\n", msg); print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len); } /* tracing code doesn't like null strings :/ */ trace_ath6kl_log_dbg_dump(msg ? msg : "", prefix ? prefix : "", buf, len); } EXPORT_SYMBOL(ath6kl_dbg_dump); #define REG_OUTPUT_LEN_PER_LINE 25 #define REGTYPE_STR_LEN 100 struct ath6kl_diag_reg_info { u32 reg_start; u32 reg_end; const char *reg_info; }; static const struct ath6kl_diag_reg_info diag_reg[] = { { 0x20000, 0x200fc, "General DMA and Rx registers" }, { 0x28000, 0x28900, "MAC PCU register & keycache" }, { 0x20800, 0x20a40, "QCU" }, { 0x21000, 0x212f0, "DCU" }, { 0x4000, 0x42e4, "RTC" }, { 0x540000, 0x540000 + (256 * 1024), "RAM" }, { 0x29800, 0x2B210, "Base Band" }, { 0x1C000, 0x1C748, "Analog" }, }; void ath6kl_dump_registers(struct ath6kl_device *dev, struct ath6kl_irq_proc_registers *irq_proc_reg, struct ath6kl_irq_enable_reg *irq_enable_reg) { ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n")); if (irq_proc_reg != NULL) { ath6kl_dbg(ATH6KL_DBG_IRQ, "Host Int status: 0x%x\n", irq_proc_reg->host_int_status); ath6kl_dbg(ATH6KL_DBG_IRQ, "CPU Int status: 0x%x\n", irq_proc_reg->cpu_int_status); ath6kl_dbg(ATH6KL_DBG_IRQ, "Error Int status: 0x%x\n", irq_proc_reg->error_int_status); ath6kl_dbg(ATH6KL_DBG_IRQ, "Counter Int status: 0x%x\n", irq_proc_reg->counter_int_status); ath6kl_dbg(ATH6KL_DBG_IRQ, "Mbox Frame: 0x%x\n", irq_proc_reg->mbox_frame); ath6kl_dbg(ATH6KL_DBG_IRQ, "Rx Lookahead Valid: 0x%x\n", irq_proc_reg->rx_lkahd_valid); ath6kl_dbg(ATH6KL_DBG_IRQ, "Rx Lookahead 0: 0x%x\n", irq_proc_reg->rx_lkahd[0]); ath6kl_dbg(ATH6KL_DBG_IRQ, "Rx Lookahead 1: 0x%x\n", irq_proc_reg->rx_lkahd[1]); if (dev->ar->mbox_info.gmbox_addr != 0) { /* * If the target supports GMBOX hardware, dump some * additional state. 
*/ ath6kl_dbg(ATH6KL_DBG_IRQ, "GMBOX Host Int status 2: 0x%x\n", irq_proc_reg->host_int_status2); ath6kl_dbg(ATH6KL_DBG_IRQ, "GMBOX RX Avail: 0x%x\n", irq_proc_reg->gmbox_rx_avail); ath6kl_dbg(ATH6KL_DBG_IRQ, "GMBOX lookahead alias 0: 0x%x\n", irq_proc_reg->rx_gmbox_lkahd_alias[0]); ath6kl_dbg(ATH6KL_DBG_IRQ, "GMBOX lookahead alias 1: 0x%x\n", irq_proc_reg->rx_gmbox_lkahd_alias[1]); } } if (irq_enable_reg != NULL) { ath6kl_dbg(ATH6KL_DBG_IRQ, "Int status Enable: 0x%x\n", irq_enable_reg->int_status_en); ath6kl_dbg(ATH6KL_DBG_IRQ, "Counter Int status Enable: 0x%x\n", irq_enable_reg->cntr_int_status_en); } ath6kl_dbg(ATH6KL_DBG_IRQ, "<------------------------------->\n"); } static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist) { ath6kl_dbg(ATH6KL_DBG_CREDIT, "--- endpoint: %d svc_id: 0x%X ---\n", ep_dist->endpoint, ep_dist->svc_id); ath6kl_dbg(ATH6KL_DBG_CREDIT, " dist_flags : 0x%X\n", ep_dist->dist_flags); ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_norm : %d\n", ep_dist->cred_norm); ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_min : %d\n", ep_dist->cred_min); ath6kl_dbg(ATH6KL_DBG_CREDIT, " credits : %d\n", ep_dist->credits); ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_assngd : %d\n", ep_dist->cred_assngd); ath6kl_dbg(ATH6KL_DBG_CREDIT, " seek_cred : %d\n", ep_dist->seek_cred); ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_sz : %d\n", ep_dist->cred_sz); ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_per_msg : %d\n", ep_dist->cred_per_msg); ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_to_dist : %d\n", ep_dist->cred_to_dist); ath6kl_dbg(ATH6KL_DBG_CREDIT, " txq_depth : %d\n", get_queue_depth(&ep_dist->htc_ep->txq)); ath6kl_dbg(ATH6KL_DBG_CREDIT, "----------------------------------\n"); } /* FIXME: move to htc.c */ void dump_cred_dist_stats(struct htc_target *target) { struct htc_endpoint_credit_dist *ep_list; list_for_each_entry(ep_list, &target->cred_dist_list, list) dump_cred_dist(ep_list); ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit distribution total %d free %d\n", target->credit_info->total_avail_credits, target->credit_info->cur_free_credits); } void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war) { switch (war) { case ATH6KL_WAR_INVALID_RATE: ar->debug.war_stats.invalid_rate++; break; } } static ssize_t read_file_war_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; char *buf; unsigned int len = 0, buf_len = 1500; ssize_t ret_cnt; buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%25s\n", "Workaround stats"); len += scnprintf(buf + len, buf_len - len, "%25s\n\n", "================="); len += scnprintf(buf + len, buf_len - len, "%20s %10u\n", "Invalid rates", ar->debug.war_stats.invalid_rate); if (WARN_ON(len > buf_len)) len = buf_len; ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return ret_cnt; } static const struct file_operations fops_war_stats = { .read = read_file_war_stats, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len) { struct ath6kl_fwlog_slot *slot; struct sk_buff *skb; size_t slot_len; if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE)) return; slot_len = sizeof(*slot) + ATH6KL_FWLOG_PAYLOAD_SIZE; skb = alloc_skb(slot_len, GFP_KERNEL); if (!skb) return; slot = skb_put(skb, slot_len); slot->timestamp = cpu_to_le32(jiffies); slot->length = cpu_to_le32(len); memcpy(slot->payload, buf, len); /* Need to pad 
each record to fixed length ATH6KL_FWLOG_PAYLOAD_SIZE */ memset(slot->payload + len, 0, ATH6KL_FWLOG_PAYLOAD_SIZE - len); spin_lock(&ar->debug.fwlog_queue.lock); __skb_queue_tail(&ar->debug.fwlog_queue, skb); complete(&ar->debug.fwlog_completion); /* drop oldest entries */ while (skb_queue_len(&ar->debug.fwlog_queue) > ATH6KL_FWLOG_MAX_ENTRIES) { skb = __skb_dequeue(&ar->debug.fwlog_queue); kfree_skb(skb); } spin_unlock(&ar->debug.fwlog_queue.lock); return; } static int ath6kl_fwlog_open(struct inode *inode, struct file *file) { struct ath6kl *ar = inode->i_private; if (ar->debug.fwlog_open) return -EBUSY; ar->debug.fwlog_open = true; file->private_data = inode->i_private; return 0; } static int ath6kl_fwlog_release(struct inode *inode, struct file *file) { struct ath6kl *ar = inode->i_private; ar->debug.fwlog_open = false; return 0; } static ssize_t ath6kl_fwlog_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; struct sk_buff *skb; ssize_t ret_cnt; size_t len = 0; char *buf; buf = vmalloc(count); if (!buf) return -ENOMEM; /* read undelivered logs from firmware */ ath6kl_read_fwlogs(ar); spin_lock(&ar->debug.fwlog_queue.lock); while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) { if (skb->len > count - len) { /* not enough space, put skb back and leave */ __skb_queue_head(&ar->debug.fwlog_queue, skb); break; } memcpy(buf + len, skb->data, skb->len); len += skb->len; kfree_skb(skb); } spin_unlock(&ar->debug.fwlog_queue.lock); /* FIXME: what to do if len == 0? */ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); vfree(buf); return ret_cnt; } static const struct file_operations fops_fwlog = { .open = ath6kl_fwlog_open, .release = ath6kl_fwlog_release, .read = ath6kl_fwlog_read, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath6kl_fwlog_block_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; struct sk_buff *skb; ssize_t ret_cnt; size_t len = 0, not_copied; char *buf; int ret; buf = vmalloc(count); if (!buf) return -ENOMEM; spin_lock(&ar->debug.fwlog_queue.lock); if (skb_queue_len(&ar->debug.fwlog_queue) == 0) { /* we must init under queue lock */ init_completion(&ar->debug.fwlog_completion); spin_unlock(&ar->debug.fwlog_queue.lock); ret = wait_for_completion_interruptible( &ar->debug.fwlog_completion); if (ret == -ERESTARTSYS) { vfree(buf); return ret; } spin_lock(&ar->debug.fwlog_queue.lock); } while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) { if (skb->len > count - len) { /* not enough space, put skb back and leave */ __skb_queue_head(&ar->debug.fwlog_queue, skb); break; } memcpy(buf + len, skb->data, skb->len); len += skb->len; kfree_skb(skb); } spin_unlock(&ar->debug.fwlog_queue.lock); /* FIXME: what to do if len == 0? 
*/ not_copied = copy_to_user(user_buf, buf, len); if (not_copied != 0) { ret_cnt = -EFAULT; goto out; } *ppos = *ppos + len; ret_cnt = len; out: vfree(buf); return ret_cnt; } static const struct file_operations fops_fwlog_block = { .open = ath6kl_fwlog_open, .release = ath6kl_fwlog_release, .read = ath6kl_fwlog_block_read, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath6kl_fwlog_mask_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; char buf[16]; int len; len = snprintf(buf, sizeof(buf), "0x%x\n", ar->debug.fwlog_mask); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath6kl_fwlog_mask_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; int ret; ret = kstrtou32_from_user(user_buf, count, 0, &ar->debug.fwlog_mask); if (ret) return ret; ret = ath6kl_wmi_config_debug_module_cmd(ar->wmi, ATH6KL_FWLOG_VALID_MASK, ar->debug.fwlog_mask); if (ret) return ret; return count; } static const struct file_operations fops_fwlog_mask = { .open = simple_open, .read = ath6kl_fwlog_mask_read, .write = ath6kl_fwlog_mask_write, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; struct ath6kl_vif *vif; struct target_stats *tgt_stats; char *buf; unsigned int len = 0, buf_len = 1500; int i; ssize_t ret_cnt; int rv; vif = ath6kl_vif_first(ar); if (!vif) return -EIO; buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; rv = ath6kl_read_tgt_stats(ar, vif); if (rv < 0) { kfree(buf); return rv; } tgt_stats = &vif->target_stats; len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%25s\n", "Target Tx stats"); len += scnprintf(buf + len, buf_len - len, "%25s\n\n", "================="); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Ucast packets", tgt_stats->tx_ucast_pkt); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Bcast packets", tgt_stats->tx_bcast_pkt); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Ucast byte", tgt_stats->tx_ucast_byte); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Bcast byte", tgt_stats->tx_bcast_byte); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Rts success cnt", tgt_stats->tx_rts_success_cnt); for (i = 0; i < 4; i++) len += scnprintf(buf + len, buf_len - len, "%18s %d %10llu\n", "PER on ac", i, tgt_stats->tx_pkt_per_ac[i]); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Error", tgt_stats->tx_err); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Fail count", tgt_stats->tx_fail_cnt); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Retry count", tgt_stats->tx_retry_cnt); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Multi retry cnt", tgt_stats->tx_mult_retry_cnt); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Rts fail cnt", tgt_stats->tx_rts_fail_cnt); len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n\n", "TKIP counter measure used", tgt_stats->tkip_cnter_measures_invoked); len += scnprintf(buf + len, buf_len - len, "%25s\n", "Target Rx stats"); len += scnprintf(buf + len, buf_len - len, "%25s\n", "================="); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Ucast packets", tgt_stats->rx_ucast_pkt); len += scnprintf(buf + len, buf_len - len, 
"%20s %10d\n", "Ucast Rate", tgt_stats->rx_ucast_rate); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Bcast packets", tgt_stats->rx_bcast_pkt); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Ucast byte", tgt_stats->rx_ucast_byte); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Bcast byte", tgt_stats->rx_bcast_byte); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Fragmented pkt", tgt_stats->rx_frgment_pkt); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Error", tgt_stats->rx_err); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "CRC Err", tgt_stats->rx_crc_err); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Key cache miss", tgt_stats->rx_key_cache_miss); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Decrypt Err", tgt_stats->rx_decrypt_err); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Duplicate frame", tgt_stats->rx_dupl_frame); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Tkip Mic failure", tgt_stats->tkip_local_mic_fail); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "TKIP format err", tgt_stats->tkip_fmt_err); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "CCMP format Err", tgt_stats->ccmp_fmt_err); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n\n", "CCMP Replay Err", tgt_stats->ccmp_replays); len += scnprintf(buf + len, buf_len - len, "%25s\n", "Misc Target stats"); len += scnprintf(buf + len, buf_len - len, "%25s\n", "================="); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Beacon Miss count", tgt_stats->cs_bmiss_cnt); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Num Connects", tgt_stats->cs_connect_cnt); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Num disconnects", tgt_stats->cs_discon_cnt); len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", "Beacon avg rssi", tgt_stats->cs_ave_beacon_rssi); len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", "ARP pkt received", tgt_stats->arp_received); len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", "ARP pkt matched", tgt_stats->arp_matched); len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", "ARP pkt replied", tgt_stats->arp_replied); if (len > buf_len) len = buf_len; ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return ret_cnt; } static const struct file_operations fops_tgt_stats = { .read = read_file_tgt_stats, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; #define print_credit_info(fmt_str, ep_list_field) \ (len += scnprintf(buf + len, buf_len - len, fmt_str, \ ep_list->ep_list_field)) #define CREDIT_INFO_DISPLAY_STRING_LEN 200 #define CREDIT_INFO_LEN 128 static ssize_t read_file_credit_dist_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; struct htc_target *target = ar->htc_target; struct htc_endpoint_credit_dist *ep_list; char *buf; unsigned int buf_len, len = 0; ssize_t ret_cnt; buf_len = CREDIT_INFO_DISPLAY_STRING_LEN + get_queue_depth(&target->cred_dist_list) * CREDIT_INFO_LEN; buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; len += scnprintf(buf + len, buf_len - len, "%25s%5d\n", "Total Avail Credits: ", target->credit_info->total_avail_credits); len += scnprintf(buf + len, buf_len - len, "%25s%5d\n", "Free credits :", target->credit_info->cur_free_credits); len += scnprintf(buf + len, buf_len - len, " Epid Flags Cred_norm Cred_min 
Credits Cred_assngd" " Seek_cred Cred_sz Cred_per_msg Cred_to_dist" " qdepth\n"); list_for_each_entry(ep_list, &target->cred_dist_list, list) { print_credit_info(" %2d", endpoint); print_credit_info("%10x", dist_flags); print_credit_info("%8d", cred_norm); print_credit_info("%9d", cred_min); print_credit_info("%9d", credits); print_credit_info("%10d", cred_assngd); print_credit_info("%13d", seek_cred); print_credit_info("%12d", cred_sz); print_credit_info("%9d", cred_per_msg); print_credit_info("%14d", cred_to_dist); len += scnprintf(buf + len, buf_len - len, "%12d\n", get_queue_depth(&ep_list->htc_ep->txq)); } if (len > buf_len) len = buf_len; ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return ret_cnt; } static const struct file_operations fops_credit_dist_stats = { .read = read_file_credit_dist_stats, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static unsigned int print_endpoint_stat(struct htc_target *target, char *buf, unsigned int buf_len, unsigned int len, int offset, const char *name) { int i; struct htc_endpoint_stats *ep_st; u32 *counter; len += scnprintf(buf + len, buf_len - len, "%s:", name); for (i = 0; i < ENDPOINT_MAX; i++) { ep_st = &target->endpoint[i].ep_st; counter = ((u32 *) ep_st) + (offset / 4); len += scnprintf(buf + len, buf_len - len, " %u", *counter); } len += scnprintf(buf + len, buf_len - len, "\n"); return len; } static ssize_t ath6kl_endpoint_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; struct htc_target *target = ar->htc_target; char *buf; unsigned int buf_len, len = 0; ssize_t ret_cnt; buf_len = sizeof(struct htc_endpoint_stats) / sizeof(u32) * (25 + ENDPOINT_MAX * 11); buf = kmalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; #define EPSTAT(name) \ do { \ len = print_endpoint_stat(target, buf, buf_len, len, \ offsetof(struct htc_endpoint_stats, \ name), \ #name); \ } while (0) EPSTAT(cred_low_indicate); EPSTAT(tx_issued); EPSTAT(tx_pkt_bundled); EPSTAT(tx_bundles); EPSTAT(tx_dropped); EPSTAT(tx_cred_rpt); EPSTAT(cred_rpt_from_rx); EPSTAT(cred_rpt_from_other); EPSTAT(cred_rpt_ep0); EPSTAT(cred_from_rx); EPSTAT(cred_from_other); EPSTAT(cred_from_ep0); EPSTAT(cred_cosumd); EPSTAT(cred_retnd); EPSTAT(rx_pkts); EPSTAT(rx_lkahds); EPSTAT(rx_bundl); EPSTAT(rx_bundle_lkahd); EPSTAT(rx_bundle_from_hdr); EPSTAT(rx_alloc_thresh_hit); EPSTAT(rxalloc_thresh_byte); #undef EPSTAT if (len > buf_len) len = buf_len; ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return ret_cnt; } static ssize_t ath6kl_endpoint_stats_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; struct htc_target *target = ar->htc_target; int ret, i; u32 val; struct htc_endpoint_stats *ep_st; ret = kstrtou32_from_user(user_buf, count, 0, &val); if (ret) return ret; if (val == 0) { for (i = 0; i < ENDPOINT_MAX; i++) { ep_st = &target->endpoint[i].ep_st; memset(ep_st, 0, sizeof(*ep_st)); } } return count; } static const struct file_operations fops_endpoint_stats = { .open = simple_open, .read = ath6kl_endpoint_stats_read, .write = ath6kl_endpoint_stats_write, .owner = THIS_MODULE, .llseek = default_llseek, }; static unsigned long ath6kl_get_num_reg(void) { int i; unsigned long n_reg = 0; for (i = 0; i < ARRAY_SIZE(diag_reg); i++) n_reg = n_reg + (diag_reg[i].reg_end - diag_reg[i].reg_start) / 4 + 1; return n_reg; } static bool ath6kl_dbg_is_diag_reg_valid(u32 
reg_addr) { int i; for (i = 0; i < ARRAY_SIZE(diag_reg); i++) { if (reg_addr >= diag_reg[i].reg_start && reg_addr <= diag_reg[i].reg_end) return true; } return false; }
static ssize_t ath6kl_regread_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; u8 buf[50]; unsigned int len = 0; if (ar->debug.dbgfs_diag_reg) len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", ar->debug.dbgfs_diag_reg); else len += scnprintf(buf + len, sizeof(buf) - len, "All diag registers\n"); return simple_read_from_buffer(user_buf, count, ppos, buf, len); }
static ssize_t ath6kl_regread_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; unsigned long reg_addr; if (kstrtoul_from_user(user_buf, count, 0, &reg_addr)) return -EINVAL; if ((reg_addr % 4) != 0) return -EINVAL; if (reg_addr && !ath6kl_dbg_is_diag_reg_valid(reg_addr)) return -EINVAL; ar->debug.dbgfs_diag_reg = reg_addr; return count; }
static const struct file_operations fops_diag_reg_read = { .read = ath6kl_regread_read, .write = ath6kl_regread_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, };
static int ath6kl_regdump_open(struct inode *inode, struct file *file) { struct ath6kl *ar = inode->i_private; u8 *buf; unsigned long int reg_len; unsigned int len = 0, n_reg; u32 addr; __le32 reg_val; int i, status; /* Dump all the registers if no register is specified */ if (!ar->debug.dbgfs_diag_reg) n_reg = ath6kl_get_num_reg(); else n_reg = 1; reg_len = n_reg * REG_OUTPUT_LEN_PER_LINE; if (n_reg > 1) reg_len += REGTYPE_STR_LEN; buf = vmalloc(reg_len); if (!buf) return -ENOMEM; if (n_reg == 1) { addr = ar->debug.dbgfs_diag_reg; status = ath6kl_diag_read32(ar, TARG_VTOP(ar->target_type, addr), (u32 *)&reg_val); if (status) goto fail_reg_read; len += scnprintf(buf + len, reg_len - len, "0x%06x 0x%08x\n", addr, le32_to_cpu(reg_val)); goto done; } for (i = 0; i < ARRAY_SIZE(diag_reg); i++) { len += scnprintf(buf + len, reg_len - len, "%s\n", diag_reg[i].reg_info); for (addr = diag_reg[i].reg_start; addr <= diag_reg[i].reg_end; addr += 4) { status = ath6kl_diag_read32(ar, TARG_VTOP(ar->target_type, addr), (u32 *)&reg_val); if (status) goto fail_reg_read; len += scnprintf(buf + len, reg_len - len, "0x%06x 0x%08x\n", addr, le32_to_cpu(reg_val)); } } done: file->private_data = buf; return 0; fail_reg_read: ath6kl_warn("Unable to read memory:%u\n", addr); vfree(buf); return -EIO; }
static ssize_t ath6kl_regdump_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { u8 *buf = file->private_data; return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); }
static int ath6kl_regdump_release(struct inode *inode, struct file *file) { vfree(file->private_data); return 0; }
static const struct file_operations fops_reg_dump = { .open = ath6kl_regdump_open, .read = ath6kl_regdump_read, .release = ath6kl_regdump_release, .owner = THIS_MODULE, .llseek = default_llseek, };
static ssize_t ath6kl_lrssi_roam_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; unsigned long lrssi_roam_threshold; int ret; if (kstrtoul_from_user(user_buf, count, 0, &lrssi_roam_threshold)) return -EINVAL; ar->lrssi_roam_threshold = lrssi_roam_threshold; ret = ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold); if (ret) return ret; return count; }
static ssize_t ath6kl_lrssi_roam_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; char buf[32]; unsigned int len; len = snprintf(buf, sizeof(buf), "%u\n", ar->lrssi_roam_threshold); return simple_read_from_buffer(user_buf, count, ppos, buf, len); }
static const struct file_operations fops_lrssi_roam_threshold = { .read = ath6kl_lrssi_roam_read, .write = ath6kl_lrssi_roam_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, };
static ssize_t ath6kl_regwrite_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; u8 buf[32]; unsigned int len = 0; len = scnprintf(buf, sizeof(buf), "Addr: 0x%x Val: 0x%x\n", ar->debug.diag_reg_addr_wr, ar->debug.diag_reg_val_wr); return simple_read_from_buffer(user_buf, count, ppos, buf, len); }
static ssize_t ath6kl_regwrite_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; char buf[32]; char *sptr, *token; unsigned int len = 0; u32 reg_addr, reg_val; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; sptr = buf; token = strsep(&sptr, "="); if (!token) return -EINVAL; if (kstrtou32(token, 0, &reg_addr)) return -EINVAL; if (!ath6kl_dbg_is_diag_reg_valid(reg_addr)) return -EINVAL; if (kstrtou32(sptr, 0, &reg_val)) return -EINVAL; ar->debug.diag_reg_addr_wr = reg_addr; ar->debug.diag_reg_val_wr = reg_val; if (ath6kl_diag_write32(ar, ar->debug.diag_reg_addr_wr, cpu_to_le32(ar->debug.diag_reg_val_wr))) return -EIO; return count; }
static const struct file_operations fops_diag_reg_write = { .read = ath6kl_regwrite_read, .write = ath6kl_regwrite_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, };
int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf, size_t len) { const struct wmi_target_roam_tbl *tbl; u16 num_entries; if (len < sizeof(*tbl)) return -EINVAL; tbl = (const struct wmi_target_roam_tbl *) buf; num_entries = le16_to_cpu(tbl->num_entries); if (struct_size(tbl, info, num_entries) > len) return -EINVAL; if (ar->debug.roam_tbl == NULL || ar->debug.roam_tbl_len < (unsigned int) len) { kfree(ar->debug.roam_tbl); ar->debug.roam_tbl = kmalloc(len, GFP_ATOMIC); if (ar->debug.roam_tbl == NULL) return -ENOMEM; } memcpy(ar->debug.roam_tbl, buf, len); ar->debug.roam_tbl_len = len; if (test_bit(ROAM_TBL_PEND, &ar->flag)) { clear_bit(ROAM_TBL_PEND, &ar->flag); wake_up(&ar->event_wq); } return 0; }
static ssize_t ath6kl_roam_table_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; int ret; long left; struct wmi_target_roam_tbl *tbl; u16 num_entries, i; char *buf; unsigned int len, buf_len; ssize_t ret_cnt; if (down_interruptible(&ar->sem)) return -EBUSY; set_bit(ROAM_TBL_PEND, &ar->flag); ret = ath6kl_wmi_get_roam_tbl_cmd(ar->wmi); if (ret) { up(&ar->sem); return ret; } left = wait_event_interruptible_timeout( ar->event_wq, !test_bit(ROAM_TBL_PEND, &ar->flag), WMI_TIMEOUT); up(&ar->sem); if (left <= 0) return -ETIMEDOUT; if (ar->debug.roam_tbl == NULL) return -ENOMEM; tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl; num_entries = le16_to_cpu(tbl->num_entries); buf_len = 100 + num_entries * 100; buf = kzalloc(buf_len, GFP_KERNEL); if (buf == NULL) return -ENOMEM; len = 0; len += scnprintf(buf + len, buf_len - len, "roam_mode=%u\n\n" "# roam_util bssid rssi rssidt last_rssi util bias\n", le16_to_cpu(tbl->roam_mode)); for (i = 0; i < num_entries; i++) { struct
wmi_bss_roam_info *info = &tbl->info[i]; len += scnprintf(buf + len, buf_len - len, "%d %pM %d %d %d %d %d\n", a_sle32_to_cpu(info->roam_util), info->bssid, info->rssi, info->rssidt, info->last_rssi, info->util, info->bias); } if (len > buf_len) len = buf_len; ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return ret_cnt; } static const struct file_operations fops_roam_table = { .read = ath6kl_roam_table_read, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath6kl_force_roam_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; int ret; char buf[20]; size_t len; u8 bssid[ETH_ALEN]; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (!mac_pton(buf, bssid)) return -EINVAL; ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid); if (ret) return ret; return count; } static const struct file_operations fops_force_roam = { .write = ath6kl_force_roam_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath6kl_roam_mode_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; int ret; char buf[20]; size_t len; enum wmi_roam_mode mode; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (len > 0 && buf[len - 1] == '\n') buf[len - 1] = '\0'; if (strcasecmp(buf, "default") == 0) mode = WMI_DEFAULT_ROAM_MODE; else if (strcasecmp(buf, "bssbias") == 0) mode = WMI_HOST_BIAS_ROAM_MODE; else if (strcasecmp(buf, "lock") == 0) mode = WMI_LOCK_BSS_MODE; else return -EINVAL; ret = ath6kl_wmi_set_roam_mode_cmd(ar->wmi, mode); if (ret) return ret; return count; } static const struct file_operations fops_roam_mode = { .write = ath6kl_roam_mode_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive) { ar->debug.keepalive = keepalive; } static ssize_t ath6kl_keepalive_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; char buf[16]; int len; len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.keepalive); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath6kl_keepalive_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; int ret; u8 val; ret = kstrtou8_from_user(user_buf, count, 0, &val); if (ret) return ret; ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, 0, val); if (ret) return ret; return count; } static const struct file_operations fops_keepalive = { .open = simple_open, .read = ath6kl_keepalive_read, .write = ath6kl_keepalive_write, .owner = THIS_MODULE, .llseek = default_llseek, }; void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout) { ar->debug.disc_timeout = timeout; } static ssize_t ath6kl_disconnect_timeout_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; char buf[16]; int len; len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.disc_timeout); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath6kl_disconnect_timeout_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; int ret; u8 val; ret = 
kstrtou8_from_user(user_buf, count, 0, &val); if (ret) return ret; ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, 0, val); if (ret) return ret; return count; } static const struct file_operations fops_disconnect_timeout = { .open = simple_open, .read = ath6kl_disconnect_timeout_read, .write = ath6kl_disconnect_timeout_write, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath6kl_create_qos_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; struct ath6kl_vif *vif; char buf[200]; ssize_t len; char *sptr, *token; struct wmi_create_pstream_cmd pstream; u32 val32; u16 val16; vif = ath6kl_vif_first(ar); if (!vif) return -EIO; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; sptr = buf; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou8(token, 0, &pstream.user_pri)) return -EINVAL; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou8(token, 0, &pstream.traffic_direc)) return -EINVAL; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou8(token, 0, &pstream.traffic_class)) return -EINVAL; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou8(token, 0, &pstream.traffic_type)) return -EINVAL; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou8(token, 0, &pstream.voice_psc_cap)) return -EINVAL; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou32(token, 0, &val32)) return -EINVAL; pstream.min_service_int = cpu_to_le32(val32); token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou32(token, 0, &val32)) return -EINVAL; pstream.max_service_int = cpu_to_le32(val32); token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou32(token, 0, &val32)) return -EINVAL; pstream.inactivity_int = cpu_to_le32(val32); token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou32(token, 0, &val32)) return -EINVAL; pstream.suspension_int = cpu_to_le32(val32); token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou32(token, 0, &val32)) return -EINVAL; pstream.service_start_time = cpu_to_le32(val32); token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou8(token, 0, &pstream.tsid)) return -EINVAL; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou16(token, 0, &val16)) return -EINVAL; pstream.nominal_msdu = cpu_to_le16(val16); token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou16(token, 0, &val16)) return -EINVAL; pstream.max_msdu = cpu_to_le16(val16); token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou32(token, 0, &val32)) return -EINVAL; pstream.min_data_rate = cpu_to_le32(val32); token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou32(token, 0, &val32)) return -EINVAL; pstream.mean_data_rate = cpu_to_le32(val32); token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou32(token, 0, &val32)) return -EINVAL; pstream.peak_data_rate = cpu_to_le32(val32); token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou32(token, 0, &val32)) return -EINVAL; pstream.max_burst_size = cpu_to_le32(val32); token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou32(token, 0, &val32)) return -EINVAL; pstream.delay_bound = cpu_to_le32(val32); token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou32(token, 0, &val32)) return -EINVAL; pstream.min_phy_rate = cpu_to_le32(val32); token = strsep(&sptr, " "); if (!token) return -EINVAL; if 
(kstrtou32(token, 0, &val32)) return -EINVAL; pstream.sba = cpu_to_le32(val32); token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou32(token, 0, &val32)) return -EINVAL; pstream.medium_time = cpu_to_le32(val32); pstream.nominal_phy = le32_to_cpu(pstream.min_phy_rate) / 1000000; ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream); return count; } static const struct file_operations fops_create_qos = { .write = ath6kl_create_qos_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath6kl_delete_qos_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; struct ath6kl_vif *vif; char buf[100]; ssize_t len; char *sptr, *token; u8 traffic_class; u8 tsid; vif = ath6kl_vif_first(ar); if (!vif) return -EIO; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; sptr = buf; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou8(token, 0, &traffic_class)) return -EINVAL; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou8(token, 0, &tsid)) return -EINVAL; ath6kl_wmi_delete_pstream_cmd(ar->wmi, vif->fw_vif_idx, traffic_class, tsid); return count; } static const struct file_operations fops_delete_qos = { .write = ath6kl_delete_qos_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath6kl_bgscan_int_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; struct ath6kl_vif *vif; u16 bgscan_int; char buf[32]; ssize_t len; vif = ath6kl_vif_first(ar); if (!vif) return -EIO; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (kstrtou16(buf, 0, &bgscan_int)) return -EINVAL; if (bgscan_int == 0) bgscan_int = 0xffff; vif->bg_scan_period = bgscan_int; ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3, 0, 0, 0); return count; } static const struct file_operations fops_bgscan_int = { .write = ath6kl_bgscan_int_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath6kl_listen_int_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; struct ath6kl_vif *vif; u16 listen_interval; char buf[32]; ssize_t len; vif = ath6kl_vif_first(ar); if (!vif) return -EIO; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (kstrtou16(buf, 0, &listen_interval)) return -EINVAL; if ((listen_interval < 15) || (listen_interval > 3000)) return -EINVAL; vif->listen_intvl_t = listen_interval; ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx, vif->listen_intvl_t, 0); return count; } static ssize_t ath6kl_listen_int_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath6kl *ar = file->private_data; struct ath6kl_vif *vif; char buf[32]; int len; vif = ath6kl_vif_first(ar); if (!vif) return -EIO; len = scnprintf(buf, sizeof(buf), "%u\n", vif->listen_intvl_t); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_listen_int = { .read = ath6kl_listen_int_read, .write = ath6kl_listen_int_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath6kl_power_params_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { 
struct ath6kl *ar = file->private_data; u8 buf[100]; unsigned int len = 0; char *sptr, *token; u16 idle_period, ps_poll_num, dtim, tx_wakeup, num_tx; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; sptr = buf; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou16(token, 0, &idle_period)) return -EINVAL; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou16(token, 0, &ps_poll_num)) return -EINVAL; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou16(token, 0, &dtim)) return -EINVAL; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou16(token, 0, &tx_wakeup)) return -EINVAL; token = strsep(&sptr, " "); if (!token) return -EINVAL; if (kstrtou16(token, 0, &num_tx)) return -EINVAL; ath6kl_wmi_pmparams_cmd(ar->wmi, 0, idle_period, ps_poll_num, dtim, tx_wakeup, num_tx, 0); return count; } static const struct file_operations fops_power_params = { .write = ath6kl_power_params_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; void ath6kl_debug_init(struct ath6kl *ar) { skb_queue_head_init(&ar->debug.fwlog_queue); init_completion(&ar->debug.fwlog_completion); /* * Actually we are lying here but don't know how to read the mask * value from the firmware. */ ar->debug.fwlog_mask = 0; } /* * Initialisation needs to happen in two stages as fwlog events can come * before cfg80211 is initialised, and debugfs depends on cfg80211 * initialisation. */ int ath6kl_debug_init_fs(struct ath6kl *ar) { ar->debugfs_phy = debugfs_create_dir("ath6kl", ar->wiphy->debugfsdir); debugfs_create_file("tgt_stats", 0400, ar->debugfs_phy, ar, &fops_tgt_stats); if (ar->hif_type == ATH6KL_HIF_TYPE_SDIO) debugfs_create_file("credit_dist_stats", 0400, ar->debugfs_phy, ar, &fops_credit_dist_stats); debugfs_create_file("endpoint_stats", 0600, ar->debugfs_phy, ar, &fops_endpoint_stats); debugfs_create_file("fwlog", 0400, ar->debugfs_phy, ar, &fops_fwlog); debugfs_create_file("fwlog_block", 0400, ar->debugfs_phy, ar, &fops_fwlog_block); debugfs_create_file("fwlog_mask", 0600, ar->debugfs_phy, ar, &fops_fwlog_mask); debugfs_create_file("reg_addr", 0600, ar->debugfs_phy, ar, &fops_diag_reg_read); debugfs_create_file("reg_dump", 0400, ar->debugfs_phy, ar, &fops_reg_dump); debugfs_create_file("lrssi_roam_threshold", 0600, ar->debugfs_phy, ar, &fops_lrssi_roam_threshold); debugfs_create_file("reg_write", 0600, ar->debugfs_phy, ar, &fops_diag_reg_write); debugfs_create_file("war_stats", 0400, ar->debugfs_phy, ar, &fops_war_stats); debugfs_create_file("roam_table", 0400, ar->debugfs_phy, ar, &fops_roam_table); debugfs_create_file("force_roam", 0200, ar->debugfs_phy, ar, &fops_force_roam); debugfs_create_file("roam_mode", 0200, ar->debugfs_phy, ar, &fops_roam_mode); debugfs_create_file("keepalive", 0600, ar->debugfs_phy, ar, &fops_keepalive); debugfs_create_file("disconnect_timeout", 0600, ar->debugfs_phy, ar, &fops_disconnect_timeout); debugfs_create_file("create_qos", 0200, ar->debugfs_phy, ar, &fops_create_qos); debugfs_create_file("delete_qos", 0200, ar->debugfs_phy, ar, &fops_delete_qos); debugfs_create_file("bgscan_interval", 0200, ar->debugfs_phy, ar, &fops_bgscan_int); debugfs_create_file("listen_interval", 0600, ar->debugfs_phy, ar, &fops_listen_int); debugfs_create_file("power_params", 0200, ar->debugfs_phy, ar, &fops_power_params); return 0; } void ath6kl_debug_cleanup(struct ath6kl *ar) { skb_queue_purge(&ar->debug.fwlog_queue); complete(&ar->debug.fwlog_completion); 
kfree(ar->debug.roam_tbl); } #endif
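The debugfs files registered by ath6kl_debug_init_fs() above are plain-text interfaces, so they can be exercised from user space with ordinary read()/write() calls. Below is a minimal, hypothetical user-space sketch (not part of the driver) that reads and then updates fwlog_mask; the debugfs path is an assumption, since it depends on where debugfs is mounted and on the wiphy name ("phy0" here). The same pattern applies to the other single-value files such as keepalive, disconnect_timeout and lrssi_roam_threshold.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd;

	/* Assumed path: debugfs mounted at /sys/kernel/debug, wiphy "phy0" */
	fd = open("/sys/kernel/debug/ieee80211/phy0/ath6kl/fwlog_mask", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* ath6kl_fwlog_mask_read() formats the current mask as "0x%x\n" */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("fwlog_mask: %s", buf);
	}

	/* ath6kl_fwlog_mask_write() accepts any kstrtou32-parsable value */
	if (write(fd, "0x1ffff", 7) != 7)
		perror("write");

	close(fd);
	return 0;
}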
1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 | // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright(c) 2018-2019 Realtek Corporation */ #include <linux/module.h> #include <linux/usb.h> #include "main.h" #include "rtw8822b.h" #include "usb.h" static const struct usb_device_id rtw_8822bu_id_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb812, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82c, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2102, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* CCNC */ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xb822, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Edimax EW-7822ULC */ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc822, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Edimax EW-7822UTC */ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xd822, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Edimax */ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xe822, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Edimax */ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xf822, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Edimax EW-7822UAD */ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb81a, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Default ID */ { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x1841, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* ASUS AC1300 USB-AC55 B1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x184c, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* ASUS U2 */ { USB_DEVICE_AND_INTERFACE_INFO(0x0B05, 0x19aa, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* ASUS - USB-AC58 rev A1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x0B05, 0x1870, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* ASUS */ { USB_DEVICE_AND_INTERFACE_INFO(0x0B05, 0x1874, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* ASUS */ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331e, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Dlink - DWA-181 */ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331c, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Dlink - DWA-182 - D1 */ {USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331f, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec)}, /* Dlink - DWA-183 D Ver */ { USB_DEVICE_AND_INTERFACE_INFO(0x13b1, 0x0043, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Linksys WUSB6400M */ { USB_DEVICE_AND_INTERFACE_INFO(0x13b1, 0x0045, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Linksys WUSB3600 v2 */ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x012d, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TP-Link Archer T3U v1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0138, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TP-Link Archer T3U Plus v1 
*/ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0115, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TP-Link Archer T4U V3 */ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x012e, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TP-LINK */ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0116, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TP-LINK */ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0117, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TP-LINK */ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9055, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Netgear A6150 */ { USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0025, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Hawking HW12ACU */ { USB_DEVICE_AND_INTERFACE_INFO(0x04ca, 0x8602, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* LiteOn */ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x808a, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TRENDnet TEW-808UBM */ {}, }; MODULE_DEVICE_TABLE(usb, rtw_8822bu_id_table); static int rtw8822bu_probe(struct usb_interface *intf, const struct usb_device_id *id) { return rtw_usb_probe(intf, id); } static struct usb_driver rtw_8822bu_driver = { .name = "rtw_8822bu", .id_table = rtw_8822bu_id_table, .probe = rtw8822bu_probe, .disconnect = rtw_usb_disconnect, }; module_usb_driver(rtw_8822bu_driver); MODULE_AUTHOR("Realtek Corporation"); MODULE_DESCRIPTION("Realtek 802.11ac wireless 8822bu driver"); MODULE_LICENSE("Dual BSD/GPL"); |
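Support for another RTL8822BU-based dongle is added by appending one more entry to rtw_8822bu_id_table, before the terminating empty entry. The fragment below only illustrates that pattern; the 0x1234/0x5678 vendor/product pair is a made-up placeholder, not a real device, and in a real change the entry would carry a comment naming the product, as the entries above do.

	/* Hypothetical example entry -- placeholder IDs, not a real device.
	 * A real addition goes into rtw_8822bu_id_table above, just before
	 * the terminating { } sentinel. */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x1234, 0x5678, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) },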
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_SIGNAL_H #define _LINUX_SCHED_SIGNAL_H #include <linux/rculist.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/sched/jobctl.h> #include <linux/sched/task.h> #include <linux/cred.h> #include <linux/refcount.h> #include <linux/pid.h> #include <linux/posix-timers.h> #include <linux/mm_types.h> #include <asm/ptrace.h> /* * Types defining task->signal and task->sighand and APIs using them: */ struct
sighand_struct { spinlock_t siglock; refcount_t count; wait_queue_head_t signalfd_wqh; struct k_sigaction action[_NSIG]; }; /* * Per-process accounting stats: */ struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; u64 ac_utime, ac_stime; unsigned long ac_minflt, ac_majflt; }; struct cpu_itimer { u64 expires; u64 incr; }; /* * This is the atomic variant of task_cputime, which can be used for * storing and updating task_cputime statistics without locking. */ struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; }; #define INIT_CPUTIME_ATOMIC \ (struct task_cputime_atomic) { \ .utime = ATOMIC64_INIT(0), \ .stime = ATOMIC64_INIT(0), \ .sum_exec_runtime = ATOMIC64_INIT(0), \ } /** * struct thread_group_cputimer - thread group interval timer counts * @cputime_atomic: atomic thread group interval timers. * * This structure contains the version of task_cputime, above, that is * used for thread group CPU timer calculations. */ struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; }; struct multiprocess_signals { sigset_t signal; struct hlist_node node; }; struct core_thread { struct task_struct *task; struct core_thread *next; }; struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; }; /* * NOTE! "signal_struct" does not have its own * locking, because a shared signal_struct always * implies a shared sighand_struct, so locking * sighand_struct is always a proper superset of * the locking of signal_struct. */ struct signal_struct { refcount_t sigcnt; atomic_t live; int nr_threads; int quick_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; /* for wait4() */ /* current thread group signal load-balancing target: */ struct task_struct *curr_target; /* shared signal handling: */ struct sigpending shared_pending; /* For collecting multiprocess signals during fork */ struct hlist_head multiprocess; /* thread group exit support */ int group_exit_code; /* notify group_exec_task when notify_count is less or equal to 0 */ int notify_count; struct task_struct *group_exec_task; /* thread group stop support, overloads group_exit_code too */ int group_stop_count; unsigned int flags; /* see SIGNAL_* flags below */ struct core_state *core_state; /* coredumping support */ /* * PR_SET_CHILD_SUBREAPER marks a process, like a service * manager, to re-parent orphan (double-forking) child processes * to this process instead of 'init'. The service manager is * able to receive SIGCHLD signals and is able to investigate * the process until it calls wait(). All children of this * process will inherit a flag if they should look for a * child_subreaper process at exit. */ unsigned int is_child_subreaper:1; unsigned int has_child_subreaper:1; #ifdef CONFIG_POSIX_TIMERS /* POSIX.1b Interval Timers */ unsigned int next_posix_timer_id; struct hlist_head posix_timers; /* ITIMER_REAL timer for the process */ struct hrtimer real_timer; ktime_t it_real_incr; /* * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these * values are defined to 0 and 1 respectively */ struct cpu_itimer it[2]; /* * Thread group totals for process CPU timers. * See thread_group_cputimer(), et al, for details. */ struct thread_group_cputimer cputimer; #endif /* Empty if CONFIG_POSIX_TIMERS=n */ struct posix_cputimers posix_cputimers; /* PID/PID hash table linkage. 
*/ struct pid *pids[PIDTYPE_MAX]; #ifdef CONFIG_NO_HZ_FULL atomic_t tick_dep_mask; #endif struct pid *tty_old_pgrp; /* boolean value for session group leader */ int leader; struct tty_struct *tty; /* NULL if no tty */ #ifdef CONFIG_SCHED_AUTOGROUP struct autogroup *autogroup; #endif /* * Cumulative resource counters for dead threads in the group, * and for reaped dead child processes forked by this group. * Live threads maintain their own counters and add to these * in __exit_signal, except for the group leader. */ seqlock_t stats_lock; u64 utime, stime, cutime, cstime; u64 gtime; u64 cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; unsigned long inblock, oublock, cinblock, coublock; unsigned long maxrss, cmaxrss; struct task_io_accounting ioac; /* * Cumulative ns of schedule CPU time fo dead threads in the * group, not including a zombie group leader, (This only differs * from jiffies_to_ns(utime + stime) if sched_clock uses something * other than jiffies.) */ unsigned long long sum_sched_runtime; /* * We don't bother to synchronize most readers of this at all, * because there is no reader checking a limit that actually needs * to get both rlim_cur and rlim_max atomically, and either one * alone is a single word that can safely be read normally. * getrlimit/setrlimit use task_lock(current->group_leader) to * protect this instead of the siglock, because they really * have no need to disable irqs. */ struct rlimit rlim[RLIM_NLIMITS]; #ifdef CONFIG_BSD_PROCESS_ACCT struct pacct_struct pacct; /* per-process accounting information */ #endif #ifdef CONFIG_TASKSTATS struct taskstats *stats; #endif #ifdef CONFIG_AUDIT unsigned audit_tty; struct tty_audit_buf *tty_audit_buf; #endif /* * Thread is the potential origin of an oom condition; kill first on * oom */ bool oom_flag_origin; short oom_score_adj; /* OOM kill score adjustment */ short oom_score_adj_min; /* OOM kill score adjustment min value. * Only settable by CAP_SYS_RESOURCE. */ struct mm_struct *oom_mm; /* recorded mm when the thread group got * killed by the oom killer */ struct mutex cred_guard_mutex; /* guard against foreign influences on * credential calculations * (notably. ptrace) * Deprecated do not use in new code. * Use exec_update_lock instead. */ struct rw_semaphore exec_update_lock; /* Held while task_struct is * being updated during exec, * and may have inconsistent * permissions. */ } __randomize_layout; /* * Bits in flags field of signal_struct. */ #define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ #define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ #define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ /* * Pending notifications to parent. 
*/ #define SIGNAL_CLD_STOPPED 0x00000010 #define SIGNAL_CLD_CONTINUED 0x00000020 #define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) #define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ #define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ SIGNAL_STOP_CONTINUED)
static inline void signal_set_stop_flags(struct signal_struct *sig, unsigned int flags) { WARN_ON(sig->flags & SIGNAL_GROUP_EXIT); sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; }
extern void flush_signals(struct task_struct *); extern void ignore_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *, int force_default); extern int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type);
static inline int kernel_dequeue_signal(void) { struct task_struct *task = current; kernel_siginfo_t __info; enum pid_type __type; int ret; spin_lock_irq(&task->sighand->siglock); ret = dequeue_signal(&task->blocked, &__info, &__type); spin_unlock_irq(&task->sighand->siglock); return ret; }
static inline void kernel_signal_stop(void) { spin_lock_irq(&current->sighand->siglock); if (current->jobctl & JOBCTL_STOP_DEQUEUED) { current->jobctl |= JOBCTL_STOPPED; set_special_state(TASK_STOPPED); } spin_unlock_irq(&current->sighand->siglock); schedule(); }
int force_sig_fault_to_task(int sig, int code, void __user *addr, struct task_struct *t); int force_sig_fault(int sig, int code, void __user *addr); int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t); int force_sig_mceerr(int code, void __user *, short); int send_sig_mceerr(int code, void __user *, short, struct task_struct *); int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper); int force_sig_pkuerr(void __user *addr, u32 pkey); int send_sig_perf(void __user *addr, u32 type, u64 sig_data); int force_sig_ptrace_errno_trap(int errno, void __user *addr); int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno); int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno, struct task_struct *t); int force_sig_seccomp(int syscall, int reason, bool force_coredump);
extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *); extern void force_sigsegv(int sig); extern int force_sig_info(struct kernel_siginfo *); extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp); extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid); extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *, const struct cred *); extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); extern __must_check bool do_notify_parent(struct task_struct *, int); extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); extern void force_sig(int); extern void force_fatal_sig(int); extern void force_exit_sig(int); extern int send_sig(int, struct task_struct *, int); extern int zap_other_threads(struct task_struct *p); extern struct sigqueue *sigqueue_alloc(void); extern void sigqueue_free(struct sigqueue *); extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type); extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
static inline void clear_notify_signal(void) { clear_thread_flag(TIF_NOTIFY_SIGNAL); smp_mb__after_atomic(); }
/* * Returns 'true' if kick_process() is needed to force a transition from * user -> kernel to guarantee expedient run of
TWA_SIGNAL based task_work. */ static inline bool __set_notify_signal(struct task_struct *task) { return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) && !wake_up_state(task, TASK_INTERRUPTIBLE); } /* * Called to break out of interruptible wait loops, and enter the * exit_to_user_mode_loop(). */ static inline void set_notify_signal(struct task_struct *task) { if (__set_notify_signal(task)) kick_process(task); } static inline int restart_syscall(void) { set_tsk_thread_flag(current, TIF_SIGPENDING); return -ERESTARTNOINTR; } static inline int task_sigpending(struct task_struct *p) { return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); } static inline int signal_pending(struct task_struct *p) { /* * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same * behavior in terms of ensuring that we break out of wait loops * so that notify signal callbacks can be processed. */ if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL))) return 1; return task_sigpending(p); } static inline int __fatal_signal_pending(struct task_struct *p) { return unlikely(sigismember(&p->pending.signal, SIGKILL)); } static inline int fatal_signal_pending(struct task_struct *p) { return task_sigpending(p) && __fatal_signal_pending(p); } static inline int signal_pending_state(unsigned int state, struct task_struct *p) { if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) return 0; if (!signal_pending(p)) return 0; return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); } /* * This should only be used in fault handlers to decide whether we * should stop the current fault routine to handle the signals * instead, especially with the case where we've got interrupted with * a VM_FAULT_RETRY. */ static inline bool fault_signal_pending(vm_fault_t fault_flags, struct pt_regs *regs) { return unlikely((fault_flags & VM_FAULT_RETRY) && (fatal_signal_pending(current) || (user_mode(regs) && signal_pending(current)))); } /* * Reevaluate whether the task has signals pending delivery. * Wake the task if so. * This is required every time the blocked sigset_t changes. * callers must hold sighand->siglock. */ extern void recalc_sigpending(void); extern void calculate_sigpending(void); extern void signal_wake_up_state(struct task_struct *t, unsigned int state); static inline void signal_wake_up(struct task_struct *t, bool fatal) { unsigned int state = 0; if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) { t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED); state = TASK_WAKEKILL | __TASK_TRACED; } signal_wake_up_state(t, state); } static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) { unsigned int state = 0; if (resume) { t->jobctl &= ~JOBCTL_TRACED; state = __TASK_TRACED; } signal_wake_up_state(t, state); } void task_join_group_stop(struct task_struct *task); #ifdef TIF_RESTORE_SIGMASK /* * Legacy restore_sigmask accessors. These are inefficient on * SMP architectures because they require atomic operations. */ /** * set_restore_sigmask() - make sure saved_sigmask processing gets done * * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code * will run before returning to user mode, to process the flag. For * all callers, TIF_SIGPENDING is already set or it's no harm to set * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the * arch code will notice on return to user mode, in case those bits * are scarce. We set TIF_SIGPENDING here to ensure that the arch * signal code always gets run when TIF_RESTORE_SIGMASK is set. 
*/ static inline void set_restore_sigmask(void) { set_thread_flag(TIF_RESTORE_SIGMASK); } static inline void clear_tsk_restore_sigmask(struct task_struct *task) { clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK); } static inline void clear_restore_sigmask(void) { clear_thread_flag(TIF_RESTORE_SIGMASK); } static inline bool test_tsk_restore_sigmask(struct task_struct *task) { return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK); } static inline bool test_restore_sigmask(void) { return test_thread_flag(TIF_RESTORE_SIGMASK); } static inline bool test_and_clear_restore_sigmask(void) { return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); }
#else /* TIF_RESTORE_SIGMASK */ /* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */ static inline void set_restore_sigmask(void) { current->restore_sigmask = true; } static inline void clear_tsk_restore_sigmask(struct task_struct *task) { task->restore_sigmask = false; } static inline void clear_restore_sigmask(void) { current->restore_sigmask = false; } static inline bool test_restore_sigmask(void) { return current->restore_sigmask; } static inline bool test_tsk_restore_sigmask(struct task_struct *task) { return task->restore_sigmask; } static inline bool test_and_clear_restore_sigmask(void) { if (!current->restore_sigmask) return false; current->restore_sigmask = false; return true; } #endif
static inline void restore_saved_sigmask(void) { if (test_and_clear_restore_sigmask()) __set_current_blocked(&current->saved_sigmask); }
extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);
static inline void restore_saved_sigmask_unless(bool interrupted) { if (interrupted) WARN_ON(!signal_pending(current)); else restore_saved_sigmask(); }
static inline sigset_t *sigmask_to_save(void) { sigset_t *res = &current->blocked; if (unlikely(test_restore_sigmask())) res = &current->saved_sigmask; return res; }
static inline int kill_cad_pid(int sig, int priv) { return kill_pid(cad_pid, sig, priv); }
/* These can be the second arg to send_sig_info/send_group_sig_info. */ #define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0) #define SEND_SIG_PRIV ((struct kernel_siginfo *) 1)
static inline int __on_sig_stack(unsigned long sp) { #ifdef CONFIG_STACK_GROWSUP return sp >= current->sas_ss_sp && sp - current->sas_ss_sp < current->sas_ss_size; #else return sp > current->sas_ss_sp && sp - current->sas_ss_sp <= current->sas_ss_size; #endif }
/* * True if we are on the alternate signal stack. */ static inline int on_sig_stack(unsigned long sp) { /* * If the signal stack is SS_AUTODISARM then, by construction, we * can't be on the signal stack unless user code deliberately set * SS_AUTODISARM when we were already on it. * * This improves reliability: if user state gets corrupted such that * the stack pointer points very close to the end of the signal stack, * then this check will enable the signal to be handled anyway. */ if (current->sas_ss_flags & SS_AUTODISARM) return 0; return __on_sig_stack(sp); }
static inline int sas_ss_flags(unsigned long sp) { if (!current->sas_ss_size) return SS_DISABLE; return on_sig_stack(sp) ? SS_ONSTACK : 0; }
static inline void sas_ss_reset(struct task_struct *p) { p->sas_ss_sp = 0; p->sas_ss_size = 0; p->sas_ss_flags = SS_DISABLE; }
static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) { if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !
sas_ss_flags(sp)) #ifdef CONFIG_STACK_GROWSUP return current->sas_ss_sp; #else return current->sas_ss_sp + current->sas_ss_size; #endif return sp; } extern void __cleanup_sighand(struct sighand_struct *); extern void flush_itimer_signals(void); #define tasklist_empty() \ list_empty(&init_task.tasks) #define next_task(p) \ list_entry_rcu((p)->tasks.next, struct task_struct, tasks) #define for_each_process(p) \ for (p = &init_task ; (p = next_task(p)) != &init_task ; ) extern bool current_is_single_threaded(void); /* * Without tasklist/siglock it is only rcu-safe if g can't exit/exec, * otherwise next_thread(t) will never reach g after list_del_rcu(g). */ #define while_each_thread(g, t) \ while ((t = next_thread(t)) != g) #define for_other_threads(p, t) \ for (t = p; (t = next_thread(t)) != p; ) #define __for_each_thread(signal, t) \ list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \ lockdep_is_held(&tasklist_lock)) #define for_each_thread(p, t) \ __for_each_thread((p)->signal, t) /* Careful: this is a double loop, 'break' won't work as expected. */ #define for_each_process_thread(p, t) \ for_each_process(p) for_each_thread(p, t) typedef int (*proc_visitor)(struct task_struct *p, void *data); void walk_process_tree(struct task_struct *top, proc_visitor, void *); static inline struct pid *task_pid_type(struct task_struct *task, enum pid_type type) { struct pid *pid; if (type == PIDTYPE_PID) pid = task_pid(task); else pid = task->signal->pids[type]; return pid; } static inline struct pid *task_tgid(struct task_struct *task) { return task->signal->pids[PIDTYPE_TGID]; } /* * Without tasklist or RCU lock it is not safe to dereference * the result of task_pgrp/task_session even if task == current, * we can race with another thread doing sys_setsid/sys_setpgid. 
*/ static inline struct pid *task_pgrp(struct task_struct *task) { return task->signal->pids[PIDTYPE_PGID]; } static inline struct pid *task_session(struct task_struct *task) { return task->signal->pids[PIDTYPE_SID]; } static inline int get_nr_threads(struct task_struct *task) { return task->signal->nr_threads; } static inline bool thread_group_leader(struct task_struct *p) { return p->exit_signal >= 0; } static inline bool same_thread_group(struct task_struct *p1, struct task_struct *p2) { return p1->signal == p2->signal; } /* * returns NULL if p is the last thread in the thread group */ static inline struct task_struct *__next_thread(struct task_struct *p) { return list_next_or_null_rcu(&p->signal->thread_head, &p->thread_node, struct task_struct, thread_node); } static inline struct task_struct *next_thread(struct task_struct *p) { return __next_thread(p) ?: p->group_leader; } static inline int thread_group_empty(struct task_struct *p) { return thread_group_leader(p) && list_is_last(&p->thread_node, &p->signal->thread_head); } #define delay_group_leader(p) \ (thread_group_leader(p) && !thread_group_empty(p)) extern struct sighand_struct *__lock_task_sighand(struct task_struct *task, unsigned long *flags); static inline struct sighand_struct *lock_task_sighand(struct task_struct *task, unsigned long *flags) { struct sighand_struct *ret; ret = __lock_task_sighand(task, flags); (void)__cond_lock(&task->sighand->siglock, ret); return ret; } static inline void unlock_task_sighand(struct task_struct *task, unsigned long *flags) { spin_unlock_irqrestore(&task->sighand->siglock, *flags); } #ifdef CONFIG_LOCKDEP extern void lockdep_assert_task_sighand_held(struct task_struct *task); #else static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { } #endif static inline unsigned long task_rlimit(const struct task_struct *task, unsigned int limit) { return READ_ONCE(task->signal->rlim[limit].rlim_cur); } static inline unsigned long task_rlimit_max(const struct task_struct *task, unsigned int limit) { return READ_ONCE(task->signal->rlim[limit].rlim_max); } static inline unsigned long rlimit(unsigned int limit) { return task_rlimit(current, limit); } static inline unsigned long rlimit_max(unsigned int limit) { return task_rlimit_max(current, limit); } #endif /* _LINUX_SCHED_SIGNAL_H */ |
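The sighand locking helpers near the end of the header are normally used in the pattern sketched below. This is an illustrative, hypothetical helper (not part of the header): lock_task_sighand() returns NULL once the task's sighand has been detached, and the flags saved by it must be handed back to unlock_task_sighand().

static inline int example_nr_threads_locked(struct task_struct *task)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	int nr;

	sighand = lock_task_sighand(task, &flags);
	if (!sighand)
		return -ESRCH;	/* task no longer has a sighand (being released) */

	/* ->signal fields such as nr_threads are stable while siglock is held */
	nr = task->signal->nr_threads;
	unlock_task_sighand(task, &flags);

	return nr;
}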
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* Copyright(c) 2018-2019 Realtek Corporation */ #ifndef __RTK_MAIN_H_ #define __RTK_MAIN_H_ #include <net/mac80211.h> #include <linux/vmalloc.h> #include <linux/firmware.h> #include <linux/average.h> #include <linux/bitops.h> #include <linux/bitfield.h> #include <linux/iopoll.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include "util.h" #define RTW_MAX_MAC_ID_NUM 32 #define RTW_MAX_SEC_CAM_NUM 32 #define MAX_PG_CAM_BACKUP_NUM 8 #define RTW_SCAN_MAX_SSIDS 4 #define RTW_MAX_PATTERN_NUM 12 #define RTW_MAX_PATTERN_MASK_SIZE 16 #define RTW_MAX_PATTERN_SIZE 128 #define RTW_WATCH_DOG_DELAY_TIME
round_jiffies_relative(HZ * 2) #define RFREG_MASK 0xfffff #define INV_RF_DATA 0xffffffff #define TX_PAGE_SIZE_SHIFT 7 #define TX_PAGE_SIZE (1 << TX_PAGE_SIZE_SHIFT) #define RTW_CHANNEL_WIDTH_MAX 3 #define RTW_RF_PATH_MAX 4 #define HW_FEATURE_LEN 13 #define RTW_TP_SHIFT 18 /* bytes/2s --> Mbps */ extern bool rtw_bf_support; extern bool rtw_disable_lps_deep_mode; extern unsigned int rtw_debug_mask; extern bool rtw_edcca_enabled; extern const struct ieee80211_ops rtw_ops; #define RTW_MAX_CHANNEL_NUM_2G 14 #define RTW_MAX_CHANNEL_NUM_5G 49 struct rtw_dev; struct rtw_debugfs; enum rtw_hci_type { RTW_HCI_TYPE_PCIE, RTW_HCI_TYPE_USB, RTW_HCI_TYPE_SDIO, RTW_HCI_TYPE_UNDEFINE, }; struct rtw_hci { struct rtw_hci_ops *ops; enum rtw_hci_type type; u32 rpwm_addr; u32 cpwm_addr; u8 bulkout_num; }; #define IS_CH_5G_BAND_1(channel) ((channel) >= 36 && (channel <= 48)) #define IS_CH_5G_BAND_2(channel) ((channel) >= 52 && (channel <= 64)) #define IS_CH_5G_BAND_3(channel) ((channel) >= 100 && (channel <= 144)) #define IS_CH_5G_BAND_4(channel) ((channel) >= 149 && (channel <= 177)) #define IS_CH_5G_BAND_MID(channel) \ (IS_CH_5G_BAND_2(channel) || IS_CH_5G_BAND_3(channel)) #define IS_CH_2G_BAND(channel) ((channel) <= 14) #define IS_CH_5G_BAND(channel) \ (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel) || \ IS_CH_5G_BAND_3(channel) || IS_CH_5G_BAND_4(channel)) enum rtw_supported_band { RTW_BAND_2G = BIT(NL80211_BAND_2GHZ), RTW_BAND_5G = BIT(NL80211_BAND_5GHZ), RTW_BAND_60G = BIT(NL80211_BAND_60GHZ), }; /* now, support up to 80M bw */ #define RTW_MAX_CHANNEL_WIDTH RTW_CHANNEL_WIDTH_80 enum rtw_bandwidth { RTW_CHANNEL_WIDTH_20 = 0, RTW_CHANNEL_WIDTH_40 = 1, RTW_CHANNEL_WIDTH_80 = 2, RTW_CHANNEL_WIDTH_160 = 3, RTW_CHANNEL_WIDTH_80_80 = 4, RTW_CHANNEL_WIDTH_5 = 5, RTW_CHANNEL_WIDTH_10 = 6, }; enum rtw_sc_offset { RTW_SC_DONT_CARE = 0, RTW_SC_20_UPPER = 1, RTW_SC_20_LOWER = 2, RTW_SC_20_UPMOST = 3, RTW_SC_20_LOWEST = 4, RTW_SC_40_UPPER = 9, RTW_SC_40_LOWER = 10, }; enum rtw_net_type { RTW_NET_NO_LINK = 0, RTW_NET_AD_HOC = 1, RTW_NET_MGD_LINKED = 2, RTW_NET_AP_MODE = 3, }; enum rtw_rf_type { RF_1T1R = 0, RF_1T2R = 1, RF_2T2R = 2, RF_2T3R = 3, RF_2T4R = 4, RF_3T3R = 5, RF_3T4R = 6, RF_4T4R = 7, RF_TYPE_MAX, }; enum rtw_rf_path { RF_PATH_A = 0, RF_PATH_B = 1, RF_PATH_C = 2, RF_PATH_D = 3, }; enum rtw_bb_path { BB_PATH_A = BIT(0), BB_PATH_B = BIT(1), BB_PATH_C = BIT(2), BB_PATH_D = BIT(3), BB_PATH_AB = (BB_PATH_A | BB_PATH_B), BB_PATH_AC = (BB_PATH_A | BB_PATH_C), BB_PATH_AD = (BB_PATH_A | BB_PATH_D), BB_PATH_BC = (BB_PATH_B | BB_PATH_C), BB_PATH_BD = (BB_PATH_B | BB_PATH_D), BB_PATH_CD = (BB_PATH_C | BB_PATH_D), BB_PATH_ABC = (BB_PATH_A | BB_PATH_B | BB_PATH_C), BB_PATH_ABD = (BB_PATH_A | BB_PATH_B | BB_PATH_D), BB_PATH_ACD = (BB_PATH_A | BB_PATH_C | BB_PATH_D), BB_PATH_BCD = (BB_PATH_B | BB_PATH_C | BB_PATH_D), BB_PATH_ABCD = (BB_PATH_A | BB_PATH_B | BB_PATH_C | BB_PATH_D), }; enum rtw_rate_section { RTW_RATE_SECTION_CCK = 0, RTW_RATE_SECTION_OFDM, RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_1S, RTW_RATE_SECTION_VHT_2S, /* keep last */ RTW_RATE_SECTION_MAX, }; enum rtw_wireless_set { WIRELESS_CCK = 0x00000001, WIRELESS_OFDM = 0x00000002, WIRELESS_HT = 0x00000004, WIRELESS_VHT = 0x00000008, }; #define HT_STBC_EN BIT(0) #define VHT_STBC_EN BIT(1) #define HT_LDPC_EN BIT(0) #define VHT_LDPC_EN BIT(1) enum rtw_chip_type { RTW_CHIP_TYPE_8822B, RTW_CHIP_TYPE_8822C, RTW_CHIP_TYPE_8723D, RTW_CHIP_TYPE_8821C, RTW_CHIP_TYPE_8703B, }; enum rtw_tx_queue_type { /* the order of AC queues matters */ 
RTW_TX_QUEUE_BK = 0x0, RTW_TX_QUEUE_BE = 0x1, RTW_TX_QUEUE_VI = 0x2, RTW_TX_QUEUE_VO = 0x3, RTW_TX_QUEUE_BCN = 0x4, RTW_TX_QUEUE_MGMT = 0x5, RTW_TX_QUEUE_HI0 = 0x6, RTW_TX_QUEUE_H2C = 0x7, /* keep it last */ RTK_MAX_TX_QUEUE_NUM }; enum rtw_rx_queue_type { RTW_RX_QUEUE_MPDU = 0x0, RTW_RX_QUEUE_C2H = 0x1, /* keep it last */ RTK_MAX_RX_QUEUE_NUM }; enum rtw_fw_type { RTW_NORMAL_FW = 0x0, RTW_WOWLAN_FW = 0x1, }; enum rtw_rate_index { RTW_RATEID_BGN_40M_2SS = 0, RTW_RATEID_BGN_40M_1SS = 1, RTW_RATEID_BGN_20M_2SS = 2, RTW_RATEID_BGN_20M_1SS = 3, RTW_RATEID_GN_N2SS = 4, RTW_RATEID_GN_N1SS = 5, RTW_RATEID_BG = 6, RTW_RATEID_G = 7, RTW_RATEID_B_20M = 8, RTW_RATEID_ARFR0_AC_2SS = 9, RTW_RATEID_ARFR1_AC_1SS = 10, RTW_RATEID_ARFR2_AC_2G_1SS = 11, RTW_RATEID_ARFR3_AC_2G_2SS = 12, RTW_RATEID_ARFR4_AC_3SS = 13, RTW_RATEID_ARFR5_N_3SS = 14, RTW_RATEID_ARFR7_N_4SS = 15, RTW_RATEID_ARFR6_AC_4SS = 16 }; enum rtw_trx_desc_rate { DESC_RATE1M = 0x00, DESC_RATE2M = 0x01, DESC_RATE5_5M = 0x02, DESC_RATE11M = 0x03, DESC_RATE6M = 0x04, DESC_RATE9M = 0x05, DESC_RATE12M = 0x06, DESC_RATE18M = 0x07, DESC_RATE24M = 0x08, DESC_RATE36M = 0x09, DESC_RATE48M = 0x0a, DESC_RATE54M = 0x0b, DESC_RATEMCS0 = 0x0c, DESC_RATEMCS1 = 0x0d, DESC_RATEMCS2 = 0x0e, DESC_RATEMCS3 = 0x0f, DESC_RATEMCS4 = 0x10, DESC_RATEMCS5 = 0x11, DESC_RATEMCS6 = 0x12, DESC_RATEMCS7 = 0x13, DESC_RATEMCS8 = 0x14, DESC_RATEMCS9 = 0x15, DESC_RATEMCS10 = 0x16, DESC_RATEMCS11 = 0x17, DESC_RATEMCS12 = 0x18, DESC_RATEMCS13 = 0x19, DESC_RATEMCS14 = 0x1a, DESC_RATEMCS15 = 0x1b, DESC_RATEMCS16 = 0x1c, DESC_RATEMCS17 = 0x1d, DESC_RATEMCS18 = 0x1e, DESC_RATEMCS19 = 0x1f, DESC_RATEMCS20 = 0x20, DESC_RATEMCS21 = 0x21, DESC_RATEMCS22 = 0x22, DESC_RATEMCS23 = 0x23, DESC_RATEMCS24 = 0x24, DESC_RATEMCS25 = 0x25, DESC_RATEMCS26 = 0x26, DESC_RATEMCS27 = 0x27, DESC_RATEMCS28 = 0x28, DESC_RATEMCS29 = 0x29, DESC_RATEMCS30 = 0x2a, DESC_RATEMCS31 = 0x2b, DESC_RATEVHT1SS_MCS0 = 0x2c, DESC_RATEVHT1SS_MCS1 = 0x2d, DESC_RATEVHT1SS_MCS2 = 0x2e, DESC_RATEVHT1SS_MCS3 = 0x2f, DESC_RATEVHT1SS_MCS4 = 0x30, DESC_RATEVHT1SS_MCS5 = 0x31, DESC_RATEVHT1SS_MCS6 = 0x32, DESC_RATEVHT1SS_MCS7 = 0x33, DESC_RATEVHT1SS_MCS8 = 0x34, DESC_RATEVHT1SS_MCS9 = 0x35, DESC_RATEVHT2SS_MCS0 = 0x36, DESC_RATEVHT2SS_MCS1 = 0x37, DESC_RATEVHT2SS_MCS2 = 0x38, DESC_RATEVHT2SS_MCS3 = 0x39, DESC_RATEVHT2SS_MCS4 = 0x3a, DESC_RATEVHT2SS_MCS5 = 0x3b, DESC_RATEVHT2SS_MCS6 = 0x3c, DESC_RATEVHT2SS_MCS7 = 0x3d, DESC_RATEVHT2SS_MCS8 = 0x3e, DESC_RATEVHT2SS_MCS9 = 0x3f, DESC_RATEVHT3SS_MCS0 = 0x40, DESC_RATEVHT3SS_MCS1 = 0x41, DESC_RATEVHT3SS_MCS2 = 0x42, DESC_RATEVHT3SS_MCS3 = 0x43, DESC_RATEVHT3SS_MCS4 = 0x44, DESC_RATEVHT3SS_MCS5 = 0x45, DESC_RATEVHT3SS_MCS6 = 0x46, DESC_RATEVHT3SS_MCS7 = 0x47, DESC_RATEVHT3SS_MCS8 = 0x48, DESC_RATEVHT3SS_MCS9 = 0x49, DESC_RATEVHT4SS_MCS0 = 0x4a, DESC_RATEVHT4SS_MCS1 = 0x4b, DESC_RATEVHT4SS_MCS2 = 0x4c, DESC_RATEVHT4SS_MCS3 = 0x4d, DESC_RATEVHT4SS_MCS4 = 0x4e, DESC_RATEVHT4SS_MCS5 = 0x4f, DESC_RATEVHT4SS_MCS6 = 0x50, DESC_RATEVHT4SS_MCS7 = 0x51, DESC_RATEVHT4SS_MCS8 = 0x52, DESC_RATEVHT4SS_MCS9 = 0x53, DESC_RATE_MAX, }; enum rtw_regulatory_domains { RTW_REGD_FCC = 0, RTW_REGD_MKK = 1, RTW_REGD_ETSI = 2, RTW_REGD_IC = 3, RTW_REGD_KCC = 4, RTW_REGD_ACMA = 5, RTW_REGD_CHILE = 6, RTW_REGD_UKRAINE = 7, RTW_REGD_MEXICO = 8, RTW_REGD_CN = 9, RTW_REGD_QATAR = 10, RTW_REGD_UK = 11, RTW_REGD_WW, RTW_REGD_MAX }; enum rtw_txq_flags { RTW_TXQ_AMPDU, RTW_TXQ_BLOCK_BA, }; enum rtw_flags { RTW_FLAG_RUNNING, RTW_FLAG_FW_RUNNING, RTW_FLAG_SCANNING, RTW_FLAG_POWERON, RTW_FLAG_LEISURE_PS, 
RTW_FLAG_LEISURE_PS_DEEP, RTW_FLAG_DIG_DISABLE, RTW_FLAG_BUSY_TRAFFIC, RTW_FLAG_WOWLAN, RTW_FLAG_RESTARTING, RTW_FLAG_RESTART_TRIGGERING, RTW_FLAG_FORCE_LOWEST_RATE, NUM_OF_RTW_FLAGS, }; enum rtw_evm { RTW_EVM_OFDM = 0, RTW_EVM_1SS, RTW_EVM_2SS_A, RTW_EVM_2SS_B, /* keep it last */ RTW_EVM_NUM }; enum rtw_snr { RTW_SNR_OFDM_A = 0, RTW_SNR_OFDM_B, RTW_SNR_OFDM_C, RTW_SNR_OFDM_D, RTW_SNR_1SS_A, RTW_SNR_1SS_B, RTW_SNR_1SS_C, RTW_SNR_1SS_D, RTW_SNR_2SS_A, RTW_SNR_2SS_B, RTW_SNR_2SS_C, RTW_SNR_2SS_D, /* keep it last */ RTW_SNR_NUM }; enum rtw_port { RTW_PORT_0 = 0, RTW_PORT_1 = 1, RTW_PORT_2 = 2, RTW_PORT_3 = 3, RTW_PORT_4 = 4, RTW_PORT_NUM }; enum rtw_wow_flags { RTW_WOW_FLAG_EN_MAGIC_PKT, RTW_WOW_FLAG_EN_REKEY_PKT, RTW_WOW_FLAG_EN_DISCONNECT, /* keep it last */ RTW_WOW_FLAG_MAX, }; /* the power index is represented by differences, which cck-1s & ht40-1s are * the base values, so for 1s's differences, there are only ht20 & ofdm */ struct rtw_2g_1s_pwr_idx_diff { #ifdef __LITTLE_ENDIAN s8 ofdm:4; s8 bw20:4; #else s8 bw20:4; s8 ofdm:4; #endif } __packed; struct rtw_2g_ns_pwr_idx_diff { #ifdef __LITTLE_ENDIAN s8 bw20:4; s8 bw40:4; s8 cck:4; s8 ofdm:4; #else s8 ofdm:4; s8 cck:4; s8 bw40:4; s8 bw20:4; #endif } __packed; struct rtw_2g_txpwr_idx { u8 cck_base[6]; u8 bw40_base[5]; struct rtw_2g_1s_pwr_idx_diff ht_1s_diff; struct rtw_2g_ns_pwr_idx_diff ht_2s_diff; struct rtw_2g_ns_pwr_idx_diff ht_3s_diff; struct rtw_2g_ns_pwr_idx_diff ht_4s_diff; }; struct rtw_5g_ht_1s_pwr_idx_diff { #ifdef __LITTLE_ENDIAN s8 ofdm:4; s8 bw20:4; #else s8 bw20:4; s8 ofdm:4; #endif } __packed; struct rtw_5g_ht_ns_pwr_idx_diff { #ifdef __LITTLE_ENDIAN s8 bw20:4; s8 bw40:4; #else s8 bw40:4; s8 bw20:4; #endif } __packed; struct rtw_5g_ofdm_ns_pwr_idx_diff { #ifdef __LITTLE_ENDIAN s8 ofdm_3s:4; s8 ofdm_2s:4; s8 ofdm_4s:4; s8 res:4; #else s8 res:4; s8 ofdm_4s:4; s8 ofdm_2s:4; s8 ofdm_3s:4; #endif } __packed; struct rtw_5g_vht_ns_pwr_idx_diff { #ifdef __LITTLE_ENDIAN s8 bw160:4; s8 bw80:4; #else s8 bw80:4; s8 bw160:4; #endif } __packed; struct rtw_5g_txpwr_idx { u8 bw40_base[14]; struct rtw_5g_ht_1s_pwr_idx_diff ht_1s_diff; struct rtw_5g_ht_ns_pwr_idx_diff ht_2s_diff; struct rtw_5g_ht_ns_pwr_idx_diff ht_3s_diff; struct rtw_5g_ht_ns_pwr_idx_diff ht_4s_diff; struct rtw_5g_ofdm_ns_pwr_idx_diff ofdm_diff; struct rtw_5g_vht_ns_pwr_idx_diff vht_1s_diff; struct rtw_5g_vht_ns_pwr_idx_diff vht_2s_diff; struct rtw_5g_vht_ns_pwr_idx_diff vht_3s_diff; struct rtw_5g_vht_ns_pwr_idx_diff vht_4s_diff; }; struct rtw_txpwr_idx { struct rtw_2g_txpwr_idx pwr_idx_2g; struct rtw_5g_txpwr_idx pwr_idx_5g; }; struct rtw_channel_params { u8 center_chan; u8 primary_chan; u8 bandwidth; }; struct rtw_hw_reg { u32 addr; u32 mask; }; struct rtw_hw_reg_desc { u32 addr; u32 mask; const char *desc; }; struct rtw_ltecoex_addr { u32 ctrl; u32 wdata; u32 rdata; }; struct rtw_reg_domain { u32 addr; u32 mask; #define RTW_REG_DOMAIN_MAC32 0 #define RTW_REG_DOMAIN_MAC16 1 #define RTW_REG_DOMAIN_MAC8 2 #define RTW_REG_DOMAIN_RF_A 3 #define RTW_REG_DOMAIN_RF_B 4 #define RTW_REG_DOMAIN_NL 0xFF u8 domain; }; struct rtw_rf_sipi_addr { u32 hssi_1; u32 hssi_2; u32 lssi_read; u32 lssi_read_pi; }; struct rtw_hw_reg_offset { struct rtw_hw_reg hw_reg; u8 offset; }; struct rtw_backup_info { u8 len; u32 reg; u32 val; }; enum rtw_vif_port_set { PORT_SET_MAC_ADDR = BIT(0), PORT_SET_BSSID = BIT(1), PORT_SET_NET_TYPE = BIT(2), PORT_SET_AID = BIT(3), PORT_SET_BCN_CTRL = BIT(4), }; struct rtw_vif_port { struct rtw_hw_reg mac_addr; struct rtw_hw_reg bssid; struct rtw_hw_reg net_type; 
struct rtw_hw_reg aid; struct rtw_hw_reg bcn_ctrl; }; struct rtw_tx_pkt_info { u32 tx_pkt_size; u8 offset; u8 pkt_offset; u8 tim_offset; u8 mac_id; u8 rate_id; u8 rate; u8 qsel; u8 bw; u8 sec_type; u8 sn; bool ampdu_en; u8 ampdu_factor; u8 ampdu_density; u16 seq; bool stbc; bool ldpc; bool dis_rate_fallback; bool bmc; bool use_rate; bool ls; bool fs; bool short_gi; bool report; bool rts; bool dis_qselseq; bool en_hwseq; u8 hw_ssn_sel; bool nav_use_hdr; bool bt_null; }; struct rtw_rx_pkt_stat { bool phy_status; bool icv_err; bool crc_err; bool decrypted; bool is_c2h; bool channel_invalid; s32 signal_power; u16 pkt_len; u8 bw; u8 drv_info_sz; u8 shift; u8 rate; u8 mac_id; u8 cam_id; u8 ppdu_cnt; u32 tsf_low; s8 rx_power[RTW_RF_PATH_MAX]; u8 rssi; u8 rxsc; s8 rx_snr[RTW_RF_PATH_MAX]; u8 rx_evm[RTW_RF_PATH_MAX]; s8 cfo_tail[RTW_RF_PATH_MAX]; u16 freq; u8 band; struct rtw_sta_info *si; struct ieee80211_vif *vif; struct ieee80211_hdr *hdr; }; DECLARE_EWMA(tp, 10, 2); struct rtw_traffic_stats { /* units in bytes */ u64 tx_unicast; u64 rx_unicast; /* count for packets */ u64 tx_cnt; u64 rx_cnt; /* units in Mbps */ u32 tx_throughput; u32 rx_throughput; struct ewma_tp tx_ewma_tp; struct ewma_tp rx_ewma_tp; }; enum rtw_lps_mode { RTW_MODE_ACTIVE = 0, RTW_MODE_LPS = 1, RTW_MODE_WMM_PS = 2, }; enum rtw_lps_deep_mode { LPS_DEEP_MODE_NONE = 0, LPS_DEEP_MODE_LCLK = 1, LPS_DEEP_MODE_PG = 2, }; enum rtw_pwr_state { RTW_RF_OFF = 0x0, RTW_RF_ON = 0x4, RTW_ALL_ON = 0xc, }; struct rtw_lps_conf { enum rtw_lps_mode mode; enum rtw_lps_deep_mode deep_mode; enum rtw_lps_deep_mode wow_deep_mode; enum rtw_pwr_state state; u8 awake_interval; u8 rlbm; u8 smart_ps; u8 port_id; bool sec_cam_backup; bool pattern_cam_backup; }; enum rtw_hw_key_type { RTW_CAM_NONE = 0, RTW_CAM_WEP40 = 1, RTW_CAM_TKIP = 2, RTW_CAM_AES = 4, RTW_CAM_WEP104 = 5, }; struct rtw_cam_entry { bool valid; bool group; u8 addr[ETH_ALEN]; u8 hw_key_type; struct ieee80211_key_conf *key; }; struct rtw_sec_desc { /* search strategy */ bool default_key_search; u32 total_cam_num; struct rtw_cam_entry cam_table[RTW_MAX_SEC_CAM_NUM]; DECLARE_BITMAP(cam_map, RTW_MAX_SEC_CAM_NUM); }; struct rtw_tx_report { /* protect the tx report queue */ spinlock_t q_lock; struct sk_buff_head queue; atomic_t sn; struct timer_list purge_timer; }; struct rtw_ra_report { struct rate_info txrate; u32 bit_rate; u8 desc_rate; }; struct rtw_txq { struct list_head list; unsigned long flags; }; DECLARE_EWMA(rssi, 10, 16); struct rtw_sta_info { struct rtw_dev *rtwdev; struct ieee80211_sta *sta; struct ieee80211_vif *vif; struct ewma_rssi avg_rssi; u8 rssi_level; u8 mac_id; u8 rate_id; enum rtw_bandwidth bw_mode; enum rtw_rf_type rf_type; u8 stbc_en:2; u8 ldpc_en:2; bool sgi_enable; bool vht_enable; u8 init_ra_lv; u64 ra_mask; DECLARE_BITMAP(tid_ba, IEEE80211_NUM_TIDS); struct rtw_ra_report ra_report; bool use_cfg_mask; struct cfg80211_bitrate_mask *mask; struct work_struct rc_work; }; enum rtw_bfee_role { RTW_BFEE_NONE, RTW_BFEE_SU, RTW_BFEE_MU }; struct rtw_bfee { enum rtw_bfee_role role; u16 p_aid; u8 g_id; u8 mac_addr[ETH_ALEN]; u8 sound_dim; /* SU-MIMO */ u8 su_reg_index; /* MU-MIMO */ u16 aid; }; struct rtw_bf_info { u8 bfer_mu_cnt; u8 bfer_su_cnt; DECLARE_BITMAP(bfer_su_reg_maping, 2); u8 cur_csi_rpt_rate; }; struct rtw_vif { enum rtw_net_type net_type; u16 aid; u8 mac_id; u8 mac_addr[ETH_ALEN]; u8 bssid[ETH_ALEN]; u8 port; u8 bcn_ctrl; struct list_head rsvd_page_list; struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS]; const struct rtw_vif_port *conf; struct 
cfg80211_scan_request *scan_req; struct ieee80211_scan_ies *scan_ies; struct rtw_traffic_stats stats; struct rtw_bfee bfee; }; struct rtw_regulatory { char alpha2[2]; u8 txpwr_regd_2g; u8 txpwr_regd_5g; }; enum rtw_regd_state { RTW_REGD_STATE_WORLDWIDE, RTW_REGD_STATE_PROGRAMMED, RTW_REGD_STATE_SETTING, RTW_REGD_STATE_NR, }; struct rtw_regd { enum rtw_regd_state state; const struct rtw_regulatory *regulatory; enum nl80211_dfs_regions dfs_region; }; struct rtw_chip_ops { int (*mac_init)(struct rtw_dev *rtwdev); int (*dump_fw_crash)(struct rtw_dev *rtwdev); void (*shutdown)(struct rtw_dev *rtwdev); int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map); void (*phy_set_param)(struct rtw_dev *rtwdev); void (*set_channel)(struct rtw_dev *rtwdev, u8 channel, u8 bandwidth, u8 primary_chan_idx); void (*query_rx_desc)(struct rtw_dev *rtwdev, u8 *rx_desc, struct rtw_rx_pkt_stat *pkt_stat, struct ieee80211_rx_status *rx_status); u32 (*read_rf)(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, u32 addr, u32 mask); bool (*write_rf)(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path, u32 addr, u32 mask, u32 data); void (*set_tx_power_index)(struct rtw_dev *rtwdev); int (*rsvd_page_dump)(struct rtw_dev *rtwdev, u8 *buf, u32 offset, u32 size); int (*set_antenna)(struct rtw_dev *rtwdev, u32 antenna_tx, u32 antenna_rx); void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable); void (*efuse_grant)(struct rtw_dev *rtwdev, bool enable); void (*false_alarm_statistics)(struct rtw_dev *rtwdev); void (*phy_calibration)(struct rtw_dev *rtwdev); void (*dpk_track)(struct rtw_dev *rtwdev); void (*cck_pd_set)(struct rtw_dev *rtwdev, u8 level); void (*pwr_track)(struct rtw_dev *rtwdev); void (*config_bfee)(struct rtw_dev *rtwdev, struct rtw_vif *vif, struct rtw_bfee *bfee, bool enable); void (*set_gid_table)(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf); void (*cfg_csi_rate)(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate, u8 fixrate_en, u8 *new_rate); void (*adaptivity_init)(struct rtw_dev *rtwdev); void (*adaptivity)(struct rtw_dev *rtwdev); void (*cfo_init)(struct rtw_dev *rtwdev); void (*cfo_track)(struct rtw_dev *rtwdev); void (*config_tx_path)(struct rtw_dev *rtwdev, u8 tx_path, enum rtw_bb_path tx_path_1ss, enum rtw_bb_path tx_path_cck, bool is_tx2_path); void (*config_txrx_mode)(struct rtw_dev *rtwdev, u8 tx_path, u8 rx_path, bool is_tx2_path); /* for USB/SDIO only */ void (*fill_txdesc_checksum)(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, u8 *txdesc); /* for coex */ void (*coex_set_init)(struct rtw_dev *rtwdev); void (*coex_set_ant_switch)(struct rtw_dev *rtwdev, u8 ctrl_type, u8 pos_type); void (*coex_set_gnt_fix)(struct rtw_dev *rtwdev); void (*coex_set_gnt_debug)(struct rtw_dev *rtwdev); void (*coex_set_rfe_type)(struct rtw_dev *rtwdev); void (*coex_set_wl_tx_power)(struct rtw_dev *rtwdev, u8 wl_pwr); void (*coex_set_wl_rx_gain)(struct rtw_dev *rtwdev, bool low_gain); }; #define RTW_PWR_POLLING_CNT 20000 #define RTW_PWR_CMD_READ 0x00 #define RTW_PWR_CMD_WRITE 0x01 #define RTW_PWR_CMD_POLLING 0x02 #define RTW_PWR_CMD_DELAY 0x03 #define RTW_PWR_CMD_END 0x04 /* define the base address of each block */ #define RTW_PWR_ADDR_MAC 0x00 #define RTW_PWR_ADDR_USB 0x01 #define RTW_PWR_ADDR_PCIE 0x02 #define RTW_PWR_ADDR_SDIO 0x03 #define RTW_PWR_INTF_SDIO_MSK BIT(0) #define RTW_PWR_INTF_USB_MSK BIT(1) #define RTW_PWR_INTF_PCI_MSK BIT(2) #define RTW_PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3)) #define RTW_PWR_CUT_TEST_MSK BIT(0) #define RTW_PWR_CUT_A_MSK BIT(1) #define 
RTW_PWR_CUT_B_MSK BIT(2) #define RTW_PWR_CUT_C_MSK BIT(3) #define RTW_PWR_CUT_D_MSK BIT(4) #define RTW_PWR_CUT_E_MSK BIT(5) #define RTW_PWR_CUT_F_MSK BIT(6) #define RTW_PWR_CUT_G_MSK BIT(7) #define RTW_PWR_CUT_ALL_MSK 0xFF enum rtw_pwr_seq_cmd_delay_unit { RTW_PWR_DELAY_US, RTW_PWR_DELAY_MS, }; struct rtw_pwr_seq_cmd { u16 offset; u8 cut_mask; u8 intf_mask; u8 base:4; u8 cmd:4; u8 mask; u8 value; }; enum rtw_chip_ver { RTW_CHIP_VER_CUT_A = 0x00, RTW_CHIP_VER_CUT_B = 0x01, RTW_CHIP_VER_CUT_C = 0x02, RTW_CHIP_VER_CUT_D = 0x03, RTW_CHIP_VER_CUT_E = 0x04, RTW_CHIP_VER_CUT_F = 0x05, RTW_CHIP_VER_CUT_G = 0x06, }; #define RTW_INTF_PHY_PLATFORM_ALL 0 enum rtw_intf_phy_cut { RTW_INTF_PHY_CUT_A = BIT(0), RTW_INTF_PHY_CUT_B = BIT(1), RTW_INTF_PHY_CUT_C = BIT(2), RTW_INTF_PHY_CUT_D = BIT(3), RTW_INTF_PHY_CUT_E = BIT(4), RTW_INTF_PHY_CUT_F = BIT(5), RTW_INTF_PHY_CUT_G = BIT(6), RTW_INTF_PHY_CUT_ALL = 0xFFFF, }; enum rtw_ip_sel { RTW_IP_SEL_PHY = 0, RTW_IP_SEL_MAC = 1, RTW_IP_SEL_DBI = 2, RTW_IP_SEL_UNDEF = 0xFFFF }; enum rtw_pq_map_id { RTW_PQ_MAP_VO = 0x0, RTW_PQ_MAP_VI = 0x1, RTW_PQ_MAP_BE = 0x2, RTW_PQ_MAP_BK = 0x3, RTW_PQ_MAP_MG = 0x4, RTW_PQ_MAP_HI = 0x5, RTW_PQ_MAP_NUM = 0x6, RTW_PQ_MAP_UNDEF, }; enum rtw_dma_mapping { RTW_DMA_MAPPING_EXTRA = 0, RTW_DMA_MAPPING_LOW = 1, RTW_DMA_MAPPING_NORMAL = 2, RTW_DMA_MAPPING_HIGH = 3, RTW_DMA_MAPPING_MAX, RTW_DMA_MAPPING_UNDEF, }; struct rtw_rqpn { enum rtw_dma_mapping dma_map_vo; enum rtw_dma_mapping dma_map_vi; enum rtw_dma_mapping dma_map_be; enum rtw_dma_mapping dma_map_bk; enum rtw_dma_mapping dma_map_mg; enum rtw_dma_mapping dma_map_hi; }; struct rtw_prioq_addr { u32 rsvd; u32 avail; }; struct rtw_prioq_addrs { struct rtw_prioq_addr prio[RTW_DMA_MAPPING_MAX]; bool wsize; }; struct rtw_page_table { u16 hq_num; u16 nq_num; u16 lq_num; u16 exq_num; u16 gapq_num; }; struct rtw_intf_phy_para { u16 offset; u16 value; u16 ip_sel; u16 cut_mask; u16 platform; }; struct rtw_wow_pattern { u16 crc; u8 type; u8 valid; u8 mask[RTW_MAX_PATTERN_MASK_SIZE]; }; struct rtw_pno_request { bool inited; u32 match_set_cnt; struct cfg80211_match_set *match_sets; u8 channel_cnt; struct ieee80211_channel *channels; struct cfg80211_sched_scan_plan scan_plan; }; struct rtw_wow_param { struct ieee80211_vif *wow_vif; DECLARE_BITMAP(flags, RTW_WOW_FLAG_MAX); u8 txpause; u8 pattern_cnt; struct rtw_wow_pattern patterns[RTW_MAX_PATTERN_NUM]; bool ips_enabled; struct rtw_pno_request pno_req; }; struct rtw_intf_phy_para_table { const struct rtw_intf_phy_para *usb2_para; const struct rtw_intf_phy_para *usb3_para; const struct rtw_intf_phy_para *gen1_para; const struct rtw_intf_phy_para *gen2_para; u8 n_usb2_para; u8 n_usb3_para; u8 n_gen1_para; u8 n_gen2_para; }; struct rtw_table { const void *data; const u32 size; void (*parse)(struct rtw_dev *rtwdev, const struct rtw_table *tbl); void (*do_cfg)(struct rtw_dev *rtwdev, const struct rtw_table *tbl, u32 addr, u32 data); enum rtw_rf_path rf_path; }; static inline void rtw_load_table(struct rtw_dev *rtwdev, const struct rtw_table *tbl) { (*tbl->parse)(rtwdev, tbl); } enum rtw_rfe_fem { RTW_RFE_IFEM, RTW_RFE_EFEM, RTW_RFE_IFEM2G_EFEM5G, RTW_RFE_NUM, }; struct rtw_rfe_def { const struct rtw_table *phy_pg_tbl; const struct rtw_table *txpwr_lmt_tbl; const struct rtw_table *agc_btg_tbl; }; #define RTW_DEF_RFE(chip, bb_pg, pwrlmt) { \ .phy_pg_tbl = &rtw ## chip ## _bb_pg_type ## bb_pg ## _tbl, \ .txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \ } #define RTW_DEF_RFE_EXT(chip, bb_pg, pwrlmt, btg) { \ .phy_pg_tbl = &rtw ## chip ## 
_bb_pg_type ## bb_pg ## _tbl, \ .txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \ .agc_btg_tbl = &rtw ## chip ## _agc_btg_type ## btg ## _tbl, \ } #define RTW_PWR_TRK_5G_1 0 #define RTW_PWR_TRK_5G_2 1 #define RTW_PWR_TRK_5G_3 2 #define RTW_PWR_TRK_5G_NUM 3 #define RTW_PWR_TRK_TBL_SZ 30 /* This table stores the values of TX power that will be adjusted by power * tracking. * * For 5G bands, there are 3 different settings. * For 2G there are cck rate and ofdm rate with different settings. */ struct rtw_pwr_track_tbl { const u8 *pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM]; const u8 *pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM]; const u8 *pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM]; const u8 *pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM]; const u8 *pwrtrk_2gb_n; const u8 *pwrtrk_2gb_p; const u8 *pwrtrk_2ga_n; const u8 *pwrtrk_2ga_p; const u8 *pwrtrk_2g_cckb_n; const u8 *pwrtrk_2g_cckb_p; const u8 *pwrtrk_2g_ccka_n; const u8 *pwrtrk_2g_ccka_p; const s8 *pwrtrk_xtal_n; const s8 *pwrtrk_xtal_p; }; enum rtw_wlan_cpu { RTW_WCPU_11AC, RTW_WCPU_11N, }; enum rtw_fw_fifo_sel { RTW_FW_FIFO_SEL_TX, RTW_FW_FIFO_SEL_RX, RTW_FW_FIFO_SEL_RSVD_PAGE, RTW_FW_FIFO_SEL_REPORT, RTW_FW_FIFO_SEL_LLT, RTW_FW_FIFO_SEL_RXBUF_FW, RTW_FW_FIFO_MAX, }; enum rtw_fwcd_item { RTW_FWCD_TLV, RTW_FWCD_REG, RTW_FWCD_ROM, RTW_FWCD_IMEM, RTW_FWCD_DMEM, RTW_FWCD_EMEM, }; /* hardware configuration for each IC */ struct rtw_chip_info { struct rtw_chip_ops *ops; u8 id; const char *fw_name; enum rtw_wlan_cpu wlan_cpu; u8 tx_pkt_desc_sz; u8 tx_buf_desc_sz; u8 rx_pkt_desc_sz; u8 rx_buf_desc_sz; u32 phy_efuse_size; u32 log_efuse_size; u32 ptct_efuse_size; u32 txff_size; u32 rxff_size; u32 fw_rxff_size; u16 rsvd_drv_pg_num; u8 band; u8 page_size; u8 csi_buf_pg_num; u8 dig_max; u8 dig_min; u8 txgi_factor; bool is_pwr_by_rate_dec; bool rx_ldpc; bool tx_stbc; u8 max_power_index; u8 ampdu_density; u16 fw_fifo_addr[RTW_FW_FIFO_MAX]; const struct rtw_fwcd_segs *fwcd_segs; u8 usb_tx_agg_desc_num; u8 default_1ss_tx_path; bool path_div_supported; bool ht_supported; bool vht_supported; u8 lps_deep_mode_supported; /* init values */ u8 sys_func_en; const struct rtw_pwr_seq_cmd **pwr_on_seq; const struct rtw_pwr_seq_cmd **pwr_off_seq; const struct rtw_rqpn *rqpn_table; const struct rtw_prioq_addrs *prioq_addrs; const struct rtw_page_table *page_table; const struct rtw_intf_phy_para_table *intf_table; const struct rtw_hw_reg *dig; const struct rtw_hw_reg *dig_cck; u32 rf_base_addr[2]; u32 rf_sipi_addr[2]; const struct rtw_rf_sipi_addr *rf_sipi_read_addr; u8 fix_rf_phy_num; const struct rtw_ltecoex_addr *ltecoex_addr; const struct rtw_table *mac_tbl; const struct rtw_table *agc_tbl; const struct rtw_table *bb_tbl; const struct rtw_table *rf_tbl[RTW_RF_PATH_MAX]; const struct rtw_table *rfk_init_tbl; const struct rtw_rfe_def *rfe_defs; u32 rfe_defs_size; bool en_dis_dpd; u16 dpd_ratemask; u8 iqk_threshold; u8 lck_threshold; const struct rtw_pwr_track_tbl *pwr_track_tbl; u8 bfer_su_max_num; u8 bfer_mu_max_num; struct rtw_hw_reg_offset *edcca_th; s8 l2h_th_ini_cs; s8 l2h_th_ini_ad; const char *wow_fw_name; const struct wiphy_wowlan_support *wowlan_stub; const u8 max_sched_scan_ssids; const u16 max_scan_ie_len; /* coex paras */ u32 coex_para_ver; u8 bt_desired_ver; bool scbd_support; bool new_scbd10_def; /* true: fix 2M(8822c) */ bool ble_hid_profile_support; bool wl_mimo_ps_support; u8 pstdma_type; /* 0: LPSoff, 1:LPSon */ u8 bt_rssi_type; u8 ant_isolation; u8 rssi_tolerance; u8 table_sant_num; u8 table_nsant_num; u8 tdma_sant_num; u8 tdma_nsant_num; u8 bt_afh_span_bw20; u8 
bt_afh_span_bw40; u8 afh_5g_num; u8 wl_rf_para_num; u8 coex_info_hw_regs_num; const u8 *bt_rssi_step; const u8 *wl_rssi_step; const struct coex_table_para *table_nsant; const struct coex_table_para *table_sant; const struct coex_tdma_para *tdma_sant; const struct coex_tdma_para *tdma_nsant; const struct coex_rf_para *wl_rf_para_tx; const struct coex_rf_para *wl_rf_para_rx; const struct coex_5g_afh_map *afh_5g; const struct rtw_hw_reg *btg_reg; const struct rtw_reg_domain *coex_info_hw_regs; u32 wl_fw_desired_ver; }; enum rtw_coex_bt_state_cnt { COEX_CNT_BT_RETRY, COEX_CNT_BT_REINIT, COEX_CNT_BT_REENABLE, COEX_CNT_BT_POPEVENT, COEX_CNT_BT_SETUPLINK, COEX_CNT_BT_IGNWLANACT, COEX_CNT_BT_INQ, COEX_CNT_BT_PAGE, COEX_CNT_BT_ROLESWITCH, COEX_CNT_BT_AFHUPDATE, COEX_CNT_BT_INFOUPDATE, COEX_CNT_BT_IQK, COEX_CNT_BT_IQKFAIL, COEX_CNT_BT_MAX }; enum rtw_coex_wl_state_cnt { COEX_CNT_WL_SCANAP, COEX_CNT_WL_CONNPKT, COEX_CNT_WL_COEXRUN, COEX_CNT_WL_NOISY0, COEX_CNT_WL_NOISY1, COEX_CNT_WL_NOISY2, COEX_CNT_WL_5MS_NOEXTEND, COEX_CNT_WL_FW_NOTIFY, COEX_CNT_WL_MAX }; struct rtw_coex_rfe { bool ant_switch_exist; bool ant_switch_diversity; bool ant_switch_with_bt; u8 rfe_module_type; u8 ant_switch_polarity; /* true if WLG at BTG, else at WLAG */ bool wlg_at_btg; }; #define COEX_WL_TDMA_PARA_LENGTH 5 struct rtw_coex_dm { bool cur_ps_tdma_on; bool cur_wl_rx_low_gain_en; bool ignore_wl_act; u8 reason; u8 bt_rssi_state[4]; u8 wl_rssi_state[4]; u8 wl_ch_info[3]; u8 cur_ps_tdma; u8 cur_table; u8 ps_tdma_para[5]; u8 cur_bt_pwr_lvl; u8 cur_bt_lna_lvl; u8 cur_wl_pwr_lvl; u8 bt_status; u32 cur_ant_pos_type; u32 cur_switch_status; u32 setting_tdma; u8 fw_tdma_para[COEX_WL_TDMA_PARA_LENGTH]; }; #define COEX_BTINFO_SRC_WL_FW 0x0 #define COEX_BTINFO_SRC_BT_RSP 0x1 #define COEX_BTINFO_SRC_BT_ACT 0x2 #define COEX_BTINFO_SRC_BT_IQK 0x3 #define COEX_BTINFO_SRC_BT_SCBD 0x4 #define COEX_BTINFO_SRC_H2C60 0x5 #define COEX_BTINFO_SRC_MAX 0x6 #define COEX_INFO_FTP BIT(7) #define COEX_INFO_A2DP BIT(6) #define COEX_INFO_HID BIT(5) #define COEX_INFO_SCO_BUSY BIT(4) #define COEX_INFO_ACL_BUSY BIT(3) #define COEX_INFO_INQ_PAGE BIT(2) #define COEX_INFO_SCO_ESCO BIT(1) #define COEX_INFO_CONNECTION BIT(0) #define COEX_BTINFO_LENGTH_MAX 10 #define COEX_BTINFO_LENGTH 7 #define COEX_BT_HIDINFO_LIST 0x0 #define COEX_BT_HIDINFO_A 0x1 #define COEX_BT_HIDINFO_NAME 3 #define COEX_BT_HIDINFO_LENGTH 6 #define COEX_BT_HIDINFO_HANDLE_NUM 4 #define COEX_BT_HIDINFO_C2H_HANDLE 0 #define COEX_BT_HIDINFO_C2H_VENDOR 1 #define COEX_BT_BLE_HANDLE_THRS 0x10 #define COEX_BT_HIDINFO_NOTCON 0xff struct rtw_coex_hid { u8 hid_handle; u8 hid_vendor; u8 hid_name[COEX_BT_HIDINFO_NAME]; bool hid_info_completed; bool is_game_hid; }; struct rtw_coex_hid_handle_list { u8 cmd_id; u8 len; u8 subid; u8 handle_cnt; u8 handle[COEX_BT_HIDINFO_HANDLE_NUM]; } __packed; struct rtw_coex_hid_info_a { u8 cmd_id; u8 len; u8 subid; u8 handle; u8 vendor; u8 name[COEX_BT_HIDINFO_NAME]; } __packed; struct rtw_coex_stat { bool bt_disabled; bool bt_disabled_pre; bool bt_link_exist; bool bt_whck_test; bool bt_inq_page; bool bt_inq_remain; bool bt_inq; bool bt_page; bool bt_ble_voice; bool bt_ble_exist; bool bt_hfp_exist; bool bt_a2dp_exist; bool bt_hid_exist; bool bt_pan_exist; /* PAN or OPP */ bool bt_opp_exist; /* OPP only */ bool bt_acl_busy; bool bt_fix_2M; bool bt_setup_link; bool bt_multi_link; bool bt_multi_link_pre; bool bt_multi_link_remain; bool bt_a2dp_sink; bool bt_a2dp_active; bool bt_reenable; bool bt_ble_scan_en; bool bt_init_scan; bool bt_slave; bool bt_418_hid_exist; bool 
bt_ble_hid_exist; bool bt_game_hid_exist; bool bt_hid_handle_cnt; bool bt_mailbox_reply; bool wl_under_lps; bool wl_under_ips; bool wl_hi_pri_task1; bool wl_hi_pri_task2; bool wl_force_lps_ctrl; bool wl_gl_busy; bool wl_linkscan_proc; bool wl_ps_state_fail; bool wl_tx_limit_en; bool wl_ampdu_limit_en; bool wl_connected; bool wl_slot_extend; bool wl_cck_lock; bool wl_cck_lock_pre; bool wl_cck_lock_ever; bool wl_connecting; bool wl_slot_toggle; bool wl_slot_toggle_change; /* if toggle to no-toggle */ bool wl_mimo_ps; u32 bt_supported_version; u32 bt_supported_feature; u32 hi_pri_tx; u32 hi_pri_rx; u32 lo_pri_tx; u32 lo_pri_rx; u32 patch_ver; u16 bt_reg_vendor_ae; u16 bt_reg_vendor_ac; s8 bt_rssi; u8 kt_ver; u8 gnt_workaround_state; u8 tdma_timer_base; u8 bt_profile_num; u8 bt_info_c2h[COEX_BTINFO_SRC_MAX][COEX_BTINFO_LENGTH_MAX]; u8 bt_info_lb2; u8 bt_info_lb3; u8 bt_info_hb0; u8 bt_info_hb1; u8 bt_info_hb2; u8 bt_info_hb3; u8 bt_ble_scan_type; u8 bt_hid_pair_num; u8 bt_hid_slot; u8 bt_a2dp_bitpool; u8 bt_iqk_state; u16 wl_beacon_interval; u8 wl_noisy_level; u8 wl_fw_dbg_info[10]; u8 wl_fw_dbg_info_pre[10]; u8 wl_rx_rate; u8 wl_tx_rate; u8 wl_rts_rx_rate; u8 wl_coex_mode; u8 wl_iot_peer; u8 ampdu_max_time; u8 wl_tput_dir; u8 wl_toggle_para[6]; u8 wl_toggle_interval; u16 score_board; u16 retry_limit; /* counters to record bt states */ u32 cnt_bt[COEX_CNT_BT_MAX]; /* counters to record wifi states */ u32 cnt_wl[COEX_CNT_WL_MAX]; /* counters to record bt c2h data */ u32 cnt_bt_info_c2h[COEX_BTINFO_SRC_MAX]; u32 darfrc; u32 darfrch; struct rtw_coex_hid hid_info[COEX_BT_HIDINFO_HANDLE_NUM]; struct rtw_coex_hid_handle_list hid_handle_list; }; struct rtw_coex { struct sk_buff_head queue; wait_queue_head_t wait; bool under_5g; bool stop_dm; bool freeze; bool freerun; bool wl_rf_off; bool manual_control; struct rtw_coex_stat stat; struct rtw_coex_dm dm; struct rtw_coex_rfe rfe; struct delayed_work bt_relink_work; struct delayed_work bt_reenable_work; struct delayed_work defreeze_work; struct delayed_work wl_remain_work; struct delayed_work bt_remain_work; struct delayed_work wl_connecting_work; struct delayed_work bt_multi_link_remain_work; struct delayed_work wl_ccklock_work; }; #define DPK_RF_REG_NUM 7 #define DPK_RF_PATH_NUM 2 #define DPK_BB_REG_NUM 18 #define DPK_CHANNEL_WIDTH_80 1 DECLARE_EWMA(thermal, 10, 4); struct rtw_dpk_info { bool is_dpk_pwr_on; bool is_reload; DECLARE_BITMAP(dpk_path_ok, DPK_RF_PATH_NUM); u8 thermal_dpk[DPK_RF_PATH_NUM]; struct ewma_thermal avg_thermal[DPK_RF_PATH_NUM]; u32 gnt_control; u32 gnt_value; u8 result[RTW_RF_PATH_MAX]; u8 dpk_txagc[RTW_RF_PATH_MAX]; u32 coef[RTW_RF_PATH_MAX][20]; u16 dpk_gs[RTW_RF_PATH_MAX]; u8 thermal_dpk_delta[RTW_RF_PATH_MAX]; u8 pre_pwsf[RTW_RF_PATH_MAX]; u8 dpk_band; u8 dpk_ch; u8 dpk_bw; }; struct rtw_phy_cck_pd_reg { u32 reg_pd; u32 mask_pd; u32 reg_cs; u32 mask_cs; }; #define DACK_MSBK_BACKUP_NUM 0xf #define DACK_DCK_BACKUP_NUM 0x2 struct rtw_swing_table { const u8 *p[RTW_RF_PATH_MAX]; const u8 *n[RTW_RF_PATH_MAX]; }; struct rtw_pkt_count { u16 num_bcn_pkt; u16 num_qry_pkt[DESC_RATE_MAX]; }; DECLARE_EWMA(evm, 10, 4); DECLARE_EWMA(snr, 10, 4); struct rtw_iqk_info { bool done; struct { u32 s1_x; u32 s1_y; u32 s0_x; u32 s0_y; } result; }; enum rtw_rf_band { RF_BAND_2G_CCK, RF_BAND_2G_OFDM, RF_BAND_5G_L, RF_BAND_5G_M, RF_BAND_5G_H, RF_BAND_MAX }; #define RF_GAIN_NUM 11 #define RF_HW_OFFSET_NUM 10 struct rtw_gapk_info { u32 rf3f_bp[RF_BAND_MAX][RF_GAIN_NUM][RTW_RF_PATH_MAX]; u32 rf3f_fs[RTW_RF_PATH_MAX][RF_GAIN_NUM]; bool txgapk_bp_done; s8 
offset[RF_GAIN_NUM][RTW_RF_PATH_MAX]; s8 fianl_offset[RF_GAIN_NUM][RTW_RF_PATH_MAX]; u8 read_txgain; u8 channel; }; #define EDCCA_TH_L2H_IDX 0 #define EDCCA_TH_H2L_IDX 1 #define EDCCA_TH_L2H_LB 48 #define EDCCA_ADC_BACKOFF 12 #define EDCCA_IGI_BASE 50 #define EDCCA_IGI_L2H_DIFF 8 #define EDCCA_L2H_H2L_DIFF 7 #define EDCCA_L2H_H2L_DIFF_NORMAL 8 enum rtw_edcca_mode { RTW_EDCCA_NORMAL = 0, RTW_EDCCA_ADAPTIVITY = 1, }; struct rtw_cfo_track { bool is_adjust; u8 crystal_cap; s32 cfo_tail[RTW_RF_PATH_MAX]; s32 cfo_cnt[RTW_RF_PATH_MAX]; u32 packet_count; u32 packet_count_pre; }; #define RRSR_INIT_2G 0x15f #define RRSR_INIT_5G 0x150 enum rtw_dm_cap { RTW_DM_CAP_NA, RTW_DM_CAP_TXGAPK, RTW_DM_CAP_NUM }; struct rtw_dm_info { u32 cck_fa_cnt; u32 ofdm_fa_cnt; u32 total_fa_cnt; u32 cck_cca_cnt; u32 ofdm_cca_cnt; u32 total_cca_cnt; u32 cck_ok_cnt; u32 cck_err_cnt; u32 ofdm_ok_cnt; u32 ofdm_err_cnt; u32 ht_ok_cnt; u32 ht_err_cnt; u32 vht_ok_cnt; u32 vht_err_cnt; u8 min_rssi; u8 pre_min_rssi; u16 fa_history[4]; u8 igi_history[4]; u8 igi_bitmap; bool damping; u8 damping_cnt; u8 damping_rssi; u8 cck_gi_u_bnd; u8 cck_gi_l_bnd; u8 fix_rate; u8 tx_rate; u32 rrsr_val_init; u32 rrsr_mask_min; u8 thermal_avg[RTW_RF_PATH_MAX]; u8 thermal_meter_k; u8 thermal_meter_lck; s8 delta_power_index[RTW_RF_PATH_MAX]; s8 delta_power_index_last[RTW_RF_PATH_MAX]; u8 default_ofdm_index; u8 default_cck_index; bool pwr_trk_triggered; bool pwr_trk_init_trigger; struct ewma_thermal avg_thermal[RTW_RF_PATH_MAX]; s8 txagc_remnant_cck; s8 txagc_remnant_ofdm; u8 rx_cck_agc_report_type; /* backup dack results for each path and I/Q */ u32 dack_adck[RTW_RF_PATH_MAX]; u16 dack_msbk[RTW_RF_PATH_MAX][2][DACK_MSBK_BACKUP_NUM]; u8 dack_dck[RTW_RF_PATH_MAX][2][DACK_DCK_BACKUP_NUM]; struct rtw_dpk_info dpk_info; struct rtw_cfo_track cfo_track; /* [bandwidth 0:20M/1:40M][number of path] */ u8 cck_pd_lv[2][RTW_RF_PATH_MAX]; u32 cck_fa_avg; u8 cck_pd_default; /* save the last rx phy status for debug */ s8 rx_snr[RTW_RF_PATH_MAX]; u8 rx_evm_dbm[RTW_RF_PATH_MAX]; s16 cfo_tail[RTW_RF_PATH_MAX]; u8 rssi[RTW_RF_PATH_MAX]; u8 curr_rx_rate; struct rtw_pkt_count cur_pkt_count; struct rtw_pkt_count last_pkt_count; struct ewma_evm ewma_evm[RTW_EVM_NUM]; struct ewma_snr ewma_snr[RTW_SNR_NUM]; u32 dm_flags; /* enum rtw_dm_cap */ struct rtw_iqk_info iqk; struct rtw_gapk_info gapk; bool is_bt_iqk_timeout; s8 l2h_th_ini; enum rtw_edcca_mode edcca_mode; u8 scan_density; }; struct rtw_efuse { u32 size; u32 physical_size; u32 logical_size; u32 protect_size; u8 addr[ETH_ALEN]; u8 channel_plan; u8 country_code[2]; u8 rf_board_option; u8 rfe_option; u8 power_track_type; u8 thermal_meter[RTW_RF_PATH_MAX]; u8 thermal_meter_k; u8 crystal_cap; u8 ant_div_cfg; u8 ant_div_type; u8 regd; u8 afe; u8 lna_type_2g; u8 lna_type_5g; u8 glna_type; u8 alna_type; bool ext_lna_2g; bool ext_lna_5g; u8 pa_type_2g; u8 pa_type_5g; u8 gpa_type; u8 apa_type; bool ext_pa_2g; bool ext_pa_5g; u8 tx_bb_swing_setting_2g; u8 tx_bb_swing_setting_5g; bool btcoex; /* bt share antenna with wifi */ bool share_ant; u8 bt_setting; u8 usb_mode_switch; struct { u8 hci; u8 bw; u8 ptcl; u8 nss; u8 ant_num; } hw_cap; struct rtw_txpwr_idx txpwr_idx_table[4]; }; struct rtw_phy_cond { #ifdef __LITTLE_ENDIAN u32 rfe:8; u32 intf:4; u32 pkg:4; u32 plat:4; u32 intf_rsvd:4; u32 cut:4; u32 branch:2; u32 neg:1; u32 pos:1; #else u32 pos:1; u32 neg:1; u32 branch:2; u32 cut:4; u32 intf_rsvd:4; u32 plat:4; u32 pkg:4; u32 intf:4; u32 rfe:8; #endif /* for intf:4 */ #define INTF_PCIE BIT(0) #define INTF_USB BIT(1) #define 
INTF_SDIO BIT(2) /* for branch:2 */ #define BRANCH_IF 0 #define BRANCH_ELIF 1 #define BRANCH_ELSE 2 #define BRANCH_ENDIF 3 }; struct rtw_fifo_conf { /* tx fifo information */ u16 rsvd_boundary; u16 rsvd_pg_num; u16 rsvd_drv_pg_num; u16 txff_pg_num; u16 acq_pg_num; u16 rsvd_drv_addr; u16 rsvd_h2c_info_addr; u16 rsvd_h2c_sta_info_addr; u16 rsvd_h2cq_addr; u16 rsvd_cpu_instr_addr; u16 rsvd_fw_txbuf_addr; u16 rsvd_csibuf_addr; const struct rtw_rqpn *rqpn; }; struct rtw_fwcd_desc { u32 size; u8 *next; u8 *data; }; struct rtw_fwcd_segs { const u32 *segs; u8 num; }; #define FW_CD_TYPE 0xffff #define FW_CD_LEN 4 #define FW_CD_VAL 0xaabbccdd struct rtw_fw_state { const struct firmware *firmware; struct rtw_dev *rtwdev; struct completion completion; struct rtw_fwcd_desc fwcd_desc; u16 version; u8 sub_version; u8 sub_index; u16 h2c_version; u32 feature; u32 feature_ext; enum rtw_fw_type type; }; enum rtw_sar_sources { RTW_SAR_SOURCE_NONE, RTW_SAR_SOURCE_COMMON, }; enum rtw_sar_bands { RTW_SAR_BAND_0, RTW_SAR_BAND_1, /* RTW_SAR_BAND_2, not used now */ RTW_SAR_BAND_3, RTW_SAR_BAND_4, RTW_SAR_BAND_NR, }; /* the union is reserved for other kinds of SAR sources * which might not re-use same format with array common. */ union rtw_sar_cfg { s8 common[RTW_SAR_BAND_NR]; }; struct rtw_sar { enum rtw_sar_sources src; union rtw_sar_cfg cfg[RTW_RF_PATH_MAX][RTW_RATE_SECTION_MAX]; }; struct rtw_hal { u32 rcr; u32 chip_version; u8 cut_version; u8 mp_chip; u8 oem_id; u8 pkg_type; struct rtw_phy_cond phy_cond; bool rfe_btg; u8 ps_mode; u8 current_channel; u8 current_primary_channel_index; u8 current_band_width; u8 current_band_type; u8 primary_channel; /* center channel for different available bandwidth, * val of (bw > current_band_width) is invalid */ u8 cch_by_bw[RTW_MAX_CHANNEL_WIDTH + 1]; u8 sec_ch_offset; u8 rf_type; u8 rf_path_num; u8 rf_phy_num; u32 antenna_tx; u32 antenna_rx; u8 bfee_sts_cap; bool txrx_1ss; /* protect tx power section */ struct mutex tx_power_mutex; s8 tx_pwr_by_rate_offset_2g[RTW_RF_PATH_MAX] [DESC_RATE_MAX]; s8 tx_pwr_by_rate_offset_5g[RTW_RF_PATH_MAX] [DESC_RATE_MAX]; s8 tx_pwr_by_rate_base_2g[RTW_RF_PATH_MAX] [RTW_RATE_SECTION_MAX]; s8 tx_pwr_by_rate_base_5g[RTW_RF_PATH_MAX] [RTW_RATE_SECTION_MAX]; s8 tx_pwr_limit_2g[RTW_REGD_MAX] [RTW_CHANNEL_WIDTH_MAX] [RTW_RATE_SECTION_MAX] [RTW_MAX_CHANNEL_NUM_2G]; s8 tx_pwr_limit_5g[RTW_REGD_MAX] [RTW_CHANNEL_WIDTH_MAX] [RTW_RATE_SECTION_MAX] [RTW_MAX_CHANNEL_NUM_5G]; s8 tx_pwr_tbl[RTW_RF_PATH_MAX] [DESC_RATE_MAX]; enum rtw_sar_bands sar_band; struct rtw_sar sar; /* for 8821c set channel */ u32 ch_param[3]; }; struct rtw_path_div { enum rtw_bb_path current_tx_path; u32 path_a_sum; u32 path_b_sum; u16 path_a_cnt; u16 path_b_cnt; }; struct rtw_chan_info { int pri_ch_idx; int action_id; int bw; u8 extra_info; u8 channel; u16 timeout; }; struct rtw_chan_list { u32 buf_size; u32 ch_num; u32 size; u16 addr; }; struct rtw_hw_scan_info { struct ieee80211_vif *scanning_vif; u8 probe_pg_size; u8 op_pri_ch_idx; u8 op_pri_ch; u8 op_chan; u8 op_bw; }; struct rtw_dev { struct ieee80211_hw *hw; struct device *dev; struct rtw_hci hci; struct rtw_hw_scan_info scan_info; const struct rtw_chip_info *chip; struct rtw_hal hal; struct rtw_fifo_conf fifo; struct rtw_fw_state fw; struct rtw_efuse efuse; struct rtw_sec_desc sec; struct rtw_traffic_stats stats; struct rtw_regd regd; struct rtw_bf_info bf_info; struct rtw_dm_info dm_info; struct rtw_coex coex; /* ensures exclusive access from mac80211 callbacks */ struct mutex mutex; /* watch dog every 2 sec */ struct 
delayed_work watch_dog_work; u32 watch_dog_cnt; struct list_head rsvd_page_list; /* c2h cmd queue & handler work */ struct sk_buff_head c2h_queue; struct work_struct c2h_work; struct work_struct ips_work; struct work_struct fw_recovery_work; struct work_struct update_beacon_work; /* used to protect txqs list */ spinlock_t txq_lock; struct list_head txqs; struct workqueue_struct *tx_wq; struct work_struct tx_work; struct work_struct ba_work; struct rtw_tx_report tx_report; struct { /* indicate the mail box to use with fw */ u8 last_box_num; u32 seq; } h2c; /* lps power state & handler work */ struct rtw_lps_conf lps_conf; bool ps_enabled; bool beacon_loss; struct completion lps_leave_check; struct rtw_debugfs *debugfs; u8 sta_cnt; u32 rts_threshold; DECLARE_BITMAP(hw_port, RTW_PORT_NUM); DECLARE_BITMAP(mac_id_map, RTW_MAX_MAC_ID_NUM); DECLARE_BITMAP(flags, NUM_OF_RTW_FLAGS); u8 mp_mode; struct rtw_path_div dm_path_div; struct rtw_fw_state wow_fw; struct rtw_wow_param wow; bool need_rfk; struct completion fw_scan_density; bool ap_active; /* hci related data, must be last */ u8 priv[] __aligned(sizeof(void *)); }; #include "hci.h" static inline bool rtw_is_assoc(struct rtw_dev *rtwdev) { return !!rtwdev->sta_cnt; } static inline struct ieee80211_txq *rtwtxq_to_txq(struct rtw_txq *rtwtxq) { void *p = rtwtxq; return container_of(p, struct ieee80211_txq, drv_priv); } static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw_vif *rtwvif) { void *p = rtwvif; return container_of(p, struct ieee80211_vif, drv_priv); } static inline void rtw_chip_efuse_grant_on(struct rtw_dev *rtwdev) { if (rtwdev->chip->ops->efuse_grant) rtwdev->chip->ops->efuse_grant(rtwdev, true); } static inline void rtw_chip_efuse_grant_off(struct rtw_dev *rtwdev) { if (rtwdev->chip->ops->efuse_grant) rtwdev->chip->ops->efuse_grant(rtwdev, false); } static inline bool rtw_chip_wcpu_11n(struct rtw_dev *rtwdev) { return rtwdev->chip->wlan_cpu == RTW_WCPU_11N; } static inline bool rtw_chip_wcpu_11ac(struct rtw_dev *rtwdev) { return rtwdev->chip->wlan_cpu == RTW_WCPU_11AC; } static inline bool rtw_chip_has_rx_ldpc(struct rtw_dev *rtwdev) { return rtwdev->chip->rx_ldpc; } static inline bool rtw_chip_has_tx_stbc(struct rtw_dev *rtwdev) { return rtwdev->chip->tx_stbc; } static inline u8 rtw_acquire_macid(struct rtw_dev *rtwdev) { unsigned long mac_id; mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM); if (mac_id < RTW_MAX_MAC_ID_NUM) set_bit(mac_id, rtwdev->mac_id_map); return mac_id; } static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id) { clear_bit(mac_id, rtwdev->mac_id_map); } static inline int rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev) { if (rtwdev->chip->ops->dump_fw_crash) return rtwdev->chip->ops->dump_fw_crash(rtwdev); return 0; } static inline enum nl80211_band rtw_hw_to_nl80211_band(enum rtw_supported_band hw_band) { switch (hw_band) { default: case RTW_BAND_2G: return NL80211_BAND_2GHZ; case RTW_BAND_5G: return NL80211_BAND_5GHZ; case RTW_BAND_60G: return NL80211_BAND_60GHZ; } } void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel); void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period); void rtw_get_channel_params(struct cfg80211_chan_def *chandef, struct rtw_channel_params *ch_param); bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target); bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val); bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value); void rtw_restore_reg(struct rtw_dev *rtwdev, struct 
rtw_backup_info *bckp, u32 num); void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss); void rtw_set_channel(struct rtw_dev *rtwdev); void rtw_chip_prepare_tx(struct rtw_dev *rtwdev); void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, u32 config); void rtw_tx_report_purge_timer(struct timer_list *t); void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si, bool reset_ra_mask); void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, const u8 *mac_addr, bool hw_scan); void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, bool hw_scan); int rtw_core_start(struct rtw_dev *rtwdev); void rtw_core_stop(struct rtw_dev *rtwdev); int rtw_chip_info_setup(struct rtw_dev *rtwdev); int rtw_core_init(struct rtw_dev *rtwdev); void rtw_core_deinit(struct rtw_dev *rtwdev); int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw); void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw); u16 rtw_desc_to_bitrate(u8 desc_rate); void rtw_vif_assoc_changed(struct rtw_vif *rtwvif, struct ieee80211_bss_conf *conf); int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, struct ieee80211_vif *vif); void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, bool fw_exist); void rtw_fw_recovery(struct rtw_dev *rtwdev); void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start); int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size, u32 fwcd_item); int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size); void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool config_1ss); void rtw_update_channel(struct rtw_dev *rtwdev, u8 center_channel, u8 primary_channel, enum rtw_supported_band band, enum rtw_bandwidth bandwidth); void rtw_core_port_switch(struct rtw_dev *rtwdev, struct ieee80211_vif *vif); bool rtw_core_check_sta_active(struct rtw_dev *rtwdev); void rtw_core_enable_beacon(struct rtw_dev *rtwdev, bool enable); #endif |
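/*
 * Illustrative sketch, not part of the header above: what the RTW_TP_SHIFT
 * conversion ("bytes/2s --> Mbps") works out to. Traffic counters are
 * accumulated over the 2-second watchdog period; bytes per 2 s times 8/2
 * gives bits per second, and dividing by 2^20 (~10^6) gives Mbps, so the
 * whole conversion collapses to a right shift by 18. The helper name
 * rtw_bytes_per_2s_to_mbps() is made up for this example.
 */
static inline u32 rtw_bytes_per_2s_to_mbps(u64 bytes)
{
	/* (bytes * 8 / 2) / 2^20 == bytes >> 18 == bytes >> RTW_TP_SHIFT */
	return (u32)(bytes >> RTW_TP_SHIFT);
}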
// SPDX-License-Identifier: GPL-2.0+ /* * linux/fs/jbd2/revoke.c * * Written by Stephen C. Tweedie <sct@redhat.com>, 2000 * * Copyright 2000 Red Hat corp --- All Rights Reserved * * Journal revoke routines for the generic filesystem journaling code; * part of the ext2fs journaling system. * * Revoke is the mechanism used to prevent old log records for deleted * metadata from being replayed on top of newer data using the same * blocks.
The revoke mechanism is used in two separate places: * * + Commit: during commit we write the entire list of the current * transaction's revoked blocks to the journal * * + Recovery: during recovery we record the transaction ID of all * revoked blocks. If there are multiple revoke records in the log * for a single block, only the last one counts, and if there is a log * entry for a block beyond the last revoke, then that log entry still * gets replayed. * * We can get interactions between revokes and new log data within a * single transaction: * * Block is revoked and then journaled: * The desired end result is the journaling of the new block, so we * cancel the revoke before the transaction commits. * * Block is journaled and then revoked: * The revoke must take precedence over the write of the block, so we * need either to cancel the journal entry or to write the revoke * later in the log than the log block. In this case, we choose the * latter: journaling a block cancels any revoke record for that block * in the current transaction, so any revoke for that block in the * transaction must have happened after the block was journaled and so * the revoke must take precedence. * * Block is revoked and then written as data: * The data write is allowed to succeed, but the revoke is _not_ * cancelled. We still need to prevent old log records from * overwriting the new data. We don't even need to clear the revoke * bit here. * * We cache revoke status of a buffer in the current transaction in b_states * bits. As the name says, revokevalid flag indicates that the cached revoke * status of a buffer is valid and we can rely on the cached status. * * Revoke information on buffers is a tri-state value: * * RevokeValid clear: no cached revoke status, need to look it up * RevokeValid set, Revoked clear: * buffer has not been revoked, and cancel_revoke * need do nothing. * RevokeValid set, Revoked set: * buffer has been revoked. * * Locking rules: * We keep two hash tables of revoke records. One hashtable belongs to the * running transaction (is pointed to by journal->j_revoke), the other one * belongs to the committing transaction. Accesses to the second hash table * happen only from the kjournald and no other thread touches this table. Also * journal_switch_revoke_table() which switches which hashtable belongs to the * running and which to the committing transaction is called only from * kjournald. Therefore we need no locks when accessing the hashtable belonging * to the committing transaction. * * All users operating on the hash table belonging to the running transaction * have a handle to the transaction. Therefore they are safe from kjournald * switching hash tables under them. For operations on the lists of entries in * the hash table j_revoke_lock is used. * * Finally, also replay code uses the hash tables but at this moment no one else * can touch them (filesystem isn't mounted yet) and hence no locking is * needed. */ #ifndef __KERNEL__ #include "jfs_user.h" #else #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/init.h> #include <linux/bio.h> #include <linux/log2.h> #include <linux/hash.h> #endif static struct kmem_cache *jbd2_revoke_record_cache; static struct kmem_cache *jbd2_revoke_table_cache; /* Each revoke record represents one single revoked block. During journal replay, this involves recording the transaction ID of the last transaction to revoke this block. 
*/ struct jbd2_revoke_record_s { struct list_head hash; tid_t sequence; /* Used for recovery only */ unsigned long long blocknr; }; /* The revoke table is just a simple hash table of revoke records. */ struct jbd2_revoke_table_s { /* It is conceivable that we might want a larger hash table * for recovery. Must be a power of two. */ int hash_size; int hash_shift; struct list_head *hash_table; }; #ifdef __KERNEL__ static void write_one_revoke_record(transaction_t *, struct list_head *, struct buffer_head **, int *, struct jbd2_revoke_record_s *); static void flush_descriptor(journal_t *, struct buffer_head *, int); #endif /* Utility functions to maintain the revoke table */ static inline int hash(journal_t *journal, unsigned long long block) { return hash_64(block, journal->j_revoke->hash_shift); } static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr, tid_t seq) { struct list_head *hash_list; struct jbd2_revoke_record_s *record; gfp_t gfp_mask = GFP_NOFS; if (journal_oom_retry) gfp_mask |= __GFP_NOFAIL; record = kmem_cache_alloc(jbd2_revoke_record_cache, gfp_mask); if (!record) return -ENOMEM; record->sequence = seq; record->blocknr = blocknr; hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)]; spin_lock(&journal->j_revoke_lock); list_add(&record->hash, hash_list); spin_unlock(&journal->j_revoke_lock); return 0; } /* Find a revoke record in the journal's hash table. */ static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal, unsigned long long blocknr) { struct list_head *hash_list; struct jbd2_revoke_record_s *record; hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)]; spin_lock(&journal->j_revoke_lock); record = (struct jbd2_revoke_record_s *) hash_list->next; while (&(record->hash) != hash_list) { if (record->blocknr == blocknr) { spin_unlock(&journal->j_revoke_lock); return record; } record = (struct jbd2_revoke_record_s *) record->hash.next; } spin_unlock(&journal->j_revoke_lock); return NULL; } void jbd2_journal_destroy_revoke_record_cache(void) { kmem_cache_destroy(jbd2_revoke_record_cache); jbd2_revoke_record_cache = NULL; } void jbd2_journal_destroy_revoke_table_cache(void) { kmem_cache_destroy(jbd2_revoke_table_cache); jbd2_revoke_table_cache = NULL; } int __init jbd2_journal_init_revoke_record_cache(void) { J_ASSERT(!jbd2_revoke_record_cache); jbd2_revoke_record_cache = KMEM_CACHE(jbd2_revoke_record_s, SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY); if (!jbd2_revoke_record_cache) { pr_emerg("JBD2: failed to create revoke_record cache\n"); return -ENOMEM; } return 0; } int __init jbd2_journal_init_revoke_table_cache(void) { J_ASSERT(!jbd2_revoke_table_cache); jbd2_revoke_table_cache = KMEM_CACHE(jbd2_revoke_table_s, SLAB_TEMPORARY); if (!jbd2_revoke_table_cache) { pr_emerg("JBD2: failed to create revoke_table cache\n"); return -ENOMEM; } return 0; } static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size) { int shift = 0; int tmp = hash_size; struct jbd2_revoke_table_s *table; table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL); if (!table) goto out; while((tmp >>= 1UL) != 0UL) shift++; table->hash_size = hash_size; table->hash_shift = shift; table->hash_table = kmalloc_array(hash_size, sizeof(struct list_head), GFP_KERNEL); if (!table->hash_table) { kmem_cache_free(jbd2_revoke_table_cache, table); table = NULL; goto out; } for (tmp = 0; tmp < hash_size; tmp++) INIT_LIST_HEAD(&table->hash_table[tmp]); out: return table; } static void jbd2_journal_destroy_revoke_table(struct 
jbd2_revoke_table_s *table) { int i; struct list_head *hash_list; for (i = 0; i < table->hash_size; i++) { hash_list = &table->hash_table[i]; J_ASSERT(list_empty(hash_list)); } kfree(table->hash_table); kmem_cache_free(jbd2_revoke_table_cache, table); } /* Initialise the revoke table for a given journal to a given size. */ int jbd2_journal_init_revoke(journal_t *journal, int hash_size) { J_ASSERT(journal->j_revoke_table[0] == NULL); J_ASSERT(is_power_of_2(hash_size)); journal->j_revoke_table[0] = jbd2_journal_init_revoke_table(hash_size); if (!journal->j_revoke_table[0]) goto fail0; journal->j_revoke_table[1] = jbd2_journal_init_revoke_table(hash_size); if (!journal->j_revoke_table[1]) goto fail1; journal->j_revoke = journal->j_revoke_table[1]; spin_lock_init(&journal->j_revoke_lock); return 0; fail1: jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]); journal->j_revoke_table[0] = NULL; fail0: return -ENOMEM; } /* Destroy a journal's revoke table. The table must already be empty! */ void jbd2_journal_destroy_revoke(journal_t *journal) { journal->j_revoke = NULL; if (journal->j_revoke_table[0]) jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]); if (journal->j_revoke_table[1]) jbd2_journal_destroy_revoke_table(journal->j_revoke_table[1]); } #ifdef __KERNEL__ /* * jbd2_journal_revoke: revoke a given buffer_head from the journal. This * prevents the block from being replayed during recovery if we take a * crash after this current transaction commits. Any subsequent * metadata writes of the buffer in this transaction cancel the * revoke. * * Note that this call may block --- it is up to the caller to make * sure that there are no further calls to journal_write_metadata * before the revoke is complete. In ext3, this implies calling the * revoke before clearing the block bitmap when we are deleting * metadata. * * Revoke performs a jbd2_journal_forget on any buffer_head passed in as a * parameter, but does _not_ forget the buffer_head if the bh was only * found implicitly. * * bh_in may not be a journalled buffer - it may have come off * the hash tables without an attached journal_head. * * If bh_in is non-zero, jbd2_journal_revoke() will decrement its b_count * by one. */ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr, struct buffer_head *bh_in) { struct buffer_head *bh = NULL; journal_t *journal; struct block_device *bdev; int err; might_sleep(); if (bh_in) BUFFER_TRACE(bh_in, "enter"); journal = handle->h_transaction->t_journal; if (!jbd2_journal_set_features(journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)){ J_ASSERT (!"Cannot set revoke feature!"); return -EINVAL; } bdev = journal->j_fs_dev; bh = bh_in; if (!bh) { bh = __find_get_block(bdev, blocknr, journal->j_blocksize); if (bh) BUFFER_TRACE(bh, "found on hash"); } #ifdef JBD2_EXPENSIVE_CHECKING else { struct buffer_head *bh2; /* If there is a different buffer_head lying around in * memory anywhere... */ bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize); if (bh2) { /* ... and it has RevokeValid status... */ if (bh2 != bh && buffer_revokevalid(bh2)) /* ...then it better be revoked too, * since it's illegal to create a revoke * record against a buffer_head which is * not marked revoked --- that would * risk missing a subsequent revoke * cancel. 
*/ J_ASSERT_BH(bh2, buffer_revoked(bh2)); put_bh(bh2); } } #endif if (WARN_ON_ONCE(handle->h_revoke_credits <= 0)) { if (!bh_in) brelse(bh); return -EIO; } /* We really ought not ever to revoke twice in a row without first having the revoke cancelled: it's illegal to free a block twice without allocating it in between! */ if (bh) { if (!J_EXPECT_BH(bh, !buffer_revoked(bh), "inconsistent data on disk")) { if (!bh_in) brelse(bh); return -EIO; } set_buffer_revoked(bh); set_buffer_revokevalid(bh); if (bh_in) { BUFFER_TRACE(bh_in, "call jbd2_journal_forget"); jbd2_journal_forget(handle, bh_in); } else { BUFFER_TRACE(bh, "call brelse"); __brelse(bh); } } handle->h_revoke_credits--; jbd2_debug(2, "insert revoke for block %llu, bh_in=%p\n",blocknr, bh_in); err = insert_revoke_hash(journal, blocknr, handle->h_transaction->t_tid); BUFFER_TRACE(bh_in, "exit"); return err; } /* * Cancel an outstanding revoke. For use only internally by the * journaling code (called from jbd2_journal_get_write_access). * * We trust buffer_revoked() on the buffer if the buffer is already * being journaled: if there is no revoke pending on the buffer, then we * don't do anything here. * * This would break if it were possible for a buffer to be revoked and * discarded, and then reallocated within the same transaction. In such * a case we would have lost the revoked bit, but when we arrived here * the second time we would still have a pending revoke to cancel. So, * do not trust the Revoked bit on buffers unless RevokeValid is also * set. */ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh) { struct jbd2_revoke_record_s *record; journal_t *journal = handle->h_transaction->t_journal; int need_cancel; int did_revoke = 0; /* akpm: debug */ struct buffer_head *bh = jh2bh(jh); jbd2_debug(4, "journal_head %p, cancelling revoke\n", jh); /* Is the existing Revoke bit valid? If so, we trust it, and * only perform the full cancel if the revoke bit is set. If * not, we can't trust the revoke bit, and we need to do the * full search for a revoke record. */ if (test_set_buffer_revokevalid(bh)) { need_cancel = test_clear_buffer_revoked(bh); } else { need_cancel = 1; clear_buffer_revoked(bh); } if (need_cancel) { record = find_revoke_record(journal, bh->b_blocknr); if (record) { jbd2_debug(4, "cancelled existing revoke on " "blocknr %llu\n", (unsigned long long)bh->b_blocknr); spin_lock(&journal->j_revoke_lock); list_del(&record->hash); spin_unlock(&journal->j_revoke_lock); kmem_cache_free(jbd2_revoke_record_cache, record); did_revoke = 1; } } #ifdef JBD2_EXPENSIVE_CHECKING /* There better not be one left behind by now! */ record = find_revoke_record(journal, bh->b_blocknr); J_ASSERT_JH(jh, record == NULL); #endif /* Finally, have we just cleared revoke on an unhashed * buffer_head? If so, we'd better make sure we clear the * revoked status on any hashed alias too, otherwise the revoke * state machine will get very upset later on. */ if (need_cancel) { struct buffer_head *bh2; bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size); if (bh2) { if (bh2 != bh) clear_buffer_revoked(bh2); __brelse(bh2); } } return did_revoke; } /* * journal_clear_revoked_flag clears revoked flag of buffers in * revoke table to reflect there is no revoked buffers in the next * transaction which is going to be started. 
*/ void jbd2_clear_buffer_revoked_flags(journal_t *journal) { struct jbd2_revoke_table_s *revoke = journal->j_revoke; int i = 0; for (i = 0; i < revoke->hash_size; i++) { struct list_head *hash_list; struct list_head *list_entry; hash_list = &revoke->hash_table[i]; list_for_each(list_entry, hash_list) { struct jbd2_revoke_record_s *record; struct buffer_head *bh; record = (struct jbd2_revoke_record_s *)list_entry; bh = __find_get_block(journal->j_fs_dev, record->blocknr, journal->j_blocksize); if (bh) { clear_buffer_revoked(bh); __brelse(bh); } } } } /* journal_switch_revoke table select j_revoke for next transaction * we do not want to suspend any processing until all revokes are * written -bzzz */ void jbd2_journal_switch_revoke_table(journal_t *journal) { int i; if (journal->j_revoke == journal->j_revoke_table[0]) journal->j_revoke = journal->j_revoke_table[1]; else journal->j_revoke = journal->j_revoke_table[0]; for (i = 0; i < journal->j_revoke->hash_size; i++) INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]); } /* * Write revoke records to the journal for all entries in the current * revoke hash, deleting the entries as we go. */ void jbd2_journal_write_revoke_records(transaction_t *transaction, struct list_head *log_bufs) { journal_t *journal = transaction->t_journal; struct buffer_head *descriptor; struct jbd2_revoke_record_s *record; struct jbd2_revoke_table_s *revoke; struct list_head *hash_list; int i, offset, count; descriptor = NULL; offset = 0; count = 0; /* select revoke table for committing transaction */ revoke = journal->j_revoke == journal->j_revoke_table[0] ? journal->j_revoke_table[1] : journal->j_revoke_table[0]; for (i = 0; i < revoke->hash_size; i++) { hash_list = &revoke->hash_table[i]; while (!list_empty(hash_list)) { record = (struct jbd2_revoke_record_s *) hash_list->next; write_one_revoke_record(transaction, log_bufs, &descriptor, &offset, record); count++; list_del(&record->hash); kmem_cache_free(jbd2_revoke_record_cache, record); } } if (descriptor) flush_descriptor(journal, descriptor, offset); jbd2_debug(1, "Wrote %d revoke records\n", count); } /* * Write out one revoke record. We need to create a new descriptor * block if the old one is full or if we have not already created one. */ static void write_one_revoke_record(transaction_t *transaction, struct list_head *log_bufs, struct buffer_head **descriptorp, int *offsetp, struct jbd2_revoke_record_s *record) { journal_t *journal = transaction->t_journal; int csum_size = 0; struct buffer_head *descriptor; int sz, offset; /* If we are already aborting, this all becomes a noop. We still need to go round the loop in jbd2_journal_write_revoke_records in order to free all of the revoke records: only the IO to the journal is omitted. */ if (is_journal_aborted(journal)) return; descriptor = *descriptorp; offset = *offsetp; /* Do we need to leave space at the end for a checksum? 
*/ if (jbd2_journal_has_csum_v2or3(journal)) csum_size = sizeof(struct jbd2_journal_block_tail); if (jbd2_has_feature_64bit(journal)) sz = 8; else sz = 4; /* Make sure we have a descriptor with space left for the record */ if (descriptor) { if (offset + sz > journal->j_blocksize - csum_size) { flush_descriptor(journal, descriptor, offset); descriptor = NULL; } } if (!descriptor) { descriptor = jbd2_journal_get_descriptor_buffer(transaction, JBD2_REVOKE_BLOCK); if (!descriptor) return; /* Record it so that we can wait for IO completion later */ BUFFER_TRACE(descriptor, "file in log_bufs"); jbd2_file_log_bh(log_bufs, descriptor); offset = sizeof(jbd2_journal_revoke_header_t); *descriptorp = descriptor; } if (jbd2_has_feature_64bit(journal)) * ((__be64 *)(&descriptor->b_data[offset])) = cpu_to_be64(record->blocknr); else * ((__be32 *)(&descriptor->b_data[offset])) = cpu_to_be32(record->blocknr); offset += sz; *offsetp = offset; } /* * Flush a revoke descriptor out to the journal. If we are aborting, * this is a noop; otherwise we are generating a buffer which needs to * be waited for during commit, so it has to go onto the appropriate * journal buffer list. */ static void flush_descriptor(journal_t *journal, struct buffer_head *descriptor, int offset) { jbd2_journal_revoke_header_t *header; if (is_journal_aborted(journal)) return; header = (jbd2_journal_revoke_header_t *)descriptor->b_data; header->r_count = cpu_to_be32(offset); jbd2_descriptor_block_csum_set(journal, descriptor); set_buffer_jwrite(descriptor); BUFFER_TRACE(descriptor, "write"); set_buffer_dirty(descriptor); write_dirty_buffer(descriptor, REQ_SYNC); } #endif /* * Revoke support for recovery. * * Recovery needs to be able to: * * record all revoke records, including the tid of the latest instance * of each revoke in the journal * * check whether a given block in a given transaction should be replayed * (ie. has not been revoked by a revoke record in that or a subsequent * transaction) * * empty the revoke table after recovery. */ /* * First, setting revoke records. We create a new revoke record for * every block ever revoked in the log as we scan it for recovery, and * we update the existing records if we find multiple revokes for a * single block. */ int jbd2_journal_set_revoke(journal_t *journal, unsigned long long blocknr, tid_t sequence) { struct jbd2_revoke_record_s *record; record = find_revoke_record(journal, blocknr); if (record) { /* If we have multiple occurrences, only record the * latest sequence number in the hashed record */ if (tid_gt(sequence, record->sequence)) record->sequence = sequence; return 0; } return insert_revoke_hash(journal, blocknr, sequence); } /* * Test revoke records. For a given block referenced in the log, has * that block been revoked? A revoke record with a given transaction * sequence number revokes all blocks in that transaction and earlier * ones, but later transactions still need replayed. */ int jbd2_journal_test_revoke(journal_t *journal, unsigned long long blocknr, tid_t sequence) { struct jbd2_revoke_record_s *record; record = find_revoke_record(journal, blocknr); if (!record) return 0; if (tid_gt(sequence, record->sequence)) return 0; return 1; } /* * Finally, once recovery is over, we need to clear the revoke table so * that it can be reused by the running filesystem. 
*/ void jbd2_journal_clear_revoke(journal_t *journal) { int i; struct list_head *hash_list; struct jbd2_revoke_record_s *record; struct jbd2_revoke_table_s *revoke; revoke = journal->j_revoke; for (i = 0; i < revoke->hash_size; i++) { hash_list = &revoke->hash_table[i]; while (!list_empty(hash_list)) { record = (struct jbd2_revoke_record_s*) hash_list->next; list_del(&record->hash); kmem_cache_free(jbd2_revoke_record_cache, record); } } } |
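/*
 * Editorial illustration, not part of the original fs/jbd2/revoke.c: a minimal
 * sketch of how the recovery-side hooks above (jbd2_journal_set_revoke(),
 * jbd2_journal_test_revoke(), jbd2_journal_clear_revoke()) are meant to be
 * driven, following the "Revoke support for recovery" comment. The two-pass
 * walk and the helpers next_log_block(), replay_block(), NEXT_REVOKE and
 * NEXT_DATA are hypothetical stand-ins; the real walk lives in
 * fs/jbd2/recovery.c.
 */
#if 0	/* sketch only, never compiled */
static void example_recovery_walk(journal_t *journal)
{
	unsigned long long blocknr;
	tid_t tid;

	/* next_log_block()/replay_block() are hypothetical helpers. */

	/* Scan pass: remember the newest revoke seen for each revoked block. */
	while (next_log_block(journal, &blocknr, &tid, NEXT_REVOKE))
		jbd2_journal_set_revoke(journal, blocknr, tid);

	/* Replay pass: skip blocks revoked in this or a later transaction. */
	while (next_log_block(journal, &blocknr, &tid, NEXT_DATA))
		if (!jbd2_journal_test_revoke(journal, blocknr, tid))
			replay_block(journal, blocknr);

	/* Recovery is over: empty the table so the filesystem can reuse it. */
	jbd2_journal_clear_revoke(journal);
}
#endif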
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_MM_H #define _LINUX_SCHED_MM_H #include <linux/kernel.h> #include <linux/atomic.h> #include <linux/sched.h> #include <linux/mm_types.h> #include <linux/gfp.h> #include <linux/sync_core.h> #include <linux/sched/coredump.h> /* * Routines for handling mm_structs */ extern struct mm_struct *mm_alloc(void); /** * mmgrab() - Pin a &struct mm_struct. * @mm: The &struct mm_struct to pin. * * Make sure that @mm will not get freed even after the owning task * exits. This doesn't guarantee that the associated address space * will still exist later on and mmget_not_zero() has to be used before * accessing it. * * This is a preferred way to pin @mm for a longer/unbounded amount * of time. * * Use mmdrop() to release the reference acquired by mmgrab(). * * See also <Documentation/mm/active_mm.rst> for an in-depth explanation * of &mm_struct.mm_count vs &mm_struct.mm_users. */ static inline void mmgrab(struct mm_struct *mm) { atomic_inc(&mm->mm_count); } static inline void smp_mb__after_mmgrab(void) { smp_mb__after_atomic(); } extern void __mmdrop(struct mm_struct *mm); static inline void mmdrop(struct mm_struct *mm) { /* * The implicit full barrier implied by atomic_dec_and_test() is * required by the membarrier system call before returning to * user-space, after storing to rq->curr.
*/ if (unlikely(atomic_dec_and_test(&mm->mm_count))) __mmdrop(mm); } #ifdef CONFIG_PREEMPT_RT /* * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is * by far the least expensive way to do that. */ static inline void __mmdrop_delayed(struct rcu_head *rhp) { struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); __mmdrop(mm); } /* * Invoked from finish_task_switch(). Delegates the heavy lifting on RT * kernels via RCU. */ static inline void mmdrop_sched(struct mm_struct *mm) { /* Provides a full memory barrier. See mmdrop() */ if (atomic_dec_and_test(&mm->mm_count)) call_rcu(&mm->delayed_drop, __mmdrop_delayed); } #else static inline void mmdrop_sched(struct mm_struct *mm) { mmdrop(mm); } #endif /* Helpers for lazy TLB mm refcounting */ static inline void mmgrab_lazy_tlb(struct mm_struct *mm) { if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) mmgrab(mm); } static inline void mmdrop_lazy_tlb(struct mm_struct *mm) { if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) { mmdrop(mm); } else { /* * mmdrop_lazy_tlb must provide a full memory barrier, see the * membarrier comment finish_task_switch which relies on this. */ smp_mb(); } } static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm) { if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) mmdrop_sched(mm); else smp_mb(); /* see mmdrop_lazy_tlb() above */ } /** * mmget() - Pin the address space associated with a &struct mm_struct. * @mm: The address space to pin. * * Make sure that the address space of the given &struct mm_struct doesn't * go away. This does not protect against parts of the address space being * modified or freed, however. * * Never use this function to pin this address space for an * unbounded/indefinite amount of time. * * Use mmput() to release the reference acquired by mmget(). * * See also <Documentation/mm/active_mm.rst> for an in-depth explanation * of &mm_struct.mm_count vs &mm_struct.mm_users. */ static inline void mmget(struct mm_struct *mm) { atomic_inc(&mm->mm_users); } static inline bool mmget_not_zero(struct mm_struct *mm) { return atomic_inc_not_zero(&mm->mm_users); } /* mmput gets rid of the mappings and all user-space */ extern void mmput(struct mm_struct *); #ifdef CONFIG_MMU /* same as above but performs the slow path from the async context. Can * be called from the atomic context as well */ void mmput_async(struct mm_struct *); #endif /* Grab a reference to a task's mm, if it is not already going away */ extern struct mm_struct *get_task_mm(struct task_struct *task); /* * Grab a reference to a task's mm, if it is not already going away * and ptrace_may_access with the mode parameter passed to it * succeeds. 
*/ extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); /* Remove the current tasks stale references to the old mm_struct on exit() */ extern void exit_mm_release(struct task_struct *, struct mm_struct *); /* Remove the current tasks stale references to the old mm_struct on exec() */ extern void exec_mm_release(struct task_struct *, struct mm_struct *); #ifdef CONFIG_MEMCG extern void mm_update_next_owner(struct mm_struct *mm); #else static inline void mm_update_next_owner(struct mm_struct *mm) { } #endif /* CONFIG_MEMCG */ #ifdef CONFIG_MMU #ifndef arch_get_mmap_end #define arch_get_mmap_end(addr, len, flags) (TASK_SIZE) #endif #ifndef arch_get_mmap_base #define arch_get_mmap_base(addr, base) (base) #endif extern void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack); unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags); unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t); unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags); unsigned long generic_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags); unsigned long generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags); #else static inline void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) {} #endif static inline bool in_vfork(struct task_struct *tsk) { bool ret; /* * need RCU to access ->real_parent if CLONE_VM was used along with * CLONE_PARENT. * * We check real_parent->mm == tsk->mm because CLONE_VFORK does not * imply CLONE_VM * * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus * ->real_parent is not necessarily the task doing vfork(), so in * theory we can't rely on task_lock() if we want to dereference it. * * And in this case we can't trust the real_parent->mm == tsk->mm * check, it can be false negative. But we do not care, if init or * another oom-unkillable task does this it should blame itself. */ rcu_read_lock(); ret = tsk->vfork_done && rcu_dereference(tsk->real_parent)->mm == tsk->mm; rcu_read_unlock(); return ret; } /* * Applies per-task gfp context to the given allocation flags. 
* PF_MEMALLOC_NOIO implies GFP_NOIO * PF_MEMALLOC_NOFS implies GFP_NOFS * PF_MEMALLOC_PIN implies !GFP_MOVABLE */ static inline gfp_t current_gfp_context(gfp_t flags) { unsigned int pflags = READ_ONCE(current->flags); if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) { /* * NOIO implies both NOIO and NOFS and it is a weaker context * so always make sure it makes precedence */ if (pflags & PF_MEMALLOC_NOIO) flags &= ~(__GFP_IO | __GFP_FS); else if (pflags & PF_MEMALLOC_NOFS) flags &= ~__GFP_FS; if (pflags & PF_MEMALLOC_PIN) flags &= ~__GFP_MOVABLE; } return flags; } #ifdef CONFIG_LOCKDEP extern void __fs_reclaim_acquire(unsigned long ip); extern void __fs_reclaim_release(unsigned long ip); extern void fs_reclaim_acquire(gfp_t gfp_mask); extern void fs_reclaim_release(gfp_t gfp_mask); #else static inline void __fs_reclaim_acquire(unsigned long ip) { } static inline void __fs_reclaim_release(unsigned long ip) { } static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } static inline void fs_reclaim_release(gfp_t gfp_mask) { } #endif /* Any memory-allocation retry loop should use * memalloc_retry_wait(), and pass the flags for the most * constrained allocation attempt that might have failed. * This provides useful documentation of where loops are, * and a central place to fine tune the waiting as the MM * implementation changes. */ static inline void memalloc_retry_wait(gfp_t gfp_flags) { /* We use io_schedule_timeout because waiting for memory * typically included waiting for dirty pages to be * written out, which requires IO. */ __set_current_state(TASK_UNINTERRUPTIBLE); gfp_flags = current_gfp_context(gfp_flags); if (gfpflags_allow_blocking(gfp_flags) && !(gfp_flags & __GFP_NORETRY)) /* Probably waited already, no need for much more */ io_schedule_timeout(1); else /* Probably didn't wait, and has now released a lock, * so now is a good time to wait */ io_schedule_timeout(HZ/50); } /** * might_alloc - Mark possible allocation sites * @gfp_mask: gfp_t flags that would be used to allocate * * Similar to might_sleep() and other annotations, this can be used in functions * that might allocate, but often don't. Compiles to nothing without * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp allows blocking. */ static inline void might_alloc(gfp_t gfp_mask) { fs_reclaim_acquire(gfp_mask); fs_reclaim_release(gfp_mask); might_sleep_if(gfpflags_allow_blocking(gfp_mask)); } /** * memalloc_flags_save - Add a PF_* flag to current->flags, save old value * * This allows PF_* flags to be conveniently added, irrespective of current * value, and then the old version restored with memalloc_flags_restore(). */ static inline unsigned memalloc_flags_save(unsigned flags) { unsigned oldflags = ~current->flags & flags; current->flags |= flags; return oldflags; } static inline void memalloc_flags_restore(unsigned flags) { current->flags &= ~flags; } /** * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope. * * This functions marks the beginning of the GFP_NOIO allocation scope. * All further allocations will implicitly drop __GFP_IO flag and so * they are safe for the IO critical section from the allocation recursion * point of view. Use memalloc_noio_restore to end the scope with flags * returned by this function. * * Context: This function is safe to be used from any context. * Return: The saved flags to be passed to memalloc_noio_restore. 
*/ static inline unsigned int memalloc_noio_save(void) { return memalloc_flags_save(PF_MEMALLOC_NOIO); } /** * memalloc_noio_restore - Ends the implicit GFP_NOIO scope. * @flags: Flags to restore. * * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function. * Always make sure that the given flags is the return value from the * pairing memalloc_noio_save call. */ static inline void memalloc_noio_restore(unsigned int flags) { memalloc_flags_restore(flags); } /** * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope. * * This functions marks the beginning of the GFP_NOFS allocation scope. * All further allocations will implicitly drop __GFP_FS flag and so * they are safe for the FS critical section from the allocation recursion * point of view. Use memalloc_nofs_restore to end the scope with flags * returned by this function. * * Context: This function is safe to be used from any context. * Return: The saved flags to be passed to memalloc_nofs_restore. */ static inline unsigned int memalloc_nofs_save(void) { return memalloc_flags_save(PF_MEMALLOC_NOFS); } /** * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope. * @flags: Flags to restore. * * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function. * Always make sure that the given flags is the return value from the * pairing memalloc_nofs_save call. */ static inline void memalloc_nofs_restore(unsigned int flags) { memalloc_flags_restore(flags); } /** * memalloc_noreclaim_save - Marks implicit __GFP_MEMALLOC scope. * * This function marks the beginning of the __GFP_MEMALLOC allocation scope. * All further allocations will implicitly add the __GFP_MEMALLOC flag, which * prevents entering reclaim and allows access to all memory reserves. This * should only be used when the caller guarantees the allocation will allow more * memory to be freed very shortly, i.e. it needs to allocate some memory in * the process of freeing memory, and cannot reclaim due to potential recursion. * * Users of this scope have to be extremely careful to not deplete the reserves * completely and implement a throttling mechanism which controls the * consumption of the reserve based on the amount of freed memory. Usage of a * pre-allocated pool (e.g. mempool) should be always considered before using * this scope. * * Individual allocations under the scope can opt out using __GFP_NOMEMALLOC * * Context: This function should not be used in an interrupt context as that one * does not give PF_MEMALLOC access to reserves. * See __gfp_pfmemalloc_flags(). * Return: The saved flags to be passed to memalloc_noreclaim_restore. */ static inline unsigned int memalloc_noreclaim_save(void) { return memalloc_flags_save(PF_MEMALLOC); } /** * memalloc_noreclaim_restore - Ends the implicit __GFP_MEMALLOC scope. * @flags: Flags to restore. * * Ends the implicit __GFP_MEMALLOC scope started by memalloc_noreclaim_save * function. Always make sure that the given flags is the return value from the * pairing memalloc_noreclaim_save call. */ static inline void memalloc_noreclaim_restore(unsigned int flags) { memalloc_flags_restore(flags); } /** * memalloc_pin_save - Marks implicit ~__GFP_MOVABLE scope. * * This function marks the beginning of the ~__GFP_MOVABLE allocation scope. * All further allocations will implicitly remove the __GFP_MOVABLE flag, which * will constraint the allocations to zones that allow long term pinning, i.e. * not ZONE_MOVABLE zones. * * Return: The saved flags to be passed to memalloc_pin_restore. 
*/ static inline unsigned int memalloc_pin_save(void) { return memalloc_flags_save(PF_MEMALLOC_PIN); } /** * memalloc_pin_restore - Ends the implicit ~__GFP_MOVABLE scope. * @flags: Flags to restore. * * Ends the implicit ~__GFP_MOVABLE scope started by memalloc_pin_save function. * Always make sure that the given flags is the return value from the pairing * memalloc_pin_save call. */ static inline void memalloc_pin_restore(unsigned int flags) { memalloc_flags_restore(flags); } #ifdef CONFIG_MEMCG DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg); /** * set_active_memcg - Starts the remote memcg charging scope. * @memcg: memcg to charge. * * This function marks the beginning of the remote memcg charging scope. All the * __GFP_ACCOUNT allocations till the end of the scope will be charged to the * given memcg. * * Please, make sure that caller has a reference to the passed memcg structure, * so its lifetime is guaranteed to exceed the scope between two * set_active_memcg() calls. * * NOTE: This function can nest. Users must save the return value and * reset the previous value after their own charging scope is over. */ static inline struct mem_cgroup * set_active_memcg(struct mem_cgroup *memcg) { struct mem_cgroup *old; if (!in_task()) { old = this_cpu_read(int_active_memcg); this_cpu_write(int_active_memcg, memcg); } else { old = current->active_memcg; current->active_memcg = memcg; } return old; } #else static inline struct mem_cgroup * set_active_memcg(struct mem_cgroup *memcg) { return NULL; } #endif #ifdef CONFIG_MEMBARRIER enum { MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0), MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1), MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2), MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3), MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4), MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5), MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6), MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7), }; enum { MEMBARRIER_FLAG_SYNC_CORE = (1U << 0), MEMBARRIER_FLAG_RSEQ = (1U << 1), }; #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS #include <asm/membarrier.h> #endif static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) { if (current->mm != mm) return; if (likely(!(atomic_read(&mm->membarrier_state) & MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE))) return; sync_core_before_usermode(); } extern void membarrier_exec_mmap(struct mm_struct *mm); extern void membarrier_update_current_mm(struct mm_struct *next_mm); #else #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS static inline void membarrier_arch_switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { } #endif static inline void membarrier_exec_mmap(struct mm_struct *mm) { } static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) { } static inline void membarrier_update_current_mm(struct mm_struct *next_mm) { } #endif #endif /* _LINUX_SCHED_MM_H */ |
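/*
 * Editorial illustration, not part of the original <linux/sched/mm.h>: a
 * minimal usage sketch of the allocation-scope API documented above. The
 * function name and the kmalloc() call are only an example; the point is the
 * documented contract of pairing memalloc_nofs_save() with
 * memalloc_nofs_restore() on the value it returned.
 */
#if 0	/* sketch only, never compiled */
static void *example_nofs_scope_alloc(size_t len)
{
	/* Everything allocated inside this scope implicitly drops __GFP_FS. */
	unsigned int flags = memalloc_nofs_save();
	void *p = kmalloc(len, GFP_KERNEL);	/* effectively GFP_NOFS here */

	memalloc_nofs_restore(flags);
	return p;
}
#endif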
/* SPDX-License-Identifier: ISC */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> */ #ifndef __MT76x02_H #define __MT76x02_H #include <linux/kfifo.h> #include "mt76.h" #include "mt76x02_regs.h" #include "mt76x02_mac.h" #include "mt76x02_dfs.h" #include "mt76x02_dma.h" #define MT76x02_TX_RING_SIZE 512 #define MT76x02_PSD_RING_SIZE 128 #define MT76x02_N_WCIDS 128 #define MT_CALIBRATE_INTERVAL HZ #define MT_MAC_WORK_INTERVAL (HZ / 10) #define MT_WATCHDOG_TIME (HZ / 10) #define MT_TX_HANG_TH 10 #define MT_MAX_CHAINS 2 struct mt76x02_rx_freq_cal { s8 high_gain[MT_MAX_CHAINS]; s8 rssi_offset[MT_MAX_CHAINS]; s8 lna_gain; u32 mcu_gain; s16 temp_offset; u8 freq_offset; }; struct mt76x02_calibration { struct mt76x02_rx_freq_cal rx; u8 agc_gain_init[MT_MAX_CHAINS]; u8 agc_gain_cur[MT_MAX_CHAINS]; u16 false_cca; s8 avg_rssi_all; s8 agc_gain_adjust; s8 agc_lowest_gain; s8 low_gain; s8 temp_vco; s8 temp; bool init_cal_done; bool tssi_cal_done; bool tssi_comp_pending; bool dpd_cal_done; bool channel_cal_done; bool gain_init_done; int tssi_target; s8 tssi_dc; }; struct mt76x02_beacon_ops { unsigned int nslots; unsigned int slot_size; void (*pre_tbtt_enable)(struct mt76x02_dev *dev, bool en); void (*beacon_enable)(struct mt76x02_dev *dev, bool en); }; #define mt76x02_beacon_enable(dev, enable) \ (dev)->beacon_ops->beacon_enable(dev, enable) #define mt76x02_pre_tbtt_enable(dev, enable) \ (dev)->beacon_ops->pre_tbtt_enable(dev, enable) struct mt76x02_rate_power { union { struct { s8 cck[4]; s8 ofdm[8]; s8 ht[16]; s8 vht[2]; }; s8 all[30]; }; }; struct mt76x02_dev { union { /* must be first */ struct mt76_dev mt76; struct mt76_phy mphy; }; struct mac_address macaddr_list[8]; struct mutex phy_mutex; u8 txdone_seq; DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status); spinlock_t txstatus_fifo_lock; u32 tx_airtime; u32 ampdu_ref; struct sk_buff *rx_head; struct delayed_work cal_work; struct delayed_work wdt_work; struct hrtimer pre_tbtt_timer; struct work_struct pre_tbtt_work; const struct mt76x02_beacon_ops *beacon_ops; u8 beacon_data_count; u8 tbtt_count; u32 tx_hang_reset; u8 tx_hang_check[4]; u8 beacon_hang_check; u8 mcu_timeout; struct mt76x02_rate_power rate_power; struct mt76x02_calibration cal; int txpower_conf; s8 target_power; s8 target_power_delta[2]; bool enable_tpc; bool no_2ghz; s16 coverage_class; u8 slottime; struct mt76x02_dfs_pattern_detector dfs_pd; /* edcca monitor */ unsigned long ed_trigger_timeout; bool
ed_tx_blocked; bool ed_monitor; u8 ed_monitor_enabled; u8 ed_monitor_learning; u8 ed_trigger; u8 ed_silent; ktime_t ed_time; }; extern struct ieee80211_rate mt76x02_rates[12]; int mt76x02_init_device(struct mt76x02_dev *dev); void mt76x02_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast); int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev); int mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void mt76x02_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params); int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key); int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue, const struct ieee80211_tx_queue_params *params); void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev, const struct ieee80211_tx_rate *rate); s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj); void mt76x02_wdt_work(struct work_struct *work); void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr); void mt76x02_set_tx_ackto(struct mt76x02_dev *dev); void mt76x02_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class); int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val); void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len); bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update); void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb, u32 *info); void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q); irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance); void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb); int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, struct mt76_tx_info *tx_info); void mt76x02_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); void mt76x02_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u64 changed); void mt76x02_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type); struct beacon_bc_data { struct mt76x02_dev *dev; struct sk_buff_head q; struct sk_buff *tail[8]; }; void mt76x02_init_beacon_config(struct mt76x02_dev *dev); void mt76x02e_init_beacon_config(struct mt76x02_dev *dev); void mt76x02_resync_beacon_timer(struct mt76x02_dev *dev); void mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); void mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev, struct beacon_bc_data *data, int max_nframes); void mt76x02_mac_start(struct mt76x02_dev *dev); void mt76x02_init_debugfs(struct mt76x02_dev *dev); static inline bool is_mt76x0(struct mt76x02_dev *dev) { return mt76_chip(&dev->mt76) == 0x7610 || mt76_chip(&dev->mt76) == 0x7630 || mt76_chip(&dev->mt76) == 0x7650; } 
static inline bool is_mt76x2(struct mt76x02_dev *dev) { return mt76_chip(&dev->mt76) == 0x7612 || mt76_chip(&dev->mt76) == 0x7632 || mt76_chip(&dev->mt76) == 0x7662 || mt76_chip(&dev->mt76) == 0x7602; } static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask) { mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask); } static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask) { mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); } static inline bool mt76x02_wait_for_txrx_idle(struct mt76_dev *dev) { return __mt76_poll_msec(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 100); } static inline struct mt76x02_sta * mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx) { struct mt76_wcid *wcid; if (idx >= MT76x02_N_WCIDS) return NULL; wcid = rcu_dereference(dev->wcid[idx]); if (!wcid) return NULL; return container_of(wcid, struct mt76x02_sta, wcid); } static inline struct mt76_wcid * mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast) { if (!sta) return NULL; if (unicast) return &sta->wcid; else return &sta->vif->group_wcid; } #endif /* __MT76x02_H */ |
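/*
 * Editorial illustration, not part of the original mt76x02.h: a sketch of how
 * a chip/bus backend is expected to provide struct mt76x02_beacon_ops so that
 * the mt76x02_beacon_enable()/mt76x02_pre_tbtt_enable() wrappers above resolve
 * to its callbacks. All names and the nslots/slot_size values below are made
 * up for illustration; they are not taken from a real backend.
 */
#if 0	/* sketch only, never compiled */
static void example_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
{
	/* e.g. arm or cancel dev->pre_tbtt_timer */
}

static void example_beacon_enable(struct mt76x02_dev *dev, bool en)
{
	/* e.g. toggle beacon transmission in the MAC */
}

static const struct mt76x02_beacon_ops example_beacon_ops = {
	.nslots		= 8,	/* illustrative value */
	.slot_size	= 512,	/* illustrative value */
	.pre_tbtt_enable = example_pre_tbtt_enable,
	.beacon_enable	= example_beacon_enable,
};

static void example_attach(struct mt76x02_dev *dev)
{
	dev->beacon_ops = &example_beacon_ops;
	mt76x02_beacon_enable(dev, true); /* calls example_beacon_enable(dev, true) */
}
#endif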
// SPDX-License-Identifier: GPL-2.0 /* * Emagic EMI 2|6 usb audio interface firmware loader. * Copyright (C) 2002 * Tapio Laxström (tapio.laxstrom@iptime.fi) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/delay.h> #include <linux/firmware.h> #include <linux/ihex.h> /* include firmware (variables)*/ /* FIXME: This is a quick and dirty solution! */ #define SPDIF /* if you want SPDIF comment next line */ //#undef SPDIF /* if you want MIDI uncomment this line */ #ifdef SPDIF #define FIRMWARE_FW "emi62/spdif.fw" #else #define FIRMWARE_FW "emi62/midi.fw" #endif #define EMI62_VENDOR_ID 0x086a /* Emagic Soft-und Hardware GmBH */ #define EMI62_PRODUCT_ID 0x0110 /* EMI 6|2m without firmware */ #define ANCHOR_LOAD_INTERNAL 0xA0 /* Vendor specific request code for Anchor Upload/Download (This one is implemented in the core) */ #define ANCHOR_LOAD_EXTERNAL 0xA3 /* This command is not implemented in the core. Requires firmware */ #define ANCHOR_LOAD_FPGA 0xA5 /* This command is not implemented in the core. Requires firmware. Emagic extension */ #define MAX_INTERNAL_ADDRESS 0x1B3F /* This is the highest internal RAM address for the AN2131Q */ #define CPUCS_REG 0x7F92 /* EZ-USB Control and Status Register. Bit 0 controls 8051 reset */ #define INTERNAL_RAM(address) (address <= MAX_INTERNAL_ADDRESS) static int emi62_writememory(struct usb_device *dev, int address, const unsigned char *data, int length, __u8 bRequest); static int emi62_set_reset(struct usb_device *dev, unsigned char reset_bit); static int emi62_load_firmware (struct usb_device *dev); static int emi62_probe(struct usb_interface *intf, const struct usb_device_id *id); static void emi62_disconnect(struct usb_interface *intf); /* thanks to drivers/usb/serial/keyspan_pda.c code */ static int emi62_writememory(struct usb_device *dev, int address, const unsigned char *data, int length, __u8 request) { int result; unsigned char *buffer = kmemdup(data, length, GFP_KERNEL); if (!buffer) { dev_err(&dev->dev, "kmalloc(%d) failed.\n", length); return -ENOMEM; } /* Note: usb_control_msg returns negative value on error or length of the * data that was written!
*/ result = usb_control_msg (dev, usb_sndctrlpipe(dev, 0), request, 0x40, address, 0, buffer, length, 300); kfree (buffer); return result; } /* thanks to drivers/usb/serial/keyspan_pda.c code */ static int emi62_set_reset (struct usb_device *dev, unsigned char reset_bit) { int response; dev_info(&dev->dev, "%s - %d\n", __func__, reset_bit); response = emi62_writememory (dev, CPUCS_REG, &reset_bit, 1, 0xa0); if (response < 0) dev_err(&dev->dev, "set_reset (%d) failed\n", reset_bit); return response; } #define FW_LOAD_SIZE 1023 static int emi62_load_firmware (struct usb_device *dev) { const struct firmware *loader_fw = NULL; const struct firmware *bitstream_fw = NULL; const struct firmware *firmware_fw = NULL; const struct ihex_binrec *rec; int err = -ENOMEM; int i; __u32 addr; /* Address to write */ __u8 *buf; dev_dbg(&dev->dev, "load_firmware\n"); buf = kmalloc(FW_LOAD_SIZE, GFP_KERNEL); if (!buf) goto wraperr; err = request_ihex_firmware(&loader_fw, "emi62/loader.fw", &dev->dev); if (err) goto nofw; err = request_ihex_firmware(&bitstream_fw, "emi62/bitstream.fw", &dev->dev); if (err) goto nofw; err = request_ihex_firmware(&firmware_fw, FIRMWARE_FW, &dev->dev); if (err) { nofw: goto wraperr; } /* Assert reset (stop the CPU in the EMI) */ err = emi62_set_reset(dev,1); if (err < 0) goto wraperr; rec = (const struct ihex_binrec *)loader_fw->data; /* 1. We need to put the loader for the FPGA into the EZ-USB */ while (rec) { err = emi62_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_INTERNAL); if (err < 0) goto wraperr; rec = ihex_next_binrec(rec); } /* De-assert reset (let the CPU run) */ err = emi62_set_reset(dev,0); if (err < 0) goto wraperr; msleep(250); /* let device settle */ /* 2. We upload the FPGA firmware into the EMI * Note: collect up to 1023 (yes!) bytes and send them with * a single request. This is _much_ faster! */ rec = (const struct ihex_binrec *)bitstream_fw->data; do { i = 0; addr = be32_to_cpu(rec->addr); /* intel hex records are terminated with type 0 element */ while (rec && (i + be16_to_cpu(rec->len) < FW_LOAD_SIZE)) { memcpy(buf + i, rec->data, be16_to_cpu(rec->len)); i += be16_to_cpu(rec->len); rec = ihex_next_binrec(rec); } err = emi62_writememory(dev, addr, buf, i, ANCHOR_LOAD_FPGA); if (err < 0) goto wraperr; } while (rec); /* Assert reset (stop the CPU in the EMI) */ err = emi62_set_reset(dev,1); if (err < 0) goto wraperr; /* 3. We need to put the loader for the firmware into the EZ-USB (again...) */ for (rec = (const struct ihex_binrec *)loader_fw->data; rec; rec = ihex_next_binrec(rec)) { err = emi62_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_INTERNAL); if (err < 0) goto wraperr; } /* De-assert reset (let the CPU run) */ err = emi62_set_reset(dev,0); if (err < 0) goto wraperr; msleep(250); /* let device settle */ /* 4. 
We put the part of the firmware that lies in the external RAM into the EZ-USB */ for (rec = (const struct ihex_binrec *)firmware_fw->data; rec; rec = ihex_next_binrec(rec)) { if (!INTERNAL_RAM(be32_to_cpu(rec->addr))) { err = emi62_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_EXTERNAL); if (err < 0) goto wraperr; } } /* Assert reset (stop the CPU in the EMI) */ err = emi62_set_reset(dev,1); if (err < 0) goto wraperr; for (rec = (const struct ihex_binrec *)firmware_fw->data; rec; rec = ihex_next_binrec(rec)) { if (INTERNAL_RAM(be32_to_cpu(rec->addr))) { err = emi62_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_EXTERNAL); if (err < 0) goto wraperr; } } /* De-assert reset (let the CPU run) */ err = emi62_set_reset(dev,0); if (err < 0) goto wraperr; msleep(250); /* let device settle */ release_firmware(loader_fw); release_firmware(bitstream_fw); release_firmware(firmware_fw); kfree(buf); /* return 1 to fail the driver initialization * and give the real driver a chance to load */ return 1; wraperr: if (err < 0) dev_err(&dev->dev,"%s - error loading firmware: error = %d\n", __func__, err); release_firmware(loader_fw); release_firmware(bitstream_fw); release_firmware(firmware_fw); kfree(buf); dev_err(&dev->dev, "Error\n"); return err; } static const struct usb_device_id id_table[] = { { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, id_table); static int emi62_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); dev_dbg(&intf->dev, "emi62_probe\n"); dev_info(&intf->dev, "%s start\n", __func__); emi62_load_firmware(dev); /* do not return the driver context, let real audio driver do that */ return -EIO; } static void emi62_disconnect(struct usb_interface *intf) { } static struct usb_driver emi62_driver = { .name = "emi62 - firmware loader", .probe = emi62_probe, .disconnect = emi62_disconnect, .id_table = id_table, }; module_usb_driver(emi62_driver); MODULE_AUTHOR("Tapio Laxström"); MODULE_DESCRIPTION("Emagic EMI 6|2m firmware loader."); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("emi62/loader.fw"); MODULE_FIRMWARE("emi62/bitstream.fw"); MODULE_FIRMWARE(FIRMWARE_FW); /* vi:ai:syntax=c:sw=8:ts=8:tw=80 */
// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux driver for M2Tech hiFace compatible devices * * Copyright 2012-2013 (C) M2TECH S.r.l and Amarula Solutions B.V.
* * Authors: Michael Trimarchi <michael@amarulasolutions.com> * Antonio Ospite <ao2@amarulasolutions.com> * * The driver is based on the work done in TerraTec DMX 6Fire USB */ #include <linux/slab.h> #include <sound/pcm.h> #include "pcm.h" #include "chip.h" #define OUT_EP 0x2 #define PCM_N_URBS 8 #define PCM_PACKET_SIZE 4096 #define PCM_BUFFER_SIZE (2 * PCM_N_URBS * PCM_PACKET_SIZE) struct pcm_urb { struct hiface_chip *chip; struct urb instance; struct usb_anchor submitted; u8 *buffer; }; struct pcm_substream { spinlock_t lock; struct snd_pcm_substream *instance; bool active; snd_pcm_uframes_t dma_off; /* current position in alsa dma_area */ snd_pcm_uframes_t period_off; /* current position in current period */ }; enum { /* pcm streaming states */ STREAM_DISABLED, /* no pcm streaming */ STREAM_STARTING, /* pcm streaming requested, waiting to become ready */ STREAM_RUNNING, /* pcm streaming running */ STREAM_STOPPING }; struct pcm_runtime { struct hiface_chip *chip; struct snd_pcm *instance; struct pcm_substream playback; bool panic; /* if set driver won't do anymore pcm on device */ struct pcm_urb out_urbs[PCM_N_URBS]; struct mutex stream_mutex; u8 stream_state; /* one of STREAM_XXX */ u8 extra_freq; wait_queue_head_t stream_wait_queue; bool stream_wait_cond; }; static const unsigned int rates[] = { 44100, 48000, 88200, 96000, 176400, 192000, 352800, 384000 }; static const struct snd_pcm_hw_constraint_list constraints_extra_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; static const struct snd_pcm_hardware pcm_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH, .formats = SNDRV_PCM_FMTBIT_S32_LE, .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000, .rate_min = 44100, .rate_max = 192000, /* changes in hiface_pcm_open to support extra rates */ .channels_min = 2, .channels_max = 2, .buffer_bytes_max = PCM_BUFFER_SIZE, .period_bytes_min = PCM_PACKET_SIZE, .period_bytes_max = PCM_BUFFER_SIZE, .periods_min = 2, .periods_max = 1024 }; /* message values used to change the sample rate */ #define HIFACE_SET_RATE_REQUEST 0xb0 #define HIFACE_RATE_44100 0x43 #define HIFACE_RATE_48000 0x4b #define HIFACE_RATE_88200 0x42 #define HIFACE_RATE_96000 0x4a #define HIFACE_RATE_176400 0x40 #define HIFACE_RATE_192000 0x48 #define HIFACE_RATE_352800 0x58 #define HIFACE_RATE_384000 0x68 static int hiface_pcm_set_rate(struct pcm_runtime *rt, unsigned int rate) { struct usb_device *device = rt->chip->dev; u16 rate_value; int ret; /* We are already sure that the rate is supported here thanks to * ALSA constraints */ switch (rate) { case 44100: rate_value = HIFACE_RATE_44100; break; case 48000: rate_value = HIFACE_RATE_48000; break; case 88200: rate_value = HIFACE_RATE_88200; break; case 96000: rate_value = HIFACE_RATE_96000; break; case 176400: rate_value = HIFACE_RATE_176400; break; case 192000: rate_value = HIFACE_RATE_192000; break; case 352800: rate_value = HIFACE_RATE_352800; break; case 384000: rate_value = HIFACE_RATE_384000; break; default: dev_err(&device->dev, "Unsupported rate %d\n", rate); return -EINVAL; } /* * USBIO: Vendor 0xb0(wValue=0x0043, wIndex=0x0000) * 43 b0 43 00 00 00 00 00 * USBIO: Vendor 0xb0(wValue=0x004b, wIndex=0x0000) * 43 b0 4b 00 00 00 00 00 * This control message doesn't have any ack from the * other side */ ret = usb_control_msg_send(device, 0, 
HIFACE_SET_RATE_REQUEST, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, rate_value, 0, NULL, 0, 100, GFP_KERNEL); if (ret) dev_err(&device->dev, "Error setting samplerate %d.\n", rate); return ret; } static struct pcm_substream *hiface_pcm_get_substream(struct snd_pcm_substream *alsa_sub) { struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); struct device *device = &rt->chip->dev->dev; if (alsa_sub->stream == SNDRV_PCM_STREAM_PLAYBACK) return &rt->playback; dev_err(device, "Error getting pcm substream slot.\n"); return NULL; } /* call with stream_mutex locked */ static void hiface_pcm_stream_stop(struct pcm_runtime *rt) { int i, time; if (rt->stream_state != STREAM_DISABLED) { rt->stream_state = STREAM_STOPPING; for (i = 0; i < PCM_N_URBS; i++) { time = usb_wait_anchor_empty_timeout( &rt->out_urbs[i].submitted, 100); if (!time) usb_kill_anchored_urbs( &rt->out_urbs[i].submitted); usb_kill_urb(&rt->out_urbs[i].instance); } rt->stream_state = STREAM_DISABLED; } } /* call with stream_mutex locked */ static int hiface_pcm_stream_start(struct pcm_runtime *rt) { int ret = 0; int i; if (rt->stream_state == STREAM_DISABLED) { /* reset panic state when starting a new stream */ rt->panic = false; /* submit our out urbs zero init */ rt->stream_state = STREAM_STARTING; for (i = 0; i < PCM_N_URBS; i++) { memset(rt->out_urbs[i].buffer, 0, PCM_PACKET_SIZE); usb_anchor_urb(&rt->out_urbs[i].instance, &rt->out_urbs[i].submitted); ret = usb_submit_urb(&rt->out_urbs[i].instance, GFP_ATOMIC); if (ret) { hiface_pcm_stream_stop(rt); return ret; } } /* wait for first out urb to return (sent in urb handler) */ wait_event_timeout(rt->stream_wait_queue, rt->stream_wait_cond, HZ); if (rt->stream_wait_cond) { struct device *device = &rt->chip->dev->dev; dev_dbg(device, "%s: Stream is running wakeup event\n", __func__); rt->stream_state = STREAM_RUNNING; } else { hiface_pcm_stream_stop(rt); return -EIO; } } return ret; } /* The hardware wants word-swapped 32-bit values */ static void memcpy_swahw32(u8 *dest, u8 *src, unsigned int n) { unsigned int i; for (i = 0; i < n / 4; i++) ((u32 *)dest)[i] = swahw32(((u32 *)src)[i]); } /* call with substream locked */ /* returns true if a period elapsed */ static bool hiface_pcm_playback(struct pcm_substream *sub, struct pcm_urb *urb) { struct snd_pcm_runtime *alsa_rt = sub->instance->runtime; struct device *device = &urb->chip->dev->dev; u8 *source; unsigned int pcm_buffer_size; WARN_ON(alsa_rt->format != SNDRV_PCM_FORMAT_S32_LE); pcm_buffer_size = snd_pcm_lib_buffer_bytes(sub->instance); if (sub->dma_off + PCM_PACKET_SIZE <= pcm_buffer_size) { dev_dbg(device, "%s: (1) buffer_size %#x dma_offset %#x\n", __func__, (unsigned int) pcm_buffer_size, (unsigned int) sub->dma_off); source = alsa_rt->dma_area + sub->dma_off; memcpy_swahw32(urb->buffer, source, PCM_PACKET_SIZE); } else { /* wrap around at end of ring buffer */ unsigned int len; dev_dbg(device, "%s: (2) buffer_size %#x dma_offset %#x\n", __func__, (unsigned int) pcm_buffer_size, (unsigned int) sub->dma_off); len = pcm_buffer_size - sub->dma_off; source = alsa_rt->dma_area + sub->dma_off; memcpy_swahw32(urb->buffer, source, len); source = alsa_rt->dma_area; memcpy_swahw32(urb->buffer + len, source, PCM_PACKET_SIZE - len); } sub->dma_off += PCM_PACKET_SIZE; if (sub->dma_off >= pcm_buffer_size) sub->dma_off -= pcm_buffer_size; sub->period_off += PCM_PACKET_SIZE; if (sub->period_off >= alsa_rt->period_size) { sub->period_off %= alsa_rt->period_size; return true; } return false; } static void 
hiface_pcm_out_urb_handler(struct urb *usb_urb) { struct pcm_urb *out_urb = usb_urb->context; struct pcm_runtime *rt = out_urb->chip->pcm; struct pcm_substream *sub; bool do_period_elapsed = false; unsigned long flags; int ret; if (rt->panic || rt->stream_state == STREAM_STOPPING) return; if (unlikely(usb_urb->status == -ENOENT || /* unlinked */ usb_urb->status == -ENODEV || /* device removed */ usb_urb->status == -ECONNRESET || /* unlinked */ usb_urb->status == -ESHUTDOWN)) { /* device disabled */ goto out_fail; } if (rt->stream_state == STREAM_STARTING) { rt->stream_wait_cond = true; wake_up(&rt->stream_wait_queue); } /* now send our playback data (if a free out urb was found) */ sub = &rt->playback; spin_lock_irqsave(&sub->lock, flags); if (sub->active) do_period_elapsed = hiface_pcm_playback(sub, out_urb); else memset(out_urb->buffer, 0, PCM_PACKET_SIZE); spin_unlock_irqrestore(&sub->lock, flags); if (do_period_elapsed) snd_pcm_period_elapsed(sub->instance); ret = usb_submit_urb(&out_urb->instance, GFP_ATOMIC); if (ret < 0) goto out_fail; return; out_fail: rt->panic = true; } static int hiface_pcm_open(struct snd_pcm_substream *alsa_sub) { struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); struct pcm_substream *sub = NULL; struct snd_pcm_runtime *alsa_rt = alsa_sub->runtime; int ret; if (rt->panic) return -EPIPE; mutex_lock(&rt->stream_mutex); alsa_rt->hw = pcm_hw; if (alsa_sub->stream == SNDRV_PCM_STREAM_PLAYBACK) sub = &rt->playback; if (!sub) { struct device *device = &rt->chip->dev->dev; mutex_unlock(&rt->stream_mutex); dev_err(device, "Invalid stream type\n"); return -EINVAL; } if (rt->extra_freq) { alsa_rt->hw.rates |= SNDRV_PCM_RATE_KNOT; alsa_rt->hw.rate_max = 384000; /* explicit constraints needed as we added SNDRV_PCM_RATE_KNOT */ ret = snd_pcm_hw_constraint_list(alsa_sub->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &constraints_extra_rates); if (ret < 0) { mutex_unlock(&rt->stream_mutex); return ret; } } sub->instance = alsa_sub; sub->active = false; mutex_unlock(&rt->stream_mutex); return 0; } static int hiface_pcm_close(struct snd_pcm_substream *alsa_sub) { struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); struct pcm_substream *sub = hiface_pcm_get_substream(alsa_sub); unsigned long flags; if (rt->panic) return 0; mutex_lock(&rt->stream_mutex); if (sub) { hiface_pcm_stream_stop(rt); /* deactivate substream */ spin_lock_irqsave(&sub->lock, flags); sub->instance = NULL; sub->active = false; spin_unlock_irqrestore(&sub->lock, flags); } mutex_unlock(&rt->stream_mutex); return 0; } static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub) { struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); struct pcm_substream *sub = hiface_pcm_get_substream(alsa_sub); struct snd_pcm_runtime *alsa_rt = alsa_sub->runtime; int ret; if (rt->panic) return -EPIPE; if (!sub) return -ENODEV; mutex_lock(&rt->stream_mutex); hiface_pcm_stream_stop(rt); sub->dma_off = 0; sub->period_off = 0; if (rt->stream_state == STREAM_DISABLED) { ret = hiface_pcm_set_rate(rt, alsa_rt->rate); if (ret) { mutex_unlock(&rt->stream_mutex); return ret; } ret = hiface_pcm_stream_start(rt); if (ret) { mutex_unlock(&rt->stream_mutex); return ret; } } mutex_unlock(&rt->stream_mutex); return 0; } static int hiface_pcm_trigger(struct snd_pcm_substream *alsa_sub, int cmd) { struct pcm_substream *sub = hiface_pcm_get_substream(alsa_sub); struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); if (rt->panic) return -EPIPE; if (!sub) return -ENODEV; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case 
SNDRV_PCM_TRIGGER_PAUSE_RELEASE: spin_lock_irq(&sub->lock); sub->active = true; spin_unlock_irq(&sub->lock); return 0; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: spin_lock_irq(&sub->lock); sub->active = false; spin_unlock_irq(&sub->lock); return 0; default: return -EINVAL; } } static snd_pcm_uframes_t hiface_pcm_pointer(struct snd_pcm_substream *alsa_sub) { struct pcm_substream *sub = hiface_pcm_get_substream(alsa_sub); struct pcm_runtime *rt = snd_pcm_substream_chip(alsa_sub); unsigned long flags; snd_pcm_uframes_t dma_offset; if (rt->panic || !sub) return SNDRV_PCM_POS_XRUN; spin_lock_irqsave(&sub->lock, flags); dma_offset = sub->dma_off; spin_unlock_irqrestore(&sub->lock, flags); return bytes_to_frames(alsa_sub->runtime, dma_offset); } static const struct snd_pcm_ops pcm_ops = { .open = hiface_pcm_open, .close = hiface_pcm_close, .prepare = hiface_pcm_prepare, .trigger = hiface_pcm_trigger, .pointer = hiface_pcm_pointer, }; static int hiface_pcm_init_urb(struct pcm_urb *urb, struct hiface_chip *chip, unsigned int ep, void (*handler)(struct urb *)) { urb->chip = chip; usb_init_urb(&urb->instance); urb->buffer = kzalloc(PCM_PACKET_SIZE, GFP_KERNEL); if (!urb->buffer) return -ENOMEM; usb_fill_bulk_urb(&urb->instance, chip->dev, usb_sndbulkpipe(chip->dev, ep), (void *)urb->buffer, PCM_PACKET_SIZE, handler, urb); if (usb_urb_ep_type_check(&urb->instance)) return -EINVAL; init_usb_anchor(&urb->submitted); return 0; } void hiface_pcm_abort(struct hiface_chip *chip) { struct pcm_runtime *rt = chip->pcm; if (rt) { rt->panic = true; mutex_lock(&rt->stream_mutex); hiface_pcm_stream_stop(rt); mutex_unlock(&rt->stream_mutex); } } static void hiface_pcm_destroy(struct hiface_chip *chip) { struct pcm_runtime *rt = chip->pcm; int i; for (i = 0; i < PCM_N_URBS; i++) kfree(rt->out_urbs[i].buffer); kfree(chip->pcm); chip->pcm = NULL; } static void hiface_pcm_free(struct snd_pcm *pcm) { struct pcm_runtime *rt = pcm->private_data; if (rt) hiface_pcm_destroy(rt->chip); } int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq) { int i; int ret; struct snd_pcm *pcm; struct pcm_runtime *rt; rt = kzalloc(sizeof(*rt), GFP_KERNEL); if (!rt) return -ENOMEM; rt->chip = chip; rt->stream_state = STREAM_DISABLED; if (extra_freq) rt->extra_freq = 1; init_waitqueue_head(&rt->stream_wait_queue); mutex_init(&rt->stream_mutex); spin_lock_init(&rt->playback.lock); for (i = 0; i < PCM_N_URBS; i++) { ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP, hiface_pcm_out_urb_handler); if (ret < 0) goto error; } ret = snd_pcm_new(chip->card, "USB-SPDIF Audio", 0, 1, 0, &pcm); if (ret < 0) { dev_err(&chip->dev->dev, "Cannot create pcm instance\n"); goto error; } pcm->private_data = rt; pcm->private_free = hiface_pcm_free; strscpy(pcm->name, "USB-SPDIF Audio", sizeof(pcm->name)); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcm_ops); snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0); rt->instance = pcm; chip->pcm = rt; return 0; error: for (i = 0; i < PCM_N_URBS; i++) kfree(rt->out_urbs[i].buffer); kfree(rt); return ret; } |
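The one non-obvious step in the playback path above is memcpy_swahw32(): the device wants each 32-bit S32_LE sample with its 16-bit halfwords exchanged before it goes out in a bulk URB. A standalone sketch of the same transform, using swahw32() from <linux/swab.h> (illustration only, not part of the driver):

#include <linux/swab.h>
#include <linux/types.h>

/* Exchange the 16-bit halves of every 32-bit sample, as the driver does
 * right before copying audio data into an outgoing URB buffer.
 */
static void samples_to_wire(u32 *dst, const u32 *src, unsigned int samples)
{
	unsigned int i;

	for (i = 0; i < samples; i++)
		dst[i] = swahw32(src[i]);	/* 0xAABBCCDD -> 0xCCDDAABB */
}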
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM ipi #if !defined(_TRACE_IPI_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_IPI_H #include <linux/tracepoint.h> /** * ipi_raise - called when a smp cross call is made * * @mask: mask of recipient CPUs for the IPI * @reason: string identifying the IPI purpose * * It is necessary for @reason to be a static string declared with * __tracepoint_string. */ TRACE_EVENT(ipi_raise, TP_PROTO(const struct cpumask *mask, const char *reason), TP_ARGS(mask, reason), TP_STRUCT__entry( __bitmask(target_cpus, nr_cpumask_bits) __field(const char *, reason) ), TP_fast_assign( __assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits); __entry->reason = reason; ), TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason) ); TRACE_EVENT(ipi_send_cpu, TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback), TP_ARGS(cpu, callsite, callback), TP_STRUCT__entry( __field(unsigned int, cpu) __field(void *, callsite) __field(void *, callback) ), TP_fast_assign( __entry->cpu = cpu; __entry->callsite = (void *)callsite; __entry->callback = callback; ), TP_printk("cpu=%u callsite=%pS callback=%pS", __entry->cpu, __entry->callsite, __entry->callback) ); TRACE_EVENT(ipi_send_cpumask, TP_PROTO(const struct cpumask *cpumask, unsigned long callsite, void *callback), TP_ARGS(cpumask, callsite, callback), TP_STRUCT__entry( __cpumask(cpumask) __field(void *, callsite) __field(void *, callback) ), TP_fast_assign( __assign_cpumask(cpumask, cpumask_bits(cpumask)); __entry->callsite = (void *)callsite; __entry->callback = callback; ), TP_printk("cpumask=%s callsite=%pS callback=%pS", __get_cpumask(cpumask), __entry->callsite, __entry->callback) ); DECLARE_EVENT_CLASS(ipi_handler, TP_PROTO(const char *reason), TP_ARGS(reason), TP_STRUCT__entry( __field(const char *, reason) ), TP_fast_assign( __entry->reason = reason; ), TP_printk("(%s)", __entry->reason) ); /** * ipi_entry - called immediately before the IPI handler * * @reason: string identifying the IPI purpose * * It is necessary for @reason to be a static string declared with * __tracepoint_string, ideally the same as used with trace_ipi_raise * for that IPI. */ DEFINE_EVENT(ipi_handler, ipi_entry, TP_PROTO(const char *reason), TP_ARGS(reason) ); /** * ipi_exit - called immediately after the IPI handler returns * * @reason: string identifying the IPI purpose * * It is necessary for @reason to be a static string declared with * __tracepoint_string, ideally the same as used with trace_ipi_raise for * that IPI. */ DEFINE_EVENT(ipi_handler, ipi_exit, TP_PROTO(const char *reason), TP_ARGS(reason) ); #endif /* _TRACE_IPI_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
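These tracepoints are instantiated and raised by architecture SMP code; the sketch below is a hypothetical caller (made-up names), showing the two requirements the kerneldoc above states: exactly one translation unit defines CREATE_TRACE_POINTS before including the header, and @reason must point at a string annotated with __tracepoint_string so the recorded pointer can be resolved back to text.

#include <linux/cpumask.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/* Hypothetical caller: the reason string is placed in the __tracepoint_str
 * section so trace tooling can map its address back to the literal.
 */
static const char *example_ipi_reason __tracepoint_string =
	"Example rescheduling IPI";

static void example_raise_ipi(const struct cpumask *mask)
{
	trace_ipi_raise(mask, example_ipi_reason);
	/* ...architecture-specific code would actually trigger the IPI here... */
}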
// SPDX-License-Identifier: GPL-2.0 /* Parts of this driver are based on the following: * - Kvaser linux leaf driver (version 4.78) * - CAN driver for esd CAN-USB/2 * - Kvaser linux usbcanII driver (version 5.3) * * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved. * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be> * Copyright (C) 2015 Valeo S.A.
*/ #include <linux/completion.h> #include <linux/device.h> #include <linux/gfp.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/units.h> #include <linux/usb.h> #include <linux/workqueue.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/netlink.h> #include "kvaser_usb.h" #define MAX_USBCAN_NET_DEVICES 2 /* Command header size */ #define CMD_HEADER_LEN 2 /* Kvaser CAN message flags */ #define MSG_FLAG_ERROR_FRAME BIT(0) #define MSG_FLAG_OVERRUN BIT(1) #define MSG_FLAG_NERR BIT(2) #define MSG_FLAG_WAKEUP BIT(3) #define MSG_FLAG_REMOTE_FRAME BIT(4) #define MSG_FLAG_RESERVED BIT(5) #define MSG_FLAG_TX_ACK BIT(6) #define MSG_FLAG_TX_REQUEST BIT(7) /* CAN states (M16C CxSTRH register) */ #define M16C_STATE_BUS_RESET BIT(0) #define M16C_STATE_BUS_ERROR BIT(4) #define M16C_STATE_BUS_PASSIVE BIT(5) #define M16C_STATE_BUS_OFF BIT(6) /* Leaf/usbcan command ids */ #define CMD_RX_STD_MESSAGE 12 #define CMD_TX_STD_MESSAGE 13 #define CMD_RX_EXT_MESSAGE 14 #define CMD_TX_EXT_MESSAGE 15 #define CMD_SET_BUS_PARAMS 16 #define CMD_GET_BUS_PARAMS 17 #define CMD_GET_BUS_PARAMS_REPLY 18 #define CMD_GET_CHIP_STATE 19 #define CMD_CHIP_STATE_EVENT 20 #define CMD_SET_CTRL_MODE 21 #define CMD_RESET_CHIP 24 #define CMD_START_CHIP 26 #define CMD_START_CHIP_REPLY 27 #define CMD_STOP_CHIP 28 #define CMD_STOP_CHIP_REPLY 29 #define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33 #define CMD_GET_CARD_INFO 34 #define CMD_GET_CARD_INFO_REPLY 35 #define CMD_GET_SOFTWARE_INFO 38 #define CMD_GET_SOFTWARE_INFO_REPLY 39 #define CMD_ERROR_EVENT 45 #define CMD_FLUSH_QUEUE 48 #define CMD_TX_ACKNOWLEDGE 50 #define CMD_CAN_ERROR_EVENT 51 #define CMD_FLUSH_QUEUE_REPLY 68 #define CMD_GET_CAPABILITIES_REQ 95 #define CMD_GET_CAPABILITIES_RESP 96 #define CMD_LEAF_LOG_MESSAGE 106 /* Leaf frequency options */ #define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60 #define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0 #define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5) #define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6) #define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12) /* error factors */ #define M16C_EF_ACKE BIT(0) #define M16C_EF_CRCE BIT(1) #define M16C_EF_FORME BIT(2) #define M16C_EF_STFE BIT(3) #define M16C_EF_BITE0 BIT(4) #define M16C_EF_BITE1 BIT(5) #define M16C_EF_RCVE BIT(6) #define M16C_EF_TRE BIT(7) /* Only Leaf-based devices can report M16C error factors, * thus define our own error status flags for USBCANII */ #define USBCAN_ERROR_STATE_NONE 0 #define USBCAN_ERROR_STATE_TX_ERROR BIT(0) #define USBCAN_ERROR_STATE_RX_ERROR BIT(1) #define USBCAN_ERROR_STATE_BUSERROR BIT(2) /* ctrl modes */ #define KVASER_CTRL_MODE_NORMAL 1 #define KVASER_CTRL_MODE_SILENT 2 #define KVASER_CTRL_MODE_SELFRECEPTION 3 #define KVASER_CTRL_MODE_OFF 4 /* Extended CAN identifier flag */ #define KVASER_EXTENDED_FRAME BIT(31) /* USBCanII timestamp */ #define KVASER_USB_USBCAN_CLK_OVERFLOW_MASK GENMASK(31, 16) #define KVASER_USB_USBCAN_TIMESTAMP_FACTOR 10 struct kvaser_cmd_simple { u8 tid; u8 channel; } __packed; struct kvaser_cmd_cardinfo { u8 tid; u8 nchannels; __le32 serial_number; __le32 padding0; __le32 clock_resolution; __le32 mfgdate; u8 ean[8]; u8 hw_revision; union { struct { u8 usb_hs_mode; } __packed leaf1; struct { u8 padding; } __packed usbcan1; } __packed; __le16 padding1; } __packed; struct leaf_cmd_softinfo { u8 tid; u8 padding0; __le32 sw_options; __le32 fw_version; __le16 
max_outstanding_tx; __le16 padding1[9]; } __packed; struct usbcan_cmd_softinfo { u8 tid; u8 fw_name[5]; __le16 max_outstanding_tx; u8 padding[6]; __le32 fw_version; __le16 checksum; __le16 sw_options; } __packed; struct kvaser_cmd_busparams { u8 tid; u8 channel; struct kvaser_usb_busparams busparams; } __packed; struct kvaser_cmd_tx_can { u8 channel; u8 tid; u8 data[14]; union { struct { u8 padding; u8 flags; } __packed leaf; struct { u8 flags; u8 padding; } __packed usbcan; } __packed; } __packed; struct kvaser_cmd_rx_can_header { u8 channel; u8 flag; } __packed; struct leaf_cmd_rx_can { u8 channel; u8 flag; __le16 time[3]; u8 data[14]; } __packed; struct usbcan_cmd_rx_can { u8 channel; u8 flag; u8 data[14]; __le16 time; } __packed; struct leaf_cmd_chip_state_event { u8 tid; u8 channel; __le16 time[3]; u8 tx_errors_count; u8 rx_errors_count; u8 status; u8 padding[3]; } __packed; struct usbcan_cmd_chip_state_event { u8 tid; u8 channel; u8 tx_errors_count; u8 rx_errors_count; __le16 time; u8 status; u8 padding[3]; } __packed; struct kvaser_cmd_tx_acknowledge_header { u8 channel; u8 tid; } __packed; struct leaf_cmd_tx_acknowledge { u8 channel; u8 tid; __le16 time[3]; u8 padding[2]; } __packed; struct usbcan_cmd_tx_acknowledge { u8 channel; u8 tid; __le16 time; u8 padding[2]; } __packed; struct leaf_cmd_can_error_event { u8 tid; u8 flags; __le16 time[3]; u8 channel; u8 padding; u8 tx_errors_count; u8 rx_errors_count; u8 status; u8 error_factor; } __packed; struct usbcan_cmd_can_error_event { u8 tid; u8 padding; u8 tx_errors_count_ch0; u8 rx_errors_count_ch0; u8 tx_errors_count_ch1; u8 rx_errors_count_ch1; u8 status_ch0; u8 status_ch1; __le16 time; } __packed; /* CMD_ERROR_EVENT error codes */ #define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8 #define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9 struct leaf_cmd_error_event { u8 tid; u8 error_code; __le16 timestamp[3]; __le16 padding; __le16 info1; __le16 info2; } __packed; struct usbcan_cmd_error_event { u8 tid; u8 error_code; __le16 info1; __le16 info2; __le16 timestamp; __le16 padding; } __packed; struct usbcan_cmd_clk_overflow_event { u8 tid; u8 padding; __le32 time; } __packed; struct kvaser_cmd_ctrl_mode { u8 tid; u8 channel; u8 ctrl_mode; u8 padding[3]; } __packed; struct kvaser_cmd_flush_queue { u8 tid; u8 channel; u8 flags; u8 padding[3]; } __packed; struct leaf_cmd_log_message { u8 channel; u8 flags; __le16 time[3]; u8 dlc; u8 time_offset; __le32 id; u8 data[8]; } __packed; /* Sub commands for cap_req and cap_res */ #define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02 #define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05 struct kvaser_cmd_cap_req { __le16 padding0; __le16 cap_cmd; __le16 padding1; __le16 channel; } __packed; /* Status codes for cap_res */ #define KVASER_USB_LEAF_CAP_STAT_OK 0x00 #define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01 #define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02 struct kvaser_cmd_cap_res { __le16 padding; __le16 cap_cmd; __le16 status; __le32 mask; __le32 value; } __packed; struct kvaser_cmd { u8 len; u8 id; union { struct kvaser_cmd_simple simple; struct kvaser_cmd_cardinfo cardinfo; struct kvaser_cmd_busparams busparams; struct kvaser_cmd_rx_can_header rx_can_header; struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header; union { struct leaf_cmd_softinfo softinfo; struct leaf_cmd_rx_can rx_can; struct leaf_cmd_chip_state_event chip_state_event; struct leaf_cmd_can_error_event can_error_event; struct leaf_cmd_log_message log_message; struct leaf_cmd_error_event error_event; struct kvaser_cmd_cap_req cap_req; struct 
kvaser_cmd_cap_res cap_res; struct leaf_cmd_tx_acknowledge tx_ack; } __packed leaf; union { struct usbcan_cmd_softinfo softinfo; struct usbcan_cmd_rx_can rx_can; struct usbcan_cmd_chip_state_event chip_state_event; struct usbcan_cmd_can_error_event can_error_event; struct usbcan_cmd_error_event error_event; struct usbcan_cmd_tx_acknowledge tx_ack; struct usbcan_cmd_clk_overflow_event clk_overflow_event; } __packed usbcan; struct kvaser_cmd_tx_can tx_can; struct kvaser_cmd_ctrl_mode ctrl_mode; struct kvaser_cmd_flush_queue flush_queue; } u; } __packed; #define CMD_SIZE_ANY 0xff #define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field) static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = { [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple), [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple), [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo), [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.leaf.tx_ack), [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo), [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can), [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can), [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message), [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event), [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.can_error_event), [CMD_GET_CAPABILITIES_RESP] = kvaser_fsize(u.leaf.cap_res), [CMD_GET_BUS_PARAMS_REPLY] = kvaser_fsize(u.busparams), [CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event), /* ignored events: */ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY, }; static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = { [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple), [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple), [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo), [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.usbcan.tx_ack), [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo), [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event), [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event), [CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event), [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = kvaser_fsize(u.usbcan.clk_overflow_event), }; /* Summary of a kvaser error event, for a unified Leaf/Usbcan error * handling. Some discrepancies between the two families exist: * * - USBCAN firmware does not report M16C "error factors" * - USBCAN controllers has difficulties reporting if the raised error * event is for ch0 or ch1. They leave such arbitration to the OS * driver by letting it compare error counters with previous values * and decide the error event's channel. Thus for USBCAN, the channel * field is only advisory. 
*/ struct kvaser_usb_err_summary { u8 channel, status, txerr, rxerr; union { struct { u8 error_factor; } leaf; struct { u8 other_ch_status; u8 error_state; } usbcan; }; }; struct kvaser_usb_net_leaf_priv { struct kvaser_usb_net_priv *net; struct delayed_work chip_state_req_work; /* started but not reported as bus-on yet */ bool joining_bus; }; static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = { .name = "kvaser_usb_ucii", .tseg1_min = 4, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 16, .brp_inc = 1, }; static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = { .name = "kvaser_usb_leaf", .tseg1_min = 3, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 2, .brp_max = 128, .brp_inc = 2, }; static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = { .clock = { .freq = 8 * MEGA /* Hz */, }, .timestamp_freq = 1, .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const, }; static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_16mhz = { .clock = { .freq = 16 * MEGA /* Hz */, }, .timestamp_freq = 16, .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, }; static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_24mhz = { .clock = { .freq = 16 * MEGA /* Hz */, }, .timestamp_freq = 24, .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, }; static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_32mhz = { .clock = { .freq = 16 * MEGA /* Hz */, }, .timestamp_freq = 32, .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, }; static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = { .clock = { .freq = 16 * MEGA /* Hz */, }, .timestamp_freq = 16, .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = { .clock = { .freq = 24 * MEGA /* Hz */, }, .timestamp_freq = 24, .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = { .clock = { .freq = 32 * MEGA /* Hz */, }, .timestamp_freq = 32, .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; static inline ktime_t kvaser_usb_usbcan_timestamp_to_ktime(const struct kvaser_usb *dev, __le16 timestamp) { u64 ticks = le16_to_cpu(timestamp) | dev->card_data.usbcan_timestamp_msb; return kvaser_usb_ticks_to_ktime(dev->cfg, ticks * KVASER_USB_USBCAN_TIMESTAMP_FACTOR); } static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { /* buffer size >= cmd->len ensured by caller */ u8 min_size = 0; switch (dev->driver_info->family) { case KVASER_LEAF: if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf)) min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id]; break; case KVASER_USBCAN: if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan)) min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id]; break; } if (min_size == CMD_SIZE_ANY) return 0; if (min_size) { min_size += CMD_HEADER_LEN; if (cmd->len >= min_size) return 0; dev_err_ratelimited(&dev->intf->dev, "Received command %u too short (size %u, needed %u)", cmd->id, cmd->len, min_size); return -EIO; } dev_warn_ratelimited(&dev->intf->dev, "Unhandled command (%d, size %d)\n", cmd->id, cmd->len); return -EINVAL; } static void * kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, const struct sk_buff *skb, int *cmd_len, u16 transid) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; u8 *cmd_tx_can_flags 
= NULL; /* GCC */ struct can_frame *cf = (struct can_frame *)skb->data; cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); if (cmd) { cmd->u.tx_can.tid = transid & 0xff; cmd->len = *cmd_len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_tx_can); cmd->u.tx_can.channel = priv->channel; switch (dev->driver_info->family) { case KVASER_LEAF: cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags; break; case KVASER_USBCAN: cmd_tx_can_flags = &cmd->u.tx_can.usbcan.flags; break; } *cmd_tx_can_flags = 0; if (cf->can_id & CAN_EFF_FLAG) { cmd->id = CMD_TX_EXT_MESSAGE; cmd->u.tx_can.data[0] = (cf->can_id >> 24) & 0x1f; cmd->u.tx_can.data[1] = (cf->can_id >> 18) & 0x3f; cmd->u.tx_can.data[2] = (cf->can_id >> 14) & 0x0f; cmd->u.tx_can.data[3] = (cf->can_id >> 6) & 0xff; cmd->u.tx_can.data[4] = cf->can_id & 0x3f; } else { cmd->id = CMD_TX_STD_MESSAGE; cmd->u.tx_can.data[0] = (cf->can_id >> 6) & 0x1f; cmd->u.tx_can.data[1] = cf->can_id & 0x3f; } cmd->u.tx_can.data[5] = can_get_cc_dlc(cf, priv->can.ctrlmode); memcpy(&cmd->u.tx_can.data[6], cf->data, cf->len); if (cf->can_id & CAN_RTR_FLAG) *cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; } return cmd; } static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id, struct kvaser_cmd *cmd) { struct kvaser_cmd *tmp; void *buf; int actual_len; int err; int pos; unsigned long to = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT); buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; do { err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE, &actual_len); if (err < 0) goto end; pos = 0; while (pos <= actual_len - CMD_HEADER_LEN) { tmp = buf + pos; /* Handle commands crossing the USB endpoint max packet * size boundary. Check kvaser_usb_read_bulk_callback() * for further details. */ if (tmp->len == 0) { pos = round_up(pos, le16_to_cpu (dev->bulk_in->wMaxPacketSize)); continue; } if (pos + tmp->len > actual_len) { dev_err_ratelimited(&dev->intf->dev, "Format error\n"); break; } if (tmp->id == id) { memcpy(cmd, tmp, tmp->len); goto end; } pos += tmp->len; } } while (time_before(jiffies, to)); err = -EINVAL; end: kfree(buf); if (err == 0) err = kvaser_usb_leaf_verify_size(dev, cmd); return err; } static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev, u8 cmd_id, int channel) { struct kvaser_cmd *cmd; int rc; cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->id = cmd_id; cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple); cmd->u.simple.channel = channel; cmd->u.simple.tid = 0xff; rc = kvaser_usb_send_cmd(dev, cmd, cmd->len); kfree(cmd); return rc; } static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev, const struct leaf_cmd_softinfo *softinfo) { u32 sw_options = le32_to_cpu(softinfo->sw_options); dev->fw_version = le32_to_cpu(softinfo->fw_version); dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx); if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP) dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP; if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) { /* Firmware expects bittiming parameters calculated for 16MHz * clock, regardless of the actual clock * Though, the reported freq is used for timestamps */ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_16mhz; break; case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_24mhz; break; case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: dev->cfg = 
&kvaser_usb_leaf_m32c_dev_cfg_32mhz; break; } } else { switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_16mhz; break; case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_24mhz; break; case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_32mhz; break; } } } static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) { struct kvaser_cmd cmd; int err; err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0); if (err) return err; err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_REPLY, &cmd); if (err) return err; switch (dev->driver_info->family) { case KVASER_LEAF: kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo); break; case KVASER_USBCAN: dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); dev->max_tx_urbs = le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx); dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg; break; } return 0; } static int kvaser_usb_leaf_get_software_info(struct kvaser_usb *dev) { int err; int retry = 3; /* On some x86 laptops, plugging a Kvaser device again after * an unplug makes the firmware always ignore the very first * command. For such a case, provide some room for retries * instead of completely exiting the driver. */ do { err = kvaser_usb_leaf_get_software_info_inner(dev); } while (--retry && err == -ETIMEDOUT); return err; } static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev) { struct kvaser_cmd cmd; int err; err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_CARD_INFO, 0); if (err) return err; err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CARD_INFO_REPLY, &cmd); if (err) return err; dev->nchannels = cmd.u.cardinfo.nchannels; if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES || (dev->driver_info->family == KVASER_USBCAN && dev->nchannels > MAX_USBCAN_NET_DEVICES)) return -EINVAL; return 0; } static int kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev, u16 cap_cmd_req, u16 *status) { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; struct kvaser_cmd *cmd; u32 value = 0; u32 mask = 0; u16 cap_cmd_res; int err; int i; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->id = CMD_GET_CAPABILITIES_REQ; cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req); err = kvaser_usb_send_cmd(dev, cmd, cmd->len); if (err) goto end; err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd); if (err) goto end; *status = le16_to_cpu(cmd->u.leaf.cap_res.status); if (*status != KVASER_USB_LEAF_CAP_STAT_OK) goto end; cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd); switch (cap_cmd_res) { case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE: case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT: value = le32_to_cpu(cmd->u.leaf.cap_res.value); mask = le32_to_cpu(cmd->u.leaf.cap_res.mask); break; default: dev_warn(&dev->intf->dev, "Unknown capability command %u\n", cap_cmd_res); break; } for (i = 0; i < dev->nchannels; i++) { if (BIT(i) & (value & mask)) { switch (cap_cmd_res) { case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE: card_data->ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; break; case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT: card_data->capabilities |= KVASER_USB_CAP_BERR_CAP; break; } } } end: kfree(cmd); return err; } static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev) { int err; u16 status; if (!(dev->card_data.capabilities & 
KVASER_USB_CAP_EXT_CAP)) { dev_info(&dev->intf->dev, "No extended capability support. Upgrade device firmware.\n"); return 0; } err = kvaser_usb_leaf_get_single_capability(dev, KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE, &status); if (err) return err; if (status) dev_info(&dev->intf->dev, "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n", status); err = kvaser_usb_leaf_get_single_capability(dev, KVASER_USB_LEAF_CAP_CMD_ERR_REPORT, &status); if (err) return err; if (status) dev_info(&dev->intf->dev, "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n", status); return 0; } static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev) { int err = 0; if (dev->driver_info->family == KVASER_LEAF) err = kvaser_usb_leaf_get_capabilities_leaf(dev); return err; } static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct net_device_stats *stats; struct kvaser_usb_tx_urb_context *context; struct kvaser_usb_net_priv *priv; unsigned long flags; u8 channel, tid; struct sk_buff *skb; ktime_t hwtstamp = 0; channel = cmd->u.tx_acknowledge_header.channel; tid = cmd->u.tx_acknowledge_header.tid; if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } priv = dev->nets[channel]; if (!netif_device_present(priv->netdev)) return; stats = &priv->netdev->stats; context = &priv->tx_contexts[tid % dev->max_tx_urbs]; /* Sometimes the state change doesn't come after a bus-off event */ if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) { struct sk_buff *err_skb; struct can_frame *cf; err_skb = alloc_can_err_skb(priv->netdev, &cf); if (err_skb) { cf->can_id |= CAN_ERR_RESTARTED; netif_rx(err_skb); } else { netdev_err(priv->netdev, "No memory left for err_skb\n"); } priv->can.can_stats.restarts++; netif_carrier_on(priv->netdev); priv->can.state = CAN_STATE_ERROR_ACTIVE; } switch (dev->driver_info->family) { case KVASER_LEAF: hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.tx_ack.time); break; case KVASER_USBCAN: hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.tx_ack.time); break; } spin_lock_irqsave(&priv->tx_contexts_lock, flags); skb = priv->can.echo_skb[context->echo_index]; if (skb) skb_hwtstamps(skb)->hwtstamp = hwtstamp; stats->tx_packets++; stats->tx_bytes += can_get_echo_skb(priv->netdev, context->echo_index, NULL); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(priv->netdev); spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); } static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv, u8 cmd_id) { struct kvaser_cmd *cmd; int err; cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return -ENOMEM; cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple); cmd->id = cmd_id; cmd->u.simple.channel = priv->channel; err = kvaser_usb_send_cmd_async(priv, cmd, cmd->len); if (err) kfree(cmd); return err; } static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work) { struct kvaser_usb_net_leaf_priv *leaf = container_of(work, struct kvaser_usb_net_leaf_priv, chip_state_req_work.work); struct kvaser_usb_net_priv *priv = leaf->net; kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE); } static void kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, const struct kvaser_usb_err_summary *es, struct can_frame *cf) { struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; struct kvaser_usb *dev = priv->dev; struct net_device_stats *stats = &priv->netdev->stats; enum 
can_state cur_state, new_state, tx_state, rx_state; netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status); new_state = priv->can.state; cur_state = priv->can.state; if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { new_state = CAN_STATE_BUS_OFF; } else if (es->status & M16C_STATE_BUS_PASSIVE) { new_state = CAN_STATE_ERROR_PASSIVE; } else if ((es->status & M16C_STATE_BUS_ERROR) && cur_state >= CAN_STATE_BUS_OFF) { /* Guard against spurious error events after a busoff */ } else if (es->txerr >= 128 || es->rxerr >= 128) { new_state = CAN_STATE_ERROR_PASSIVE; } else if (es->txerr >= 96 || es->rxerr >= 96) { new_state = CAN_STATE_ERROR_WARNING; } else { new_state = CAN_STATE_ERROR_ACTIVE; } /* 0bfd:0124 FW 4.18.778 was observed to send the initial * CMD_CHIP_STATE_EVENT after CMD_START_CHIP with M16C_STATE_BUS_OFF * bit set if the channel was bus-off when it was last stopped (even * across chip resets). This bit will clear shortly afterwards, without * triggering a second unsolicited chip state event. * Ignore this initial bus-off. */ if (leaf->joining_bus) { if (new_state == CAN_STATE_BUS_OFF) { netdev_dbg(priv->netdev, "ignoring bus-off during startup"); new_state = cur_state; } else { leaf->joining_bus = false; } } if (new_state != cur_state) { tx_state = (es->txerr >= es->rxerr) ? new_state : 0; rx_state = (es->txerr <= es->rxerr) ? new_state : 0; can_change_state(priv->netdev, cf, tx_state, rx_state); } if (priv->can.restart_ms && cur_state == CAN_STATE_BUS_OFF && new_state < CAN_STATE_BUS_OFF) priv->can.can_stats.restarts++; switch (dev->driver_info->family) { case KVASER_LEAF: if (es->leaf.error_factor) { priv->can.can_stats.bus_error++; stats->rx_errors++; } break; case KVASER_USBCAN: if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR) stats->tx_errors++; if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR) stats->rx_errors++; if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) priv->can.can_stats.bus_error++; break; } priv->bec.txerr = es->txerr; priv->bec.rxerr = es->rxerr; } static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, const struct kvaser_usb_err_summary *es) { struct can_frame *cf; struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG, .len = CAN_ERR_DLC }; struct sk_buff *skb; struct net_device_stats *stats; struct kvaser_usb_net_priv *priv; struct kvaser_usb_net_leaf_priv *leaf; enum can_state old_state, new_state; if (es->channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", es->channel); return; } priv = dev->nets[es->channel]; leaf = priv->sub_priv; stats = &priv->netdev->stats; /* Ignore e.g. state change to bus-off reported just after stopping */ if (!netif_running(priv->netdev)) return; /* Update all of the CAN interface's state and error counters before * trying any memory allocation that can actually fail with -ENOMEM. * * We send a temporary stack-allocated error CAN frame to * can_change_state() for the very same reason. * * TODO: Split can_change_state() responsibility between updating the * CAN interface's state and counters, and the setting up of CAN error * frame ID and data to userspace. Remove stack allocation afterwards. */ old_state = priv->can.state; kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf); new_state = priv->can.state; /* If there are errors, request status updates periodically as we do * not get automatic notifications of improved state. * Also request updates if we saw a stale BUS_OFF during startup * (joining_bus). 
*/ if (new_state < CAN_STATE_BUS_OFF && (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE || leaf->joining_bus)) schedule_delayed_work(&leaf->chip_state_req_work, msecs_to_jiffies(500)); skb = alloc_can_err_skb(priv->netdev, &cf); if (!skb) { stats->rx_dropped++; return; } memcpy(cf, &tmp_cf, sizeof(*cf)); if (new_state != old_state) { if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { if (!priv->can.restart_ms) kvaser_usb_leaf_simple_cmd_async(priv, CMD_STOP_CHIP); netif_carrier_off(priv->netdev); } if (priv->can.restart_ms && old_state == CAN_STATE_BUS_OFF && new_state < CAN_STATE_BUS_OFF) { cf->can_id |= CAN_ERR_RESTARTED; netif_carrier_on(priv->netdev); } } switch (dev->driver_info->family) { case KVASER_LEAF: if (es->leaf.error_factor) { cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; if (es->leaf.error_factor & M16C_EF_ACKE) cf->data[3] = CAN_ERR_PROT_LOC_ACK; if (es->leaf.error_factor & M16C_EF_CRCE) cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; if (es->leaf.error_factor & M16C_EF_FORME) cf->data[2] |= CAN_ERR_PROT_FORM; if (es->leaf.error_factor & M16C_EF_STFE) cf->data[2] |= CAN_ERR_PROT_STUFF; if (es->leaf.error_factor & M16C_EF_BITE0) cf->data[2] |= CAN_ERR_PROT_BIT0; if (es->leaf.error_factor & M16C_EF_BITE1) cf->data[2] |= CAN_ERR_PROT_BIT1; if (es->leaf.error_factor & M16C_EF_TRE) cf->data[2] |= CAN_ERR_PROT_TX; } break; case KVASER_USBCAN: if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) cf->can_id |= CAN_ERR_BUSERROR; break; } if (new_state != CAN_STATE_BUS_OFF) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = es->txerr; cf->data[7] = es->rxerr; } netif_rx(skb); } /* For USBCAN, report error to userspace if the channels's errors counter * has changed, or we're the only channel seeing a bus error state. */ static void kvaser_usb_leaf_usbcan_conditionally_rx_error(const struct kvaser_usb *dev, struct kvaser_usb_err_summary *es) { struct kvaser_usb_net_priv *priv; unsigned int channel; bool report_error; channel = es->channel; if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } priv = dev->nets[channel]; report_error = false; if (es->txerr != priv->bec.txerr) { es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR; report_error = true; } if (es->rxerr != priv->bec.rxerr) { es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR; report_error = true; } if ((es->status & M16C_STATE_BUS_ERROR) && !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) { es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR; report_error = true; } if (report_error) kvaser_usb_leaf_rx_error(dev, es); } static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_err_summary es = { }; switch (cmd->id) { /* Sometimes errors are sent as unsolicited chip state events */ case CMD_CHIP_STATE_EVENT: es.channel = cmd->u.usbcan.chip_state_event.channel; es.status = cmd->u.usbcan.chip_state_event.status; es.txerr = cmd->u.usbcan.chip_state_event.tx_errors_count; es.rxerr = cmd->u.usbcan.chip_state_event.rx_errors_count; kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); break; case CMD_CAN_ERROR_EVENT: es.channel = 0; es.status = cmd->u.usbcan.can_error_event.status_ch0; es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0; es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0; es.usbcan.other_ch_status = cmd->u.usbcan.can_error_event.status_ch1; kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); /* The USBCAN firmware supports up to 2 
channels. * Now that ch0 was checked, check if ch1 has any errors. */ if (dev->nchannels == MAX_USBCAN_NET_DEVICES) { es.channel = 1; es.status = cmd->u.usbcan.can_error_event.status_ch1; es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch1; es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch1; es.usbcan.other_ch_status = cmd->u.usbcan.can_error_event.status_ch0; kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); } break; default: dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id); } } static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_err_summary es = { }; switch (cmd->id) { case CMD_CAN_ERROR_EVENT: es.channel = cmd->u.leaf.can_error_event.channel; es.status = cmd->u.leaf.can_error_event.status; es.txerr = cmd->u.leaf.can_error_event.tx_errors_count; es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count; es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor; break; case CMD_LEAF_LOG_MESSAGE: es.channel = cmd->u.leaf.log_message.channel; es.status = cmd->u.leaf.log_message.data[0]; es.txerr = cmd->u.leaf.log_message.data[2]; es.rxerr = cmd->u.leaf.log_message.data[3]; es.leaf.error_factor = cmd->u.leaf.log_message.data[1]; break; case CMD_CHIP_STATE_EVENT: es.channel = cmd->u.leaf.chip_state_event.channel; es.status = cmd->u.leaf.chip_state_event.status; es.txerr = cmd->u.leaf.chip_state_event.tx_errors_count; es.rxerr = cmd->u.leaf.chip_state_event.rx_errors_count; es.leaf.error_factor = 0; break; default: dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id); return; } kvaser_usb_leaf_rx_error(dev, &es); } static void kvaser_usb_leaf_rx_can_err(const struct kvaser_usb_net_priv *priv, const struct kvaser_cmd *cmd) { if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR)) { struct net_device_stats *stats = &priv->netdev->stats; netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n", cmd->u.rx_can_header.flag); stats->rx_errors++; return; } if (cmd->u.rx_can_header.flag & MSG_FLAG_OVERRUN) kvaser_usb_can_rx_over_error(priv->netdev); } static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats; u8 channel = cmd->u.rx_can_header.channel; const u8 *rx_data = NULL; /* GCC */ ktime_t hwtstamp = 0; if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } priv = dev->nets[channel]; stats = &priv->netdev->stats; if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) && (dev->driver_info->family == KVASER_LEAF && cmd->id == CMD_LEAF_LOG_MESSAGE)) { kvaser_usb_leaf_leaf_rx_error(dev, cmd); return; } else if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR | MSG_FLAG_OVERRUN)) { kvaser_usb_leaf_rx_can_err(priv, cmd); return; } else if (cmd->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) { netdev_warn(priv->netdev, "Unhandled frame (flags: 0x%02x)\n", cmd->u.rx_can_header.flag); return; } switch (dev->driver_info->family) { case KVASER_LEAF: rx_data = cmd->u.leaf.rx_can.data; hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.rx_can.time); break; case KVASER_USBCAN: rx_data = cmd->u.usbcan.rx_can.data; hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.rx_can.time); break; } skb = alloc_can_skb(priv->netdev, &cf); if (!skb) { stats->rx_dropped++; return; } if (dev->driver_info->family == KVASER_LEAF && cmd->id == 
CMD_LEAF_LOG_MESSAGE) { cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id); if (cf->can_id & KVASER_EXTENDED_FRAME) cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; else cf->can_id &= CAN_SFF_MASK; can_frame_set_cc_len(cf, cmd->u.leaf.log_message.dlc & 0xF, priv->can.ctrlmode); if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME) cf->can_id |= CAN_RTR_FLAG; else memcpy(cf->data, &cmd->u.leaf.log_message.data, cf->len); } else { cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f); if (cmd->id == CMD_RX_EXT_MESSAGE) { cf->can_id <<= 18; cf->can_id |= ((rx_data[2] & 0x0f) << 14) | ((rx_data[3] & 0xff) << 6) | (rx_data[4] & 0x3f); cf->can_id |= CAN_EFF_FLAG; } can_frame_set_cc_len(cf, rx_data[5] & 0xF, priv->can.ctrlmode); if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME) cf->can_id |= CAN_RTR_FLAG; else memcpy(cf->data, &rx_data[6], cf->len); } skb_hwtstamps(skb)->hwtstamp = hwtstamp; stats->rx_packets++; if (!(cf->can_id & CAN_RTR_FLAG)) stats->rx_bytes += cf->len; netif_rx(skb); } static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { u16 info1 = 0; switch (dev->driver_info->family) { case KVASER_LEAF: info1 = le16_to_cpu(cmd->u.leaf.error_event.info1); break; case KVASER_USBCAN: info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1); break; } /* info1 will contain the offending cmd_no */ switch (info1) { case CMD_SET_CTRL_MODE: dev_warn(&dev->intf->dev, "CMD_SET_CTRL_MODE error in parameter\n"); break; case CMD_SET_BUS_PARAMS: dev_warn(&dev->intf->dev, "CMD_SET_BUS_PARAMS error in parameter\n"); break; default: dev_warn(&dev->intf->dev, "Unhandled parameter error event cmd_no (%u)\n", info1); break; } } static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { u8 error_code = 0; switch (dev->driver_info->family) { case KVASER_LEAF: error_code = cmd->u.leaf.error_event.error_code; break; case KVASER_USBCAN: error_code = cmd->u.usbcan.error_event.error_code; break; } switch (error_code) { case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL: /* Received additional CAN message, when firmware TX queue is * already full. Something is wrong with the driver. * This should never happen! 
*/ dev_err(&dev->intf->dev, "Received error event TX_QUEUE_FULL\n"); break; case KVASER_USB_LEAF_ERROR_EVENT_PARAM: kvaser_usb_leaf_error_event_parameter(dev, cmd); break; default: dev_warn(&dev->intf->dev, "Unhandled error event (%d)\n", error_code); break; } } static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; u8 channel = cmd->u.simple.channel; if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } priv = dev->nets[channel]; if (completion_done(&priv->start_comp) && netif_queue_stopped(priv->netdev)) { netif_wake_queue(priv->netdev); } else { netif_start_queue(priv->netdev); complete(&priv->start_comp); } } static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; u8 channel = cmd->u.simple.channel; if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } priv = dev->nets[channel]; complete(&priv->stop_comp); } static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; u8 channel = cmd->u.busparams.channel; if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } priv = dev->nets[channel]; memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams, sizeof(priv->busparams_nominal)); complete(&priv->get_busparams_comp); } static void kvaser_usb_leaf_handle_command(struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { if (kvaser_usb_leaf_verify_size(dev, cmd) < 0) return; switch (cmd->id) { case CMD_START_CHIP_REPLY: kvaser_usb_leaf_start_chip_reply(dev, cmd); break; case CMD_STOP_CHIP_REPLY: kvaser_usb_leaf_stop_chip_reply(dev, cmd); break; case CMD_RX_STD_MESSAGE: case CMD_RX_EXT_MESSAGE: kvaser_usb_leaf_rx_can_msg(dev, cmd); break; case CMD_LEAF_LOG_MESSAGE: if (dev->driver_info->family != KVASER_LEAF) goto warn; kvaser_usb_leaf_rx_can_msg(dev, cmd); break; case CMD_CHIP_STATE_EVENT: case CMD_CAN_ERROR_EVENT: if (dev->driver_info->family == KVASER_LEAF) kvaser_usb_leaf_leaf_rx_error(dev, cmd); else kvaser_usb_leaf_usbcan_rx_error(dev, cmd); break; case CMD_TX_ACKNOWLEDGE: kvaser_usb_leaf_tx_acknowledge(dev, cmd); break; case CMD_ERROR_EVENT: kvaser_usb_leaf_error_event(dev, cmd); break; case CMD_GET_BUS_PARAMS_REPLY: kvaser_usb_leaf_get_busparams_reply(dev, cmd); break; case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: if (dev->driver_info->family != KVASER_USBCAN) goto warn; dev->card_data.usbcan_timestamp_msb = le32_to_cpu(cmd->u.usbcan.clk_overflow_event.time) & KVASER_USB_USBCAN_CLK_OVERFLOW_MASK; break; /* Ignored commands */ case CMD_FLUSH_QUEUE_REPLY: if (dev->driver_info->family != KVASER_LEAF) goto warn; break; default: warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id); break; } } static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev, void *buf, int len) { struct kvaser_cmd *cmd; int pos = 0; while (pos <= len - CMD_HEADER_LEN) { cmd = buf + pos; /* The Kvaser firmware can only read and write commands that * does not cross the USB's endpoint wMaxPacketSize boundary. * If a follow-up command crosses such boundary, firmware puts * a placeholder zero-length command in its place then aligns * the real command to the next max packet size. 
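 * For example (illustrative numbers only): with a 64-byte wMaxPacketSize, a
 * command that would otherwise start at offset 60 of the bulk buffer is
 * represented by a zero-length placeholder at offset 60, and the real
 * command begins at offset 64.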
* * Handle such cases or we're going to miss a significant * number of events in case of a heavy rx load on the bus. */ if (cmd->len == 0) { pos = round_up(pos, le16_to_cpu (dev->bulk_in->wMaxPacketSize)); continue; } if (pos + cmd->len > len) { dev_err_ratelimited(&dev->intf->dev, "Format error\n"); break; } kvaser_usb_leaf_handle_command(dev, cmd); pos += cmd->len; } } static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv) { struct kvaser_cmd *cmd; int rc; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->id = CMD_SET_CTRL_MODE; cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_ctrl_mode); cmd->u.ctrl_mode.tid = 0xff; cmd->u.ctrl_mode.channel = priv->channel; if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT; else cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL; rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len); kfree(cmd); return rc; } static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv) { struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; int err; leaf->joining_bus = true; reinit_completion(&priv->start_comp); err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP, priv->channel); if (err) return err; if (!wait_for_completion_timeout(&priv->start_comp, msecs_to_jiffies(KVASER_USB_TIMEOUT))) return -ETIMEDOUT; return 0; } static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv) { struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; int err; reinit_completion(&priv->stop_comp); cancel_delayed_work(&leaf->chip_state_req_work); err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP, priv->channel); if (err) return err; if (!wait_for_completion_timeout(&priv->stop_comp, msecs_to_jiffies(KVASER_USB_TIMEOUT))) return -ETIMEDOUT; return 0; } static int kvaser_usb_leaf_reset_chip(struct kvaser_usb *dev, int channel) { return kvaser_usb_leaf_send_simple_cmd(dev, CMD_RESET_CHIP, channel); } static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv) { struct kvaser_cmd *cmd; int rc; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->id = CMD_FLUSH_QUEUE; cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_flush_queue); cmd->u.flush_queue.channel = priv->channel; cmd->u.flush_queue.flags = 0x00; rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len); kfree(cmd); return rc; } static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev) { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; return 0; } static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv) { struct kvaser_usb_net_leaf_priv *leaf; leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL); if (!leaf) return -ENOMEM; leaf->net = priv; INIT_DELAYED_WORK(&leaf->chip_state_req_work, kvaser_usb_leaf_chip_state_req_work); priv->sub_priv = leaf; return 0; } static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv) { struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; if (leaf) cancel_delayed_work_sync(&leaf->chip_state_req_work); } static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev, const struct kvaser_usb_busparams *busparams) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; int rc; cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->id = CMD_SET_BUS_PARAMS; cmd->len = CMD_HEADER_LEN + sizeof(struct 
kvaser_cmd_busparams); cmd->u.busparams.channel = priv->channel; cmd->u.busparams.tid = 0xff; memcpy(&cmd->u.busparams.busparams, busparams, sizeof(cmd->u.busparams.busparams)); rc = kvaser_usb_send_cmd(dev, cmd, cmd->len); kfree(cmd); return rc; } static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv) { int err; if (priv->dev->driver_info->family == KVASER_USBCAN) return -EOPNOTSUPP; reinit_completion(&priv->get_busparams_comp); err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS, priv->channel); if (err) return err; if (!wait_for_completion_timeout(&priv->get_busparams_comp, msecs_to_jiffies(KVASER_USB_TIMEOUT))) return -ETIMEDOUT; return 0; } static int kvaser_usb_leaf_set_mode(struct net_device *netdev, enum can_mode mode) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; int err; switch (mode) { case CAN_MODE_START: kvaser_usb_unlink_tx_urbs(priv); leaf->joining_bus = true; err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP); if (err) return err; priv->can.state = CAN_STATE_ERROR_ACTIVE; break; default: return -EOPNOTSUPP; } return 0; } static int kvaser_usb_leaf_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); *bec = priv->bec; return 0; } static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) { const struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int i; iface_desc = dev->intf->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (!dev->bulk_in && usb_endpoint_is_bulk_in(endpoint)) dev->bulk_in = endpoint; if (!dev->bulk_out && usb_endpoint_is_bulk_out(endpoint)) dev->bulk_out = endpoint; /* use first bulk endpoint for in and out */ if (dev->bulk_in && dev->bulk_out) return 0; } return -ENODEV; } const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { .dev_set_mode = kvaser_usb_leaf_set_mode, .dev_set_bittiming = kvaser_usb_leaf_set_bittiming, .dev_get_busparams = kvaser_usb_leaf_get_busparams, .dev_set_data_bittiming = NULL, .dev_get_data_busparams = NULL, .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter, .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints, .dev_init_card = kvaser_usb_leaf_init_card, .dev_init_channel = kvaser_usb_leaf_init_channel, .dev_remove_channel = kvaser_usb_leaf_remove_channel, .dev_get_software_info = kvaser_usb_leaf_get_software_info, .dev_get_software_details = NULL, .dev_get_card_info = kvaser_usb_leaf_get_card_info, .dev_get_capabilities = kvaser_usb_leaf_get_capabilities, .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode, .dev_start_chip = kvaser_usb_leaf_start_chip, .dev_stop_chip = kvaser_usb_leaf_stop_chip, .dev_reset_chip = kvaser_usb_leaf_reset_chip, .dev_flush_queue = kvaser_usb_leaf_flush_queue, .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback, .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd, }; |
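/*
 * Worked example for the receive path above (illustrative values, not taken
 * from a real trace): a CMD_RX_STD_MESSAGE with rx_data[0] = 0x12 and
 * rx_data[1] = 0x34 decodes in kvaser_usb_leaf_rx_can_msg() to
 *
 *	can_id = ((0x12 & 0x1f) << 6) | (0x34 & 0x3f) = 0x4b4
 *
 * For CMD_RX_EXT_MESSAGE the same 11 bits form the top of the identifier
 * (shifted left by 18) and the remaining 18 bits come from rx_data[2..4],
 * giving a 29-bit ID with CAN_EFF_FLAG set.
 *
 * Similarly, kvaser_usb_leaf_rx_error_update_can_state() maps the raw error
 * counters to a CAN state when no bus-off/passive bit is set in es->status:
 * txerr or rxerr >= 128 gives CAN_STATE_ERROR_PASSIVE, >= 96 gives
 * CAN_STATE_ERROR_WARNING, anything lower stays CAN_STATE_ERROR_ACTIVE.
 */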
// SPDX-License-Identifier: GPL-2.0-only /* * HID raw devices, giving access to raw HID events. * * In comparison to hiddev, this device does not process the * HID events at all (no parsing, no lookups). This lets applications * work on raw HID events as they see fit, and avoids the need for * transport-specific userspace libraries such as libhid or libusb.
* * Copyright (c) 2007-2014 Jiri Kosina */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/fs.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/cdev.h> #include <linux/poll.h> #include <linux/device.h> #include <linux/major.h> #include <linux/slab.h> #include <linux/hid.h> #include <linux/mutex.h> #include <linux/sched/signal.h> #include <linux/string.h> #include <linux/hidraw.h> static int hidraw_major; static struct cdev hidraw_cdev; static const struct class hidraw_class = { .name = "hidraw", }; static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES]; static DECLARE_RWSEM(minors_rwsem); static inline bool hidraw_is_revoked(struct hidraw_list *list) { return list->revoked; } static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct hidraw_list *list = file->private_data; int ret = 0, len; DECLARE_WAITQUEUE(wait, current); if (hidraw_is_revoked(list)) return -ENODEV; mutex_lock(&list->read_mutex); while (ret == 0) { if (list->head == list->tail) { add_wait_queue(&list->hidraw->wait, &wait); set_current_state(TASK_INTERRUPTIBLE); while (list->head == list->tail) { if (signal_pending(current)) { ret = -ERESTARTSYS; break; } if (!list->hidraw->exist) { ret = -EIO; break; } if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } /* allow O_NONBLOCK to work well from other threads */ mutex_unlock(&list->read_mutex); schedule(); mutex_lock(&list->read_mutex); set_current_state(TASK_INTERRUPTIBLE); } set_current_state(TASK_RUNNING); remove_wait_queue(&list->hidraw->wait, &wait); } if (ret) goto out; len = list->buffer[list->tail].len > count ? count : list->buffer[list->tail].len; if (list->buffer[list->tail].value) { if (copy_to_user(buffer, list->buffer[list->tail].value, len)) { ret = -EFAULT; goto out; } ret = len; } kfree(list->buffer[list->tail].value); list->buffer[list->tail].value = NULL; list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1); } out: mutex_unlock(&list->read_mutex); return ret; } /* * The first byte of the report buffer is expected to be a report number. */ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type) { unsigned int minor = iminor(file_inode(file)); struct hid_device *dev; __u8 *buf; int ret = 0; lockdep_assert_held(&minors_rwsem); if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { ret = -ENODEV; goto out; } dev = hidraw_table[minor]->hid; if (count > HID_MAX_BUFFER_SIZE) { hid_warn(dev, "pid %d passed too large report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } if (count < 2) { hid_warn(dev, "pid %d passed too short report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } buf = memdup_user(buffer, count); if (IS_ERR(buf)) { ret = PTR_ERR(buf); goto out; } if ((report_type == HID_OUTPUT_REPORT) && !(dev->quirks & HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP)) { ret = __hid_hw_output_report(dev, buf, count, (u64)(long)file, false); /* * compatibility with old implementation of USB-HID and I2C-HID: * if the device does not support receiving output reports, * on an interrupt endpoint, fallback to SET_REPORT HID command. 
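 * The transport signals that case with -ENOSYS; any other return value,
 * success or error, is handed straight back to the caller.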
*/ if (ret != -ENOSYS) goto out_free; } ret = __hid_hw_raw_request(dev, buf[0], buf, count, report_type, HID_REQ_SET_REPORT, (u64)(long)file, false); out_free: kfree(buf); out: return ret; } static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct hidraw_list *list = file->private_data; ssize_t ret; down_read(&minors_rwsem); if (hidraw_is_revoked(list)) ret = -ENODEV; else ret = hidraw_send_report(file, buffer, count, HID_OUTPUT_REPORT); up_read(&minors_rwsem); return ret; } /* * This function performs a Get_Report transfer over the control endpoint * per section 7.2.1 of the HID specification, version 1.1. The first byte * of buffer is the report number to request, or 0x0 if the device does not * use numbered reports. The report_type parameter can be HID_FEATURE_REPORT * or HID_INPUT_REPORT. */ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type) { unsigned int minor = iminor(file_inode(file)); struct hid_device *dev; __u8 *buf; int ret = 0, len; unsigned char report_number; lockdep_assert_held(&minors_rwsem); if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { ret = -ENODEV; goto out; } dev = hidraw_table[minor]->hid; if (!dev->ll_driver->raw_request) { ret = -ENODEV; goto out; } if (count > HID_MAX_BUFFER_SIZE) { hid_warn(dev, "pid %d passed too large report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } if (count < 2) { hid_warn(dev, "pid %d passed too short report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } buf = kmalloc(count, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto out; } /* * Read the first byte from the user. This is the report number, * which is passed to hid_hw_raw_request(). */ if (copy_from_user(&report_number, buffer, 1)) { ret = -EFAULT; goto out_free; } ret = __hid_hw_raw_request(dev, report_number, buf, count, report_type, HID_REQ_GET_REPORT, (u64)(long)file, false); if (ret < 0) goto out_free; len = (ret < count) ? ret : count; if (copy_to_user(buffer, buf, len)) { ret = -EFAULT; goto out_free; } ret = len; out_free: kfree(buf); out: return ret; } static __poll_t hidraw_poll(struct file *file, poll_table *wait) { struct hidraw_list *list = file->private_data; __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */ poll_wait(file, &list->hidraw->wait, wait); if (list->head != list->tail) mask |= EPOLLIN | EPOLLRDNORM; if (!list->hidraw->exist || hidraw_is_revoked(list)) mask |= EPOLLERR | EPOLLHUP; return mask; } static int hidraw_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct hidraw *dev; struct hidraw_list *list; unsigned long flags; int err = 0; if (!(list = kzalloc(sizeof(struct hidraw_list), GFP_KERNEL))) { err = -ENOMEM; goto out; } /* * Technically not writing to the hidraw_table but a write lock is * required to protect the device refcount. This is symmetrical to * hidraw_release(). 
*/ down_write(&minors_rwsem); if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { err = -ENODEV; goto out_unlock; } dev = hidraw_table[minor]; if (!dev->open++) { err = hid_hw_power(dev->hid, PM_HINT_FULLON); if (err < 0) { dev->open--; goto out_unlock; } err = hid_hw_open(dev->hid); if (err < 0) { hid_hw_power(dev->hid, PM_HINT_NORMAL); dev->open--; goto out_unlock; } } list->hidraw = hidraw_table[minor]; mutex_init(&list->read_mutex); spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags); list_add_tail(&list->node, &hidraw_table[minor]->list); spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags); file->private_data = list; out_unlock: up_write(&minors_rwsem); out: if (err < 0) kfree(list); return err; } static int hidraw_fasync(int fd, struct file *file, int on) { struct hidraw_list *list = file->private_data; if (hidraw_is_revoked(list)) return -ENODEV; return fasync_helper(fd, file, on, &list->fasync); } static void drop_ref(struct hidraw *hidraw, int exists_bit) { if (exists_bit) { hidraw->exist = 0; if (hidraw->open) { hid_hw_close(hidraw->hid); wake_up_interruptible(&hidraw->wait); } device_destroy(&hidraw_class, MKDEV(hidraw_major, hidraw->minor)); } else { --hidraw->open; } if (!hidraw->open) { if (!hidraw->exist) { hidraw_table[hidraw->minor] = NULL; kfree(hidraw); } else { /* close device for last reader */ hid_hw_close(hidraw->hid); hid_hw_power(hidraw->hid, PM_HINT_NORMAL); } } } static int hidraw_release(struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct hidraw_list *list = file->private_data; unsigned long flags; down_write(&minors_rwsem); spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags); while (list->tail != list->head) { kfree(list->buffer[list->tail].value); list->buffer[list->tail].value = NULL; list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1); } list_del(&list->node); spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags); kfree(list); drop_ref(hidraw_table[minor], 0); up_write(&minors_rwsem); return 0; } static int hidraw_revoke(struct hidraw_list *list) { list->revoked = true; return 0; } static long hidraw_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(file); unsigned int minor = iminor(inode); long ret = 0; struct hidraw *dev; struct hidraw_list *list = file->private_data; void __user *user_arg = (void __user*) arg; down_read(&minors_rwsem); dev = hidraw_table[minor]; if (!dev || !dev->exist || hidraw_is_revoked(list)) { ret = -ENODEV; goto out; } switch (cmd) { case HIDIOCGRDESCSIZE: if (put_user(dev->hid->rsize, (int __user *)arg)) ret = -EFAULT; break; case HIDIOCGRDESC: { __u32 len; if (get_user(len, (int __user *)arg)) ret = -EFAULT; else if (len > HID_MAX_DESCRIPTOR_SIZE - 1) ret = -EINVAL; else if (copy_to_user(user_arg + offsetof( struct hidraw_report_descriptor, value[0]), dev->hid->rdesc, min(dev->hid->rsize, len))) ret = -EFAULT; break; } case HIDIOCGRAWINFO: { struct hidraw_devinfo dinfo; dinfo.bustype = dev->hid->bus; dinfo.vendor = dev->hid->vendor; dinfo.product = dev->hid->product; if (copy_to_user(user_arg, &dinfo, sizeof(dinfo))) ret = -EFAULT; break; } case HIDIOCREVOKE: { if (user_arg) ret = -EINVAL; else ret = hidraw_revoke(list); break; } default: { struct hid_device *hid = dev->hid; if (_IOC_TYPE(cmd) != 'H') { ret = -EINVAL; break; } if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSFEATURE(0))) { int len = _IOC_SIZE(cmd); ret = hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT); break; } if (_IOC_NR(cmd) == 
_IOC_NR(HIDIOCGFEATURE(0))) { int len = _IOC_SIZE(cmd); ret = hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT); break; } if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSINPUT(0))) { int len = _IOC_SIZE(cmd); ret = hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT); break; } if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGINPUT(0))) { int len = _IOC_SIZE(cmd); ret = hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT); break; } if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSOUTPUT(0))) { int len = _IOC_SIZE(cmd); ret = hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT); break; } if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGOUTPUT(0))) { int len = _IOC_SIZE(cmd); ret = hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT); break; } /* Begin Read-only ioctls. */ if (_IOC_DIR(cmd) != _IOC_READ) { ret = -EINVAL; break; } if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWNAME(0))) { int len = strlen(hid->name) + 1; if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd); ret = copy_to_user(user_arg, hid->name, len) ? -EFAULT : len; break; } if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWPHYS(0))) { int len = strlen(hid->phys) + 1; if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd); ret = copy_to_user(user_arg, hid->phys, len) ? -EFAULT : len; break; } if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) { int len = strlen(hid->uniq) + 1; if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd); ret = copy_to_user(user_arg, hid->uniq, len) ? -EFAULT : len; break; } } ret = -ENOTTY; } out: up_read(&minors_rwsem); return ret; } static const struct file_operations hidraw_ops = { .owner = THIS_MODULE, .read = hidraw_read, .write = hidraw_write, .poll = hidraw_poll, .open = hidraw_open, .release = hidraw_release, .unlocked_ioctl = hidraw_ioctl, .fasync = hidraw_fasync, .compat_ioctl = compat_ptr_ioctl, .llseek = noop_llseek, }; int hidraw_report_event(struct hid_device *hid, u8 *data, int len) { struct hidraw *dev = hid->hidraw; struct hidraw_list *list; int ret = 0; unsigned long flags; spin_lock_irqsave(&dev->list_lock, flags); list_for_each_entry(list, &dev->list, node) { int new_head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1); if (hidraw_is_revoked(list) || new_head == list->tail) continue; if (!(list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC))) { ret = -ENOMEM; break; } list->buffer[list->head].len = len; list->head = new_head; kill_fasync(&list->fasync, SIGIO, POLL_IN); } spin_unlock_irqrestore(&dev->list_lock, flags); wake_up_interruptible(&dev->wait); return ret; } EXPORT_SYMBOL_GPL(hidraw_report_event); int hidraw_connect(struct hid_device *hid) { int minor, result; struct hidraw *dev; /* we accept any HID device, all applications */ dev = kzalloc(sizeof(struct hidraw), GFP_KERNEL); if (!dev) return -ENOMEM; result = -EINVAL; down_write(&minors_rwsem); for (minor = 0; minor < HIDRAW_MAX_DEVICES; minor++) { if (hidraw_table[minor]) continue; hidraw_table[minor] = dev; result = 0; break; } if (result) { up_write(&minors_rwsem); kfree(dev); goto out; } dev->dev = device_create(&hidraw_class, &hid->dev, MKDEV(hidraw_major, minor), NULL, "%s%d", "hidraw", minor); if (IS_ERR(dev->dev)) { hidraw_table[minor] = NULL; up_write(&minors_rwsem); result = PTR_ERR(dev->dev); kfree(dev); goto out; } init_waitqueue_head(&dev->wait); spin_lock_init(&dev->list_lock); INIT_LIST_HEAD(&dev->list); dev->hid = hid; dev->minor = minor; dev->exist = 1; hid->hidraw = dev; up_write(&minors_rwsem); out: return result; } EXPORT_SYMBOL_GPL(hidraw_connect); void hidraw_disconnect(struct hid_device *hid) { struct hidraw *hidraw = hid->hidraw; 
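	/* Hold the writer lock so the final drop_ref() cannot race with
	 * hidraw_open()/hidraw_release() walking hidraw_table[].
	 */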
down_write(&minors_rwsem); drop_ref(hidraw, 1); up_write(&minors_rwsem); } EXPORT_SYMBOL_GPL(hidraw_disconnect); int __init hidraw_init(void) { int result; dev_t dev_id; result = alloc_chrdev_region(&dev_id, HIDRAW_FIRST_MINOR, HIDRAW_MAX_DEVICES, "hidraw"); if (result < 0) { pr_warn("can't get major number\n"); goto out; } hidraw_major = MAJOR(dev_id); result = class_register(&hidraw_class); if (result) goto error_cdev; cdev_init(&hidraw_cdev, &hidraw_ops); result = cdev_add(&hidraw_cdev, dev_id, HIDRAW_MAX_DEVICES); if (result < 0) goto error_class; pr_info("raw HID events driver (C) Jiri Kosina\n"); out: return result; error_class: class_unregister(&hidraw_class); error_cdev: unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES); goto out; } void hidraw_exit(void) { dev_t dev_id = MKDEV(hidraw_major, 0); cdev_del(&hidraw_cdev); class_unregister(&hidraw_class); unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES); } |
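/*
 * Minimal userspace sketch of the character-device interface implemented
 * above (illustrative only, not part of the driver): query the device info
 * and report-descriptor size via ioctl(), then read one raw input report.
 * The "/dev/hidraw0"-style node path and the 256-byte buffer are
 * assumptions; the real node depends on the minor assigned in
 * hidraw_connect(), and some devices send larger reports.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hidraw.h>

int hidraw_example(const char *node)
{
	struct hidraw_devinfo info;
	unsigned char buf[256];
	int fd, desc_size, n;

	fd = open(node, O_RDONLY);
	if (fd < 0)
		return -1;

	/* Both ioctls are served by hidraw_ioctl() above. */
	if (ioctl(fd, HIDIOCGRAWINFO, &info) == 0)
		printf("bus %u vendor 0x%04x product 0x%04x\n",
		       info.bustype, info.vendor & 0xffff, info.product & 0xffff);
	if (ioctl(fd, HIDIOCGRDESCSIZE, &desc_size) == 0)
		printf("report descriptor is %d bytes\n", desc_size);

	/* Blocks in hidraw_read() until hidraw_report_event() queues a report. */
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("read %d byte report\n", n);

	close(fd);
	return 0;
}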
// SPDX-License-Identifier: GPL-2.0-or-later /* * SPCA508 chip based cameras subdriver * * Copyright (C) 2009 Jean-Francois Moine <http://moinejf.free.fr> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "spca508" #include "gspca.h" MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>"); MODULE_DESCRIPTION("GSPCA/SPCA508 USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !!
must be the first item */ u8 subtype; #define CreativeVista 0 #define HamaUSBSightcam 1 #define HamaUSBSightcam2 2 #define IntelEasyPCCamera 3 #define MicroInnovationIC200 4 #define ViewQuestVQ110 5 }; static const struct v4l2_pix_format sif_mode[] = { {160, 120, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 3 / 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 3}, {176, 144, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2}, {320, 240, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, {352, 288, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 * 3 / 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; /* Frame packet header offsets for the spca508 */ #define SPCA508_OFFSET_DATA 37 /* * Initialization data: this is the first set-up data written to the * device (before the open data). */ static const u16 spca508_init_data[][2] = { {0x0000, 0x870b}, {0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */ {0x0003, 0x8111}, /* Reset compression & memory */ {0x0000, 0x8110}, /* Disable all outputs */ /* READ {0x0000, 0x8114} -> 0000: 00 */ {0x0000, 0x8114}, /* SW GPIO data */ {0x0008, 0x8110}, /* Enable charge pump output */ {0x0002, 0x8116}, /* 200 kHz pump clock */ /* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE:) */ {0x0003, 0x8111}, /* Reset compression & memory */ {0x0000, 0x8111}, /* Normal mode (not reset) */ {0x0098, 0x8110}, /* Enable charge pump output, sync.serial,external 2x clock */ {0x000d, 0x8114}, /* SW GPIO data */ {0x0002, 0x8116}, /* 200 kHz pump clock */ {0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */ /* --------------------------------------- */ {0x000f, 0x8402}, /* memory bank */ {0x0000, 0x8403}, /* ... address */ /* --------------------------------------- */ /* 0x88__ is Synchronous Serial Interface. */ /* TBD: This table could be expressed more compactly */ /* using spca508_write_i2c_vector(). */ /* TBD: Should see if the values in spca50x_i2c_data */ /* would work with the VQ110 instead of the values */ /* below. 
*/ {0x00c0, 0x8804}, /* SSI slave addr */ {0x0008, 0x8802}, /* 375 Khz SSI clock */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, /* 375 Khz SSI clock */ {0x0012, 0x8801}, /* SSI reg addr */ {0x0080, 0x8800}, /* SSI data to write */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, /* 375 Khz SSI clock */ {0x0012, 0x8801}, /* SSI reg addr */ {0x0000, 0x8800}, /* SSI data to write */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, /* 375 Khz SSI clock */ {0x0011, 0x8801}, /* SSI reg addr */ {0x0040, 0x8800}, /* SSI data to write */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0013, 0x8801}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0014, 0x8801}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0015, 0x8801}, {0x0001, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0016, 0x8801}, {0x0003, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0017, 0x8801}, {0x0036, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0018, 0x8801}, {0x00ec, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x001a, 0x8801}, {0x0094, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x001b, 0x8801}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0027, 0x8801}, {0x00a2, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0028, 0x8801}, {0x0040, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x002a, 0x8801}, {0x0084, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x002b, 0x8801}, {0x00a8, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x002c, 0x8801}, {0x00fe, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x002d, 0x8801}, {0x0003, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0038, 0x8801}, {0x0083, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 
0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0033, 0x8801}, {0x0081, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0034, 0x8801}, {0x004a, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0039, 0x8801}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0010, 0x8801}, {0x00a8, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0006, 0x8801}, {0x0058, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0000, 0x8801}, {0x0004, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0040, 0x8801}, {0x0080, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0041, 0x8801}, {0x000c, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0042, 0x8801}, {0x000c, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0043, 0x8801}, {0x0028, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0044, 0x8801}, {0x0080, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0045, 0x8801}, {0x0020, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0046, 0x8801}, {0x0020, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0047, 0x8801}, {0x0080, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0048, 0x8801}, {0x004c, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x0049, 0x8801}, {0x0084, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x004a, 0x8801}, {0x0084, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x0008, 0x8802}, {0x004b, 0x8801}, {0x0084, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* --------------------------------------- */ {0x0012, 0x8700}, /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */ {0x0000, 0x8701}, /* CKx1 clock delay adj */ {0x0000, 0x8701}, /* CKx1 clock delay adj */ {0x0001, 0x870c}, /* CKOx2 output */ /* --------------------------------------- */ {0x0080, 0x8600}, /* Line memory read counter (L) */ {0x0001, 
0x8606}, /* reserved */ {0x0064, 0x8607}, /* Line memory read counter (H) 0x6480=25,728 */ {0x002a, 0x8601}, /* CDSP sharp interpolation mode, * line sel for color sep, edge enhance enab */ {0x0000, 0x8602}, /* optical black level for user settng = 0 */ {0x0080, 0x8600}, /* Line memory read counter (L) */ {0x000a, 0x8603}, /* optical black level calc mode: * auto; optical black offset = 10 */ {0x00df, 0x865b}, /* Horiz offset for valid pixels (L)=0xdf */ {0x0012, 0x865c}, /* Vert offset for valid lines (L)=0x12 */ /* The following two lines seem to be the "wrong" resolution. */ /* But perhaps these indicate the actual size of the sensor */ /* rather than the size of the current video mode. */ {0x0058, 0x865d}, /* Horiz valid pixels (*4) (L) = 352 */ {0x0048, 0x865e}, /* Vert valid lines (*4) (L) = 288 */ {0x0015, 0x8608}, /* A11 Coef ... */ {0x0030, 0x8609}, {0x00fb, 0x860a}, {0x003e, 0x860b}, {0x00ce, 0x860c}, {0x00f4, 0x860d}, {0x00eb, 0x860e}, {0x00dc, 0x860f}, {0x0039, 0x8610}, {0x0001, 0x8611}, /* R offset for white balance ... */ {0x0000, 0x8612}, {0x0001, 0x8613}, {0x0000, 0x8614}, {0x005b, 0x8651}, /* R gain for white balance ... */ {0x0040, 0x8652}, {0x0060, 0x8653}, {0x0040, 0x8654}, {0x0000, 0x8655}, {0x0001, 0x863f}, /* Fixed gamma correction enable, USB control, * lum filter disable, lum noise clip disable */ {0x00a1, 0x8656}, /* Window1 size 256x256, Windows2 size 64x64, * gamma look-up disable, * new edge enhancement enable */ {0x0018, 0x8657}, /* Edge gain high thresh */ {0x0020, 0x8658}, /* Edge gain low thresh */ {0x000a, 0x8659}, /* Edge bandwidth high threshold */ {0x0005, 0x865a}, /* Edge bandwidth low threshold */ /* -------------------------------- */ {0x0030, 0x8112}, /* Video drop enable, ISO streaming enable */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0xa908, 0x8802}, {0x0034, 0x8801}, /* SSI reg addr */ {0x00ca, 0x8800}, /* SSI data to write */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0x1f08, 0x8802}, {0x0006, 0x8801}, {0x0080, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* ----- Read back coefs we wrote earlier. */ /* READ { 0x0000, 0x8608 } -> 0000: 15 */ /* READ { 0x0000, 0x8609 } -> 0000: 30 */ /* READ { 0x0000, 0x860a } -> 0000: fb */ /* READ { 0x0000, 0x860b } -> 0000: 3e */ /* READ { 0x0000, 0x860c } -> 0000: ce */ /* READ { 0x0000, 0x860d } -> 0000: f4 */ /* READ { 0x0000, 0x860e } -> 0000: eb */ /* READ { 0x0000, 0x860f } -> 0000: dc */ /* READ { 0x0000, 0x8610 } -> 0000: 39 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 08 */ {0xb008, 0x8802}, {0x0006, 0x8801}, {0x007d, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* This chunk is seemingly redundant with */ /* earlier commands (A11 Coef...), but if I disable it, */ /* the image appears too dark. Maybe there was some kind of */ /* reset since the earlier commands, so this is necessary again. */ {0x0015, 0x8608}, {0x0030, 0x8609}, {0xfffb, 0x860a}, {0x003e, 0x860b}, {0xffce, 0x860c}, {0xfff4, 0x860d}, {0xffeb, 0x860e}, {0xffdc, 0x860f}, {0x0039, 0x8610}, {0x0018, 0x8657}, {0x0000, 0x8508}, /* Disable compression. */ /* Previous line was: {0x0021, 0x8508}, * Enable compression. 
*/ {0x0032, 0x850b}, /* compression stuff */ {0x0003, 0x8509}, /* compression stuff */ {0x0011, 0x850a}, /* compression stuff */ {0x0021, 0x850d}, /* compression stuff */ {0x0010, 0x850c}, /* compression stuff */ {0x0003, 0x8500}, /* *** Video mode: 160x120 */ {0x0001, 0x8501}, /* Hardware-dominated snap control */ {0x0061, 0x8656}, /* Window1 size 128x128, Windows2 size 128x128, * gamma look-up disable, * new edge enhancement enable */ {0x0018, 0x8617}, /* Window1 start X (*2) */ {0x0008, 0x8618}, /* Window1 start Y (*2) */ {0x0061, 0x8656}, /* Window1 size 128x128, Windows2 size 128x128, * gamma look-up disable, * new edge enhancement enable */ {0x0058, 0x8619}, /* Window2 start X (*2) */ {0x0008, 0x861a}, /* Window2 start Y (*2) */ {0x00ff, 0x8615}, /* High lum thresh for white balance */ {0x0000, 0x8616}, /* Low lum thresh for white balance */ {0x0012, 0x8700}, /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */ {0x0012, 0x8700}, /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */ /* READ { 0x0000, 0x8656 } -> 0000: 61 */ {0x0028, 0x8802}, /* 375 Khz SSI clock, SSI r/w sync with VSYNC */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 28 */ {0x1f28, 0x8802}, /* 375 Khz SSI clock, SSI r/w sync with VSYNC */ {0x0010, 0x8801}, /* SSI reg addr */ {0x003e, 0x8800}, /* SSI data to write */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ {0x0028, 0x8802}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 28 */ {0x1f28, 0x8802}, {0x0000, 0x8801}, {0x001f, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ {0x0001, 0x8602}, /* optical black level for user settning = 1 */ /* Original: */ {0x0023, 0x8700}, /* Clock speed 48Mhz/(3+2)/4= 2.4 Mhz */ {0x000f, 0x8602}, /* optical black level for user settning = 15 */ {0x0028, 0x8802}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 28 */ {0x1f28, 0x8802}, {0x0010, 0x8801}, {0x007b, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ {0x002f, 0x8651}, /* R gain for white balance ... 
*/ {0x0080, 0x8653}, /* READ { 0x0000, 0x8655 } -> 0000: 00 */ {0x0000, 0x8655}, {0x0030, 0x8112}, /* Video drop enable, ISO streaming enable */ {0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */ /* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE: (ALT=0) ) */ {} }; /* * Initialization data for Intel EasyPC Camera CS110 */ static const u16 spca508cs110_init_data[][2] = { {0x0000, 0x870b}, /* Reset CTL3 */ {0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */ {0x0000, 0x8111}, /* Normal operation on reset */ {0x0090, 0x8110}, /* External Clock 2x & Synchronous Serial Interface Output */ {0x0020, 0x8112}, /* Video Drop packet enable */ {0x0000, 0x8114}, /* Software GPIO output data */ {0x0001, 0x8114}, {0x0001, 0x8114}, {0x0001, 0x8114}, {0x0003, 0x8114}, /* Initial sequence Synchronous Serial Interface */ {0x000f, 0x8402}, /* Memory bank Address */ {0x0000, 0x8403}, /* Memory bank Address */ {0x00ba, 0x8804}, /* SSI Slave address */ {0x0010, 0x8802}, /* 93.75kHz SSI Clock Two DataByte */ {0x0010, 0x8802}, /* 93.75kHz SSI Clock two DataByte */ {0x0001, 0x8801}, {0x000a, 0x8805}, /* a - NWG: Dunno what this is about */ {0x0000, 0x8800}, {0x0010, 0x8802}, {0x0002, 0x8801}, {0x0000, 0x8805}, {0x0000, 0x8800}, {0x0010, 0x8802}, {0x0003, 0x8801}, {0x0027, 0x8805}, {0x0001, 0x8800}, {0x0010, 0x8802}, {0x0004, 0x8801}, {0x0065, 0x8805}, {0x0001, 0x8800}, {0x0010, 0x8802}, {0x0005, 0x8801}, {0x0003, 0x8805}, {0x0000, 0x8800}, {0x0010, 0x8802}, {0x0006, 0x8801}, {0x001c, 0x8805}, {0x0000, 0x8800}, {0x0010, 0x8802}, {0x0007, 0x8801}, {0x002a, 0x8805}, {0x0000, 0x8800}, {0x0010, 0x8802}, {0x0002, 0x8704}, /* External input CKIx1 */ {0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */ {0x009a, 0x8600}, /* Line memory Read Counter (L) */ {0x0001, 0x865b}, /* 1 Horizontal Offset for Valid Pixel(L) */ {0x0003, 0x865c}, /* 3 Vertical Offset for Valid Lines(L) */ {0x0058, 0x865d}, /* 58 Horizontal Valid Pixel Window(L) */ {0x0006, 0x8660}, /* Nibble data + input order */ {0x000a, 0x8602}, /* Optical black level set to 0x0a */ {0x0000, 0x8603}, /* Optical black level Offset */ /* {0x0000, 0x8611}, * 0 R Offset for white Balance */ /* {0x0000, 0x8612}, * 1 Gr Offset for white Balance */ /* {0x0000, 0x8613}, * 1f B Offset for white Balance */ /* {0x0000, 0x8614}, * f0 Gb Offset for white Balance */ {0x0040, 0x8651}, /* 2b BLUE gain for white balance good at all 60 */ {0x0030, 0x8652}, /* 41 Gr Gain for white Balance (L) */ {0x0035, 0x8653}, /* 26 RED gain for white balance */ {0x0035, 0x8654}, /* 40Gb Gain for white Balance (L) */ {0x0041, 0x863f}, /* Fixed Gamma correction enabled (makes colours look better) */ {0x0000, 0x8655}, /* High bits for white balance*****brightness control*** */ {} }; static const u16 spca508_sightcam_init_data[][2] = { /* This line seems to setup the frame/canvas */ {0x000f, 0x8402}, /* These 6 lines are needed to startup the webcam */ {0x0090, 0x8110}, {0x0001, 0x8114}, {0x0001, 0x8114}, {0x0001, 0x8114}, {0x0003, 0x8114}, {0x0080, 0x8804}, /* This part seems to make the pictures darker? (autobrightness?) */ {0x0001, 0x8801}, {0x0004, 0x8800}, {0x0003, 0x8801}, {0x00e0, 0x8800}, {0x0004, 0x8801}, {0x00b4, 0x8800}, {0x0005, 0x8801}, {0x0000, 0x8800}, {0x0006, 0x8801}, {0x00e0, 0x8800}, {0x0007, 0x8801}, {0x000c, 0x8800}, /* This section is just needed, it probably * does something like the previous section, * but the cam won't start if it's not included. 
*/ {0x0014, 0x8801}, {0x0008, 0x8800}, {0x0015, 0x8801}, {0x0067, 0x8800}, {0x0016, 0x8801}, {0x0000, 0x8800}, {0x0017, 0x8801}, {0x0020, 0x8800}, {0x0018, 0x8801}, {0x0044, 0x8800}, /* Makes the picture darker - and the * cam won't start if not included */ {0x001e, 0x8801}, {0x00ea, 0x8800}, {0x001f, 0x8801}, {0x0001, 0x8800}, {0x0003, 0x8801}, {0x00e0, 0x8800}, /* seems to place the colors ontop of each other #1 */ {0x0006, 0x8704}, {0x0001, 0x870c}, {0x0016, 0x8600}, {0x0002, 0x8606}, /* if not included the pictures becomes _very_ dark */ {0x0064, 0x8607}, {0x003a, 0x8601}, {0x0000, 0x8602}, /* seems to place the colors ontop of each other #2 */ {0x0016, 0x8600}, {0x0018, 0x8617}, {0x0008, 0x8618}, {0x00a1, 0x8656}, /* webcam won't start if not included */ {0x0007, 0x865b}, {0x0001, 0x865c}, {0x0058, 0x865d}, {0x0048, 0x865e}, /* adjusts the colors */ {0x0049, 0x8651}, {0x0040, 0x8652}, {0x004c, 0x8653}, {0x0040, 0x8654}, {} }; static const u16 spca508_sightcam2_init_data[][2] = { {0x0020, 0x8112}, {0x000f, 0x8402}, {0x0000, 0x8403}, {0x0008, 0x8201}, {0x0008, 0x8200}, {0x0001, 0x8200}, {0x0009, 0x8201}, {0x0008, 0x8200}, {0x0001, 0x8200}, {0x000a, 0x8201}, {0x0008, 0x8200}, {0x0001, 0x8200}, {0x000b, 0x8201}, {0x0008, 0x8200}, {0x0001, 0x8200}, {0x000c, 0x8201}, {0x0008, 0x8200}, {0x0001, 0x8200}, {0x000d, 0x8201}, {0x0008, 0x8200}, {0x0001, 0x8200}, {0x000e, 0x8201}, {0x0008, 0x8200}, {0x0001, 0x8200}, {0x0007, 0x8201}, {0x0008, 0x8200}, {0x0001, 0x8200}, {0x000f, 0x8201}, {0x0008, 0x8200}, {0x0001, 0x8200}, {0x0018, 0x8660}, {0x0010, 0x8201}, {0x0008, 0x8200}, {0x0001, 0x8200}, {0x0011, 0x8201}, {0x0008, 0x8200}, {0x0001, 0x8200}, {0x0000, 0x86b0}, {0x0034, 0x86b1}, {0x0000, 0x86b2}, {0x0049, 0x86b3}, {0x0000, 0x86b4}, {0x0000, 0x86b4}, {0x0012, 0x8201}, {0x0008, 0x8200}, {0x0001, 0x8200}, {0x0013, 0x8201}, {0x0008, 0x8200}, {0x0001, 0x8200}, {0x0001, 0x86b0}, {0x00aa, 0x86b1}, {0x0000, 0x86b2}, {0x00e4, 0x86b3}, {0x0000, 0x86b4}, {0x0000, 0x86b4}, {0x0018, 0x8660}, {0x0090, 0x8110}, {0x0001, 0x8114}, {0x0001, 0x8114}, {0x0001, 0x8114}, {0x0003, 0x8114}, {0x0080, 0x8804}, {0x0003, 0x8801}, {0x0012, 0x8800}, {0x0004, 0x8801}, {0x0005, 0x8800}, {0x0005, 0x8801}, {0x0000, 0x8800}, {0x0006, 0x8801}, {0x0000, 0x8800}, {0x0007, 0x8801}, {0x0000, 0x8800}, {0x0008, 0x8801}, {0x0005, 0x8800}, {0x000a, 0x8700}, {0x000e, 0x8801}, {0x0004, 0x8800}, {0x0005, 0x8801}, {0x0047, 0x8800}, {0x0006, 0x8801}, {0x0000, 0x8800}, {0x0007, 0x8801}, {0x00c0, 0x8800}, {0x0008, 0x8801}, {0x0003, 0x8800}, {0x0013, 0x8801}, {0x0001, 0x8800}, {0x0009, 0x8801}, {0x0000, 0x8800}, {0x000a, 0x8801}, {0x0000, 0x8800}, {0x000b, 0x8801}, {0x0000, 0x8800}, {0x000c, 0x8801}, {0x0000, 0x8800}, {0x000e, 0x8801}, {0x0004, 0x8800}, {0x000f, 0x8801}, {0x0000, 0x8800}, {0x0010, 0x8801}, {0x0006, 0x8800}, {0x0011, 0x8801}, {0x0006, 0x8800}, {0x0012, 0x8801}, {0x0000, 0x8800}, {0x0013, 0x8801}, {0x0001, 0x8800}, {0x000a, 0x8700}, {0x0000, 0x8702}, {0x0000, 0x8703}, {0x00c2, 0x8704}, {0x0001, 0x870c}, {0x0044, 0x8600}, {0x0002, 0x8606}, {0x0064, 0x8607}, {0x003a, 0x8601}, {0x0008, 0x8602}, {0x0044, 0x8600}, {0x0018, 0x8617}, {0x0008, 0x8618}, {0x00a1, 0x8656}, {0x0004, 0x865b}, {0x0002, 0x865c}, {0x0058, 0x865d}, {0x0048, 0x865e}, {0x0012, 0x8608}, {0x002c, 0x8609}, {0x0002, 0x860a}, {0x002c, 0x860b}, {0x00db, 0x860c}, {0x00f9, 0x860d}, {0x00f1, 0x860e}, {0x00e3, 0x860f}, {0x002c, 0x8610}, {0x006c, 0x8651}, {0x0041, 0x8652}, {0x0059, 0x8653}, {0x0040, 0x8654}, {0x00fa, 0x8611}, {0x00ff, 0x8612}, {0x00f8, 0x8613}, {0x0000, 0x8614}, 
{0x0001, 0x863f}, {0x0000, 0x8640}, {0x0026, 0x8641}, {0x0045, 0x8642}, {0x0060, 0x8643}, {0x0075, 0x8644}, {0x0088, 0x8645}, {0x009b, 0x8646}, {0x00b0, 0x8647}, {0x00c5, 0x8648}, {0x00d2, 0x8649}, {0x00dc, 0x864a}, {0x00e5, 0x864b}, {0x00eb, 0x864c}, {0x00f0, 0x864d}, {0x00f6, 0x864e}, {0x00fa, 0x864f}, {0x00ff, 0x8650}, {0x0060, 0x8657}, {0x0010, 0x8658}, {0x0018, 0x8659}, {0x0005, 0x865a}, {0x0018, 0x8660}, {0x0003, 0x8509}, {0x0011, 0x850a}, {0x0032, 0x850b}, {0x0010, 0x850c}, {0x0021, 0x850d}, {0x0001, 0x8500}, {0x0000, 0x8508}, {0x0012, 0x8608}, {0x002c, 0x8609}, {0x0002, 0x860a}, {0x0039, 0x860b}, {0x00d0, 0x860c}, {0x00f7, 0x860d}, {0x00ed, 0x860e}, {0x00db, 0x860f}, {0x0039, 0x8610}, {0x0012, 0x8657}, {0x000c, 0x8619}, {0x0004, 0x861a}, {0x00a1, 0x8656}, {0x00c8, 0x8615}, {0x0032, 0x8616}, {0x0030, 0x8112}, {0x0020, 0x8112}, {0x0020, 0x8112}, {0x000f, 0x8402}, {0x0000, 0x8403}, {0x0090, 0x8110}, {0x0001, 0x8114}, {0x0001, 0x8114}, {0x0001, 0x8114}, {0x0003, 0x8114}, {0x0080, 0x8804}, {0x0003, 0x8801}, {0x0012, 0x8800}, {0x0004, 0x8801}, {0x0005, 0x8800}, {0x0005, 0x8801}, {0x0047, 0x8800}, {0x0006, 0x8801}, {0x0000, 0x8800}, {0x0007, 0x8801}, {0x00c0, 0x8800}, {0x0008, 0x8801}, {0x0003, 0x8800}, {0x000a, 0x8700}, {0x000e, 0x8801}, {0x0004, 0x8800}, {0x0005, 0x8801}, {0x0047, 0x8800}, {0x0006, 0x8801}, {0x0000, 0x8800}, {0x0007, 0x8801}, {0x00c0, 0x8800}, {0x0008, 0x8801}, {0x0003, 0x8800}, {0x0013, 0x8801}, {0x0001, 0x8800}, {0x0009, 0x8801}, {0x0000, 0x8800}, {0x000a, 0x8801}, {0x0000, 0x8800}, {0x000b, 0x8801}, {0x0000, 0x8800}, {0x000c, 0x8801}, {0x0000, 0x8800}, {0x000e, 0x8801}, {0x0004, 0x8800}, {0x000f, 0x8801}, {0x0000, 0x8800}, {0x0010, 0x8801}, {0x0006, 0x8800}, {0x0011, 0x8801}, {0x0006, 0x8800}, {0x0012, 0x8801}, {0x0000, 0x8800}, {0x0013, 0x8801}, {0x0001, 0x8800}, {0x000a, 0x8700}, {0x0000, 0x8702}, {0x0000, 0x8703}, {0x00c2, 0x8704}, {0x0001, 0x870c}, {0x0044, 0x8600}, {0x0002, 0x8606}, {0x0064, 0x8607}, {0x003a, 0x8601}, {0x0008, 0x8602}, {0x0044, 0x8600}, {0x0018, 0x8617}, {0x0008, 0x8618}, {0x00a1, 0x8656}, {0x0004, 0x865b}, {0x0002, 0x865c}, {0x0058, 0x865d}, {0x0048, 0x865e}, {0x0012, 0x8608}, {0x002c, 0x8609}, {0x0002, 0x860a}, {0x002c, 0x860b}, {0x00db, 0x860c}, {0x00f9, 0x860d}, {0x00f1, 0x860e}, {0x00e3, 0x860f}, {0x002c, 0x8610}, {0x006c, 0x8651}, {0x0041, 0x8652}, {0x0059, 0x8653}, {0x0040, 0x8654}, {0x00fa, 0x8611}, {0x00ff, 0x8612}, {0x00f8, 0x8613}, {0x0000, 0x8614}, {0x0001, 0x863f}, {0x0000, 0x8640}, {0x0026, 0x8641}, {0x0045, 0x8642}, {0x0060, 0x8643}, {0x0075, 0x8644}, {0x0088, 0x8645}, {0x009b, 0x8646}, {0x00b0, 0x8647}, {0x00c5, 0x8648}, {0x00d2, 0x8649}, {0x00dc, 0x864a}, {0x00e5, 0x864b}, {0x00eb, 0x864c}, {0x00f0, 0x864d}, {0x00f6, 0x864e}, {0x00fa, 0x864f}, {0x00ff, 0x8650}, {0x0060, 0x8657}, {0x0010, 0x8658}, {0x0018, 0x8659}, {0x0005, 0x865a}, {0x0018, 0x8660}, {0x0003, 0x8509}, {0x0011, 0x850a}, {0x0032, 0x850b}, {0x0010, 0x850c}, {0x0021, 0x850d}, {0x0001, 0x8500}, {0x0000, 0x8508}, {0x0012, 0x8608}, {0x002c, 0x8609}, {0x0002, 0x860a}, {0x0039, 0x860b}, {0x00d0, 0x860c}, {0x00f7, 0x860d}, {0x00ed, 0x860e}, {0x00db, 0x860f}, {0x0039, 0x8610}, {0x0012, 0x8657}, {0x0064, 0x8619}, /* This line starts it all, it is not needed here */ /* since it has been build into the driver */ /* jfm: don't start now */ /* {0x0030, 0x8112}, */ {} }; /* * Initialization data for Creative Webcam Vista */ static const u16 spca508_vista_init_data[][2] = { {0x0008, 0x8200}, /* Clear register */ {0x0000, 0x870b}, /* Reset CTL3 */ {0x0020, 0x8112}, /* Video Drop 
packet enable */ {0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */ {0x0000, 0x8110}, /* Disable everything */ {0x0000, 0x8114}, /* Software GPIO output data */ {0x0000, 0x8114}, {0x0003, 0x8111}, {0x0000, 0x8111}, {0x0090, 0x8110}, /* Enable: SSI output, External 2X clock output */ {0x0020, 0x8112}, {0x0000, 0x8114}, {0x0001, 0x8114}, {0x0001, 0x8114}, {0x0001, 0x8114}, {0x0003, 0x8114}, {0x000f, 0x8402}, /* Memory bank Address */ {0x0000, 0x8403}, /* Memory bank Address */ {0x00ba, 0x8804}, /* SSI Slave address */ {0x0010, 0x8802}, /* 93.75kHz SSI Clock Two DataByte */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, /* Will write 2 bytes (DATA1+DATA2) */ {0x0020, 0x8801}, /* Register address for SSI read/write */ {0x0044, 0x8805}, /* DATA2 */ {0x0004, 0x8800}, /* DATA1 -> write triggered */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x0009, 0x8801}, {0x0042, 0x8805}, {0x0001, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x003c, 0x8801}, {0x0001, 0x8805}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x0001, 0x8801}, {0x000a, 0x8805}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x0002, 0x8801}, {0x0000, 0x8805}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x0003, 0x8801}, {0x0027, 0x8805}, {0x0001, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x0004, 0x8801}, {0x0065, 0x8805}, {0x0001, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x0005, 0x8801}, {0x0003, 0x8805}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x0006, 0x8801}, {0x001c, 0x8805}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x0007, 0x8801}, {0x002a, 0x8805}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x000e, 0x8801}, {0x0000, 0x8805}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x0028, 0x8801}, {0x002e, 0x8805}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x0039, 0x8801}, {0x0013, 0x8805}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x003b, 0x8801}, {0x000c, 0x8805}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 
0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x0035, 0x8801}, {0x0028, 0x8805}, {0x0000, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8803 } -> 0000: 00 */ /* READ { 0x0001, 0x8802 } -> 0000: 10 */ {0x0010, 0x8802}, {0x0009, 0x8801}, {0x0042, 0x8805}, {0x0001, 0x8800}, /* READ { 0x0001, 0x8803 } -> 0000: 00 */ {0x0050, 0x8703}, {0x0002, 0x8704}, /* External input CKIx1 */ {0x0001, 0x870c}, /* Select CKOx2 output */ {0x009a, 0x8600}, /* Line memory Read Counter (L) */ {0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */ {0x0023, 0x8601}, {0x0010, 0x8602}, {0x000a, 0x8603}, {0x009a, 0x8600}, {0x0001, 0x865b}, /* 1 Horizontal Offset for Valid Pixel(L) */ {0x0003, 0x865c}, /* Vertical offset for valid lines (L) */ {0x0058, 0x865d}, /* Horizontal valid pixels window (L) */ {0x0048, 0x865e}, /* Vertical valid lines window (L) */ {0x0000, 0x865f}, {0x0006, 0x8660}, /* Enable nibble data input, select nibble input order */ {0x0013, 0x8608}, /* A11 Coeficients for color correction */ {0x0028, 0x8609}, /* Note: these values are confirmed at the end of array */ {0x0005, 0x860a}, /* ... */ {0x0025, 0x860b}, {0x00e1, 0x860c}, {0x00fa, 0x860d}, {0x00f4, 0x860e}, {0x00e8, 0x860f}, {0x0025, 0x8610}, /* A33 Coef. */ {0x00fc, 0x8611}, /* White balance offset: R */ {0x0001, 0x8612}, /* White balance offset: Gr */ {0x00fe, 0x8613}, /* White balance offset: B */ {0x0000, 0x8614}, /* White balance offset: Gb */ {0x0064, 0x8651}, /* R gain for white balance (L) */ {0x0040, 0x8652}, /* Gr gain for white balance (L) */ {0x0066, 0x8653}, /* B gain for white balance (L) */ {0x0040, 0x8654}, /* Gb gain for white balance (L) */ {0x0001, 0x863f}, /* Enable fixed gamma correction */ {0x00a1, 0x8656}, /* Size - Window1: 256x256, Window2: 128x128, * UV division: UV no change, * Enable New edge enhancement */ {0x0018, 0x8657}, /* Edge gain high threshold */ {0x0020, 0x8658}, /* Edge gain low threshold */ {0x000a, 0x8659}, /* Edge bandwidth high threshold */ {0x0005, 0x865a}, /* Edge bandwidth low threshold */ {0x0064, 0x8607}, /* UV filter enable */ {0x0016, 0x8660}, {0x0000, 0x86b0}, /* Bad pixels compensation address */ {0x00dc, 0x86b1}, /* X coord for bad pixels compensation (L) */ {0x0000, 0x86b2}, {0x0009, 0x86b3}, /* Y coord for bad pixels compensation (L) */ {0x0000, 0x86b4}, {0x0001, 0x86b0}, {0x00f5, 0x86b1}, {0x0000, 0x86b2}, {0x00c6, 0x86b3}, {0x0000, 0x86b4}, {0x0002, 0x86b0}, {0x001c, 0x86b1}, {0x0001, 0x86b2}, {0x00d7, 0x86b3}, {0x0000, 0x86b4}, {0x0003, 0x86b0}, {0x001c, 0x86b1}, {0x0001, 0x86b2}, {0x00d8, 0x86b3}, {0x0000, 0x86b4}, {0x0004, 0x86b0}, {0x001d, 0x86b1}, {0x0001, 0x86b2}, {0x00d8, 0x86b3}, {0x0000, 0x86b4}, {0x001e, 0x8660}, /* READ { 0x0000, 0x8608 } -> 0000: 13 */ /* READ { 0x0000, 0x8609 } -> 0000: 28 */ /* READ { 0x0000, 0x8610 } -> 0000: 05 */ /* READ { 0x0000, 0x8611 } -> 0000: 25 */ /* READ { 0x0000, 0x8612 } -> 0000: e1 */ /* READ { 0x0000, 0x8613 } -> 0000: fa */ /* READ { 0x0000, 0x8614 } -> 0000: f4 */ /* READ { 0x0000, 0x8615 } -> 0000: e8 */ /* READ { 0x0000, 0x8616 } -> 0000: 25 */ {} }; static int reg_write(struct gspca_dev *gspca_dev, u16 index, u16 value) { int ret; struct usb_device *dev = gspca_dev->dev; ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0, /* request */ USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, NULL, 0, 500); gspca_dbg(gspca_dev, D_USBO, "reg write i:0x%04x = 0x%02x\n", index, value); if (ret < 0) pr_err("reg write: error %d\n", ret); return ret; } /* read 1 byte */ /* returns: negative is error, pos or zero 
is data */ static int reg_read(struct gspca_dev *gspca_dev, u16 index) /* wIndex */ { int ret; ret = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), 0, /* register */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, 1, 500); /* timeout */ gspca_dbg(gspca_dev, D_USBI, "reg read i:%04x --> %02x\n", index, gspca_dev->usb_buf[0]); if (ret < 0) { pr_err("reg_read err %d\n", ret); return ret; } return gspca_dev->usb_buf[0]; } /* send 1 or 2 bytes to the sensor via the Synchronous Serial Interface */ static int ssi_w(struct gspca_dev *gspca_dev, u16 reg, u16 val) { int ret, retry; ret = reg_write(gspca_dev, 0x8802, reg >> 8); if (ret < 0) goto out; ret = reg_write(gspca_dev, 0x8801, reg & 0x00ff); if (ret < 0) goto out; if ((reg & 0xff00) == 0x1000) { /* if 2 bytes */ ret = reg_write(gspca_dev, 0x8805, val & 0x00ff); if (ret < 0) goto out; val >>= 8; } ret = reg_write(gspca_dev, 0x8800, val); if (ret < 0) goto out; /* poll until not busy */ retry = 10; for (;;) { ret = reg_read(gspca_dev, 0x8803); if (ret < 0) break; if (gspca_dev->usb_buf[0] == 0) break; if (--retry <= 0) { gspca_err(gspca_dev, "ssi_w busy %02x\n", gspca_dev->usb_buf[0]); ret = -1; break; } msleep(8); } out: return ret; } static int write_vector(struct gspca_dev *gspca_dev, const u16 (*data)[2]) { int ret = 0; while ((*data)[1] != 0) { if ((*data)[1] & 0x8000) { if ((*data)[1] == 0xdd00) /* delay */ msleep((*data)[0]); else ret = reg_write(gspca_dev, (*data)[1], (*data)[0]); } else { ret = ssi_w(gspca_dev, (*data)[1], (*data)[0]); } if (ret < 0) break; data++; } return ret; } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; const u16 (*init_data)[2]; static const u16 (*(init_data_tb[]))[2] = { spca508_vista_init_data, /* CreativeVista 0 */ spca508_sightcam_init_data, /* HamaUSBSightcam 1 */ spca508_sightcam2_init_data, /* HamaUSBSightcam2 2 */ spca508cs110_init_data, /* IntelEasyPCCamera 3 */ spca508cs110_init_data, /* MicroInnovationIC200 4 */ spca508_init_data, /* ViewQuestVQ110 5 */ }; int data1, data2; /* Read from global register the USB product and vendor IDs, just to * prove that we can communicate with the device. This works, which * confirms at we are communicating properly and that the device * is a 508. 
*/ data1 = reg_read(gspca_dev, 0x8104); data2 = reg_read(gspca_dev, 0x8105); gspca_dbg(gspca_dev, D_PROBE, "Webcam Vendor ID: 0x%02x%02x\n", data2, data1); data1 = reg_read(gspca_dev, 0x8106); data2 = reg_read(gspca_dev, 0x8107); gspca_dbg(gspca_dev, D_PROBE, "Webcam Product ID: 0x%02x%02x\n", data2, data1); data1 = reg_read(gspca_dev, 0x8621); gspca_dbg(gspca_dev, D_PROBE, "Window 1 average luminance: %d\n", data1); cam = &gspca_dev->cam; cam->cam_mode = sif_mode; cam->nmodes = ARRAY_SIZE(sif_mode); sd->subtype = id->driver_info; init_data = init_data_tb[sd->subtype]; return write_vector(gspca_dev, init_data); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return 0; } static int sd_start(struct gspca_dev *gspca_dev) { int mode; mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; reg_write(gspca_dev, 0x8500, mode); switch (mode) { case 0: case 1: reg_write(gspca_dev, 0x8700, 0x28); /* clock */ break; default: /* case 2: */ /* case 3: */ reg_write(gspca_dev, 0x8700, 0x23); /* clock */ break; } reg_write(gspca_dev, 0x8112, 0x10 | 0x20); return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { /* Video ISO disable, Video Drop Packet enable: */ reg_write(gspca_dev, 0x8112, 0x20); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { switch (data[0]) { case 0: /* start of frame */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); data += SPCA508_OFFSET_DATA; len -= SPCA508_OFFSET_DATA; gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); break; case 0xff: /* drop */ break; default: data += 1; len -= 1; gspca_frame_add(gspca_dev, INTER_PACKET, data, len); break; } } static void setbrightness(struct gspca_dev *gspca_dev, s32 brightness) { /* MX seem contrast */ reg_write(gspca_dev, 0x8651, brightness); reg_write(gspca_dev, 0x8652, brightness); reg_write(gspca_dev, 0x8653, brightness); reg_write(gspca_dev, 0x8654, brightness); } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightness(gspca_dev, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 5); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam}, {USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista}, {USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110}, {USB_DEVICE(0x0af9, 0x0010), .driver_info = HamaUSBSightcam}, {USB_DEVICE(0x0af9, 0x0011), .driver_info = HamaUSBSightcam2}, {USB_DEVICE(0x8086, 0x0110), .driver_info = IntelEasyPCCamera}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const 
struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
			       THIS_MODULE);
}

static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
	.reset_resume = gspca_resume,
#endif
};

module_usb_driver(sd_driver);
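/*
 * Illustrative sketch, not part of the driver: the init tables above are
 * {value, register-index} pairs terminated by an empty entry, and
 * write_vector() dispatches each pair - indices with bit 15 set go to
 * SPCA508 bridge registers (with 0xdd00 reserved as a millisecond delay
 * marker), anything else is sent to the sensor through ssi_w().  The
 * stand-alone program below mirrors only that dispatch logic;
 * reg_write_stub() and ssi_write_stub() are hypothetical stand-ins, not
 * driver functions.
 */
#include <stdint.h>
#include <stdio.h>

static void reg_write_stub(uint16_t index, uint16_t value)
{
	printf("bridge reg 0x%04x <- 0x%02x\n", (unsigned)index, (unsigned)value);
}

static void ssi_write_stub(uint16_t reg, uint16_t val)
{
	printf("sensor reg 0x%04x <- 0x%04x (via SSI)\n", (unsigned)reg, (unsigned)val);
}

static void play_vector(const uint16_t (*data)[2])
{
	while ((*data)[1] != 0) {
		if ((*data)[1] & 0x8000) {		/* bridge register space */
			if ((*data)[1] == 0xdd00)	/* delay marker */
				printf("delay %u ms\n", (unsigned)(*data)[0]);
			else
				reg_write_stub((*data)[1], (*data)[0]);
		} else {				/* sensor register via SSI */
			ssi_write_stub((*data)[1], (*data)[0]);
		}
		data++;
	}
}

int main(void)
{
	static const uint16_t demo[][2] = {
		{0x0000, 0x870b},	/* bridge: reset CTL3 */
		{0x0005, 0xdd00},	/* wait 5 ms */
		{0x0047, 0x0005},	/* sensor register 0x05, written via SSI */
		{}			/* terminator */
	};

	play_vector(demo);
	return 0;
}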
// SPDX-License-Identifier: GPL-2.0+
/*
 * HID driver for UC-Logic devices not fully compliant with HID standard
 *  - original and fixed report descriptors
 *
 * Copyright (c) 2010-2017 Nikolai Kondrashov
 * Copyright (c) 2013 Martin Rusko
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
*/ #include "hid-uclogic-rdesc.h" #include <linux/slab.h> #include <linux/unaligned.h> #include <kunit/visibility.h> /* Fixed WP4030U report descriptor */ const __u8 uclogic_rdesc_wp4030u_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x14, /* Logical Minimum (0), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0xA0, 0x0F, /* Physical Maximum (4000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0xB8, 0x0B, /* Physical Maximum (3000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_wp4030u_fixed_size = sizeof(uclogic_rdesc_wp4030u_fixed_arr); /* Fixed WP5540U report descriptor */ const __u8 uclogic_rdesc_wp5540u_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x14, /* Logical Minimum (0), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x7C, 0x15, /* Physical Maximum (5500), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0xA0, 0x0F, /* Physical Maximum (4000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0, /* End Collection, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x02, /* Usage (Mouse), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x08, /* Report ID (8), */ 0x09, 0x01, /* Usage (Pointer), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x03, /* Usage Maximum (03h), */ 0x14, /* Logical Minimum (0), */ 
0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x15, 0x81, /* Logical Minimum (-127), */ 0x25, 0x7F, /* Logical Maximum (127), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x09, 0x38, /* Usage (Wheel), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_wp5540u_fixed_size = sizeof(uclogic_rdesc_wp5540u_fixed_arr); /* Fixed WP8060U report descriptor */ const __u8 uclogic_rdesc_wp8060u_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x14, /* Logical Minimum (0), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x40, 0x1F, /* Physical Maximum (8000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0x70, 0x17, /* Physical Maximum (6000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0, /* End Collection, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x02, /* Usage (Mouse), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x08, /* Report ID (8), */ 0x09, 0x01, /* Usage (Pointer), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x03, /* Usage Maximum (03h), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x15, 0x81, /* Logical Minimum (-127), */ 0x25, 0x7F, /* Logical Maximum (127), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x09, 0x38, /* Usage (Wheel), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_wp8060u_fixed_size = 
sizeof(uclogic_rdesc_wp8060u_fixed_arr); /* Fixed WP1062 report descriptor */ const __u8 uclogic_rdesc_wp1062_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x04, /* Report Count (4), */ 0x81, 0x01, /* Input (Constant), */ 0x09, 0x32, /* Usage (In Range), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x14, /* Logical Minimum (0), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x10, 0x27, /* Physical Maximum (10000), */ 0x26, 0x20, 0x4E, /* Logical Maximum (20000), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0xB7, 0x19, /* Physical Maximum (6583), */ 0x26, 0x6E, 0x33, /* Logical Maximum (13166), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_wp1062_fixed_size = sizeof(uclogic_rdesc_wp1062_fixed_arr); /* Fixed PF1209 report descriptor */ const __u8 uclogic_rdesc_pf1209_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x14, /* Logical Minimum (0), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0xE0, 0x2E, /* Physical Maximum (12000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0x28, 0x23, /* Physical Maximum (9000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0, /* End Collection, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x02, /* Usage (Mouse), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x08, /* Report ID (8), */ 0x09, 0x01, /* Usage (Pointer), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x03, /* Usage Maximum (03h), */ 0x14, 
/* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x15, 0x81, /* Logical Minimum (-127), */ 0x25, 0x7F, /* Logical Maximum (127), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x09, 0x38, /* Usage (Wheel), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_pf1209_fixed_size = sizeof(uclogic_rdesc_pf1209_fixed_arr); /* Fixed PID 0522 tablet report descriptor, interface 0 (stylus) */ const __u8 uclogic_rdesc_twhl850_fixed0_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x03, /* Report Count (3), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x81, 0x02, /* Input (Variable), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x95, 0x01, /* Report Count (1), */ 0x09, 0x32, /* Usage (In Range), */ 0x81, 0x02, /* Input (Variable), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x75, 0x10, /* Report Size (16), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x65, 0x13, /* Unit (Inch), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x40, 0x1F, /* Physical Maximum (8000), */ 0x26, 0x00, 0x7D, /* Logical Maximum (32000), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0x88, 0x13, /* Physical Maximum (5000), */ 0x26, 0x20, 0x4E, /* Logical Maximum (20000), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_twhl850_fixed0_size = sizeof(uclogic_rdesc_twhl850_fixed0_arr); /* Fixed PID 0522 tablet report descriptor, interface 1 (mouse) */ const __u8 uclogic_rdesc_twhl850_fixed1_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x02, /* Usage (Mouse), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x01, /* Report ID (1), */ 0x09, 0x01, /* Usage (Pointer), */ 0xA0, /* Collection (Physical), */ 0x05, 0x09, /* Usage Page (Button), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x03, /* Report Count (3), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x03, /* Usage Maximum (03h), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x06, /* Input (Variable, 
Relative), */ 0x09, 0x38, /* Usage (Wheel), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_twhl850_fixed1_size = sizeof(uclogic_rdesc_twhl850_fixed1_arr); /* Fixed PID 0522 tablet report descriptor, interface 2 (frame buttons) */ const __u8 uclogic_rdesc_twhl850_fixed2_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x06, /* Usage (Keyboard), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x03, /* Report ID (3), */ 0x05, 0x07, /* Usage Page (Keyboard), */ 0x14, /* Logical Minimum (0), */ 0x19, 0xE0, /* Usage Minimum (KB Leftcontrol), */ 0x29, 0xE7, /* Usage Maximum (KB Right GUI), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x08, /* Report Count (8), */ 0x81, 0x02, /* Input (Variable), */ 0x18, /* Usage Minimum (None), */ 0x29, 0xFF, /* Usage Maximum (FFh), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x06, /* Report Count (6), */ 0x80, /* Input, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_twhl850_fixed2_size = sizeof(uclogic_rdesc_twhl850_fixed2_arr); /* Fixed TWHA60 report descriptor, interface 0 (stylus) */ const __u8 uclogic_rdesc_twha60_fixed0_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x04, /* Report Count (4), */ 0x81, 0x01, /* Input (Constant), */ 0x09, 0x32, /* Usage (In Range), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x14, /* Logical Minimum (0), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x10, 0x27, /* Physical Maximum (10000), */ 0x27, 0x3F, 0x9C, 0x00, 0x00, /* Logical Maximum (39999), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0x6A, 0x18, /* Physical Maximum (6250), */ 0x26, 0xA7, 0x61, /* Logical Maximum (24999), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_twha60_fixed0_size = sizeof(uclogic_rdesc_twha60_fixed0_arr); /* Fixed TWHA60 report descriptor, interface 1 (frame buttons) */ const __u8 uclogic_rdesc_twha60_fixed1_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x06, /* Usage (Keyboard), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x05, /* Report ID (5), */ 0x05, 0x07, /* Usage Page (Keyboard), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x08, /* Report Count (8), */ 0x81, 0x01, /* Input (Constant), */ 
0x95, 0x0C, /* Report Count (12), */ 0x19, 0x3A, /* Usage Minimum (KB F1), */ 0x29, 0x45, /* Usage Maximum (KB F12), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0C, /* Report Count (12), */ 0x19, 0x68, /* Usage Minimum (KB F13), */ 0x29, 0x73, /* Usage Maximum (KB F24), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x08, /* Report Count (8), */ 0x81, 0x01, /* Input (Constant), */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_twha60_fixed1_size = sizeof(uclogic_rdesc_twha60_fixed1_arr); /* Fixed report descriptor template for (tweaked) v1 pen reports */ const __u8 uclogic_rdesc_v1_pen_template_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x07, /* Report ID (7), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x09, 0x32, /* Usage (In Range), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x65, 0x13, /* Unit (Inch), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x27, UCLOGIC_RDESC_PEN_PH(X_LM), /* Logical Maximum (PLACEHOLDER), */ 0x47, UCLOGIC_RDESC_PEN_PH(X_PM), /* Physical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x27, UCLOGIC_RDESC_PEN_PH(Y_LM), /* Logical Maximum (PLACEHOLDER), */ 0x47, UCLOGIC_RDESC_PEN_PH(Y_PM), /* Physical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x27, UCLOGIC_RDESC_PEN_PH(PRESSURE_LM), /* Logical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_v1_pen_template_size = sizeof(uclogic_rdesc_v1_pen_template_arr); /* Fixed report descriptor template for (tweaked) v2 pen reports */ const __u8 uclogic_rdesc_v2_pen_template_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x08, /* Report ID (8), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x09, 0x32, /* Usage (In Range), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x95, 0x01, /* Report Count (1), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x65, 0x13, /* Unit (Inch), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x75, 0x18, /* Report Size (24), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage 
(X), */ 0x27, UCLOGIC_RDESC_PEN_PH(X_LM), /* Logical Maximum (PLACEHOLDER), */ 0x47, UCLOGIC_RDESC_PEN_PH(X_PM), /* Physical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x27, UCLOGIC_RDESC_PEN_PH(Y_LM), /* Logical Maximum (PLACEHOLDER), */ 0x47, UCLOGIC_RDESC_PEN_PH(Y_PM), /* Physical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x75, 0x10, /* Report Size (16), */ 0x27, UCLOGIC_RDESC_PEN_PH(PRESSURE_LM), /* Logical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0x54, /* Unit Exponent (0), */ 0x65, 0x14, /* Unit (Degrees), */ 0x35, 0xC4, /* Physical Minimum (-60), */ 0x45, 0x3C, /* Physical Maximum (60), */ 0x15, 0xC4, /* Logical Minimum (-60), */ 0x25, 0x3C, /* Logical Maximum (60), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x02, /* Report Count (2), */ 0x09, 0x3D, /* Usage (X Tilt), */ 0x09, 0x3E, /* Usage (Y Tilt), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_v2_pen_template_size = sizeof(uclogic_rdesc_v2_pen_template_arr); /* * Expand to the contents of a generic frame buttons report descriptor. * * @_id: The report ID to use. * @_size: Size of the report to pad to, including report ID, bytes. */ #define UCLOGIC_RDESC_FRAME_BUTTONS_BYTES(_id, _size) \ 0x05, 0x01, /* Usage Page (Desktop), */ \ 0x09, 0x07, /* Usage (Keypad), */ \ 0xA1, 0x01, /* Collection (Application), */ \ 0x85, (_id), /* Report ID (_id), */ \ 0x14, /* Logical Minimum (0), */ \ 0x25, 0x01, /* Logical Maximum (1), */ \ 0x75, 0x01, /* Report Size (1), */ \ 0x05, 0x0D, /* Usage Page (Digitizer), */ \ 0x09, 0x39, /* Usage (Tablet Function Keys), */ \ 0xA0, /* Collection (Physical), */ \ 0x09, 0x44, /* Usage (Barrel Switch), */ \ 0x95, 0x01, /* Report Count (1), */ \ 0x81, 0x02, /* Input (Variable), */ \ 0x05, 0x01, /* Usage Page (Desktop), */ \ 0x09, 0x30, /* Usage (X), */ \ 0x09, 0x31, /* Usage (Y), */ \ 0x95, 0x02, /* Report Count (2), */ \ 0x81, 0x02, /* Input (Variable), */ \ 0x95, 0x15, /* Report Count (21), */ \ 0x81, 0x01, /* Input (Constant), */ \ 0x05, 0x09, /* Usage Page (Button), */ \ 0x19, 0x01, /* Usage Minimum (01h), */ \ 0x29, 0x0A, /* Usage Maximum (0Ah), */ \ 0x95, 0x0A, /* Report Count (10), */ \ 0x81, 0x02, /* Input (Variable), */ \ 0xC0, /* End Collection, */ \ 0x05, 0x01, /* Usage Page (Desktop), */ \ 0x09, 0x05, /* Usage (Gamepad), */ \ 0xA0, /* Collection (Physical), */ \ 0x05, 0x09, /* Usage Page (Button), */ \ 0x19, 0x01, /* Usage Minimum (01h), */ \ 0x29, 0x0A, /* Usage Maximum (0Ah), */ \ 0x95, 0x0A, /* Report Count (10), */ \ 0x81, 0x02, /* Input (Variable), */ \ 0x95, ((_size) * 8 - 52), \ /* Report Count (padding), */ \ 0x81, 0x01, /* Input (Constant), */ \ 0xC0, /* End Collection, */ \ 0xC0 /* End Collection */ /* Fixed report descriptor for (tweaked) v1 frame reports */ const __u8 uclogic_rdesc_v1_frame_arr[] = { UCLOGIC_RDESC_FRAME_BUTTONS_BYTES(UCLOGIC_RDESC_V1_FRAME_ID, 8) }; const size_t uclogic_rdesc_v1_frame_size = sizeof(uclogic_rdesc_v1_frame_arr); /* Fixed report descriptor for (tweaked) v2 frame button reports */ const __u8 uclogic_rdesc_v2_frame_buttons_arr[] = { UCLOGIC_RDESC_FRAME_BUTTONS_BYTES(UCLOGIC_RDESC_V2_FRAME_BUTTONS_ID, 12) }; const size_t uclogic_rdesc_v2_frame_buttons_size = sizeof(uclogic_rdesc_v2_frame_buttons_arr); /* Fixed report descriptor for (tweaked) v2 frame touch ring reports */ const __u8 uclogic_rdesc_v2_frame_touch_ring_arr[] = { 0x05, 0x01, /* 
Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, UCLOGIC_RDESC_V2_FRAME_TOUCH_ID, /* Report ID (TOUCH_ID), */ 0x14, /* Logical Minimum (0), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x09, 0x01, /* Usage (01h), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x07, /* Report Count (7), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x0A, 0xFF, 0xFF, /* Usage (FFFFh), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x38, /* Usage (Wheel), */ 0x95, 0x01, /* Report Count (1), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x25, 0x0B, /* Logical Maximum (11), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x2E, /* Report Count (46), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_v2_frame_touch_ring_size = sizeof(uclogic_rdesc_v2_frame_touch_ring_arr); /* Fixed report descriptor for (tweaked) v2 frame touch strip reports */ const __u8 uclogic_rdesc_v2_frame_touch_strip_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, UCLOGIC_RDESC_V2_FRAME_TOUCH_ID, /* Report ID (TOUCH_ID), */ 0x14, /* Logical Minimum (0), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x09, 0x01, /* Usage (01h), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x07, /* Report Count (7), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x0A, 0xFF, 0xFF, /* Usage (FFFFh), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x33, /* Usage (Rx), */ 0x09, 0x34, /* Usage (Ry), */ 0x95, 0x01, /* Report Count (1), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x25, 0x07, /* Logical Maximum (7), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x2E, /* Report Count (46), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_v2_frame_touch_strip_size = sizeof(uclogic_rdesc_v2_frame_touch_strip_arr); /* Fixed report descriptor for (tweaked) v2 frame dial reports */ const __u8 uclogic_rdesc_v2_frame_dial_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), 
*/ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, UCLOGIC_RDESC_V2_FRAME_DIAL_ID, /* Report ID (DIAL_ID), */ 0x14, /* Logical Minimum (0), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x09, /* Usage Page (Button), */ 0x09, 0x01, /* Usage (01h), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x06, /* Report Count (6), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x0A, 0xFF, 0xFF, /* Usage (FFFFh), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x38, /* Usage (Wheel), */ 0x95, 0x01, /* Report Count (1), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x2E, /* Report Count (46), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_v2_frame_dial_size = sizeof(uclogic_rdesc_v2_frame_dial_arr); const __u8 uclogic_ugee_v2_probe_arr[] = { 0x02, 0xb0, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; const size_t uclogic_ugee_v2_probe_size = sizeof(uclogic_ugee_v2_probe_arr); const int uclogic_ugee_v2_probe_endpoint = 0x03; /* Fixed report descriptor template for UGEE v2 pen reports */ const __u8 uclogic_rdesc_ugee_v2_pen_template_arr[] = { 0x05, 0x0d, /* Usage Page (Digitizers), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xa1, 0x01, /* Collection (Application), */ 0x85, 0x02, /* Report ID (2), */ 0x09, 0x20, /* Usage (Stylus), */ 0xa1, 0x00, /* Collection (Physical), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x03, /* Report Count (3), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x09, 0x32, /* Usage (In Range), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x35, 0x00, /* Physical Minimum (0), */ 0xa4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x30, /* Usage (X), */ 0x65, 0x13, /* Unit (Inch), */ 0x55, 0x0d, /* Unit Exponent (-3), */ 0x27, UCLOGIC_RDESC_PEN_PH(X_LM), /* Logical Maximum (PLACEHOLDER), */ 0x47, UCLOGIC_RDESC_PEN_PH(X_PM), /* Physical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x27, UCLOGIC_RDESC_PEN_PH(Y_LM), /* Logical Maximum (PLACEHOLDER), */ 0x47, UCLOGIC_RDESC_PEN_PH(Y_PM), /* Physical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0xb4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x45, 0x00, /* Physical 
Maximum (0), */ 0x27, UCLOGIC_RDESC_PEN_PH(PRESSURE_LM), /* Logical Maximum (PLACEHOLDER), */ 0x75, 0x0D, /* Report Size (13), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x01, /* Input (Constant), */ 0x09, 0x3d, /* Usage (X Tilt), */ 0x35, 0xC3, /* Physical Minimum (-61), */ 0x45, 0x3C, /* Physical Maximum (60), */ 0x15, 0xC3, /* Logical Minimum (-61), */ 0x25, 0x3C, /* Logical Maximum (60), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x3e, /* Usage (Y Tilt), */ 0x35, 0xC3, /* Physical Minimum (-61), */ 0x45, 0x3C, /* Physical Maximum (60), */ 0x15, 0xC3, /* Logical Minimum (-61), */ 0x25, 0x3C, /* Logical Maximum (60), */ 0x81, 0x02, /* Input (Variable), */ 0xc0, /* End Collection, */ 0xc0, /* End Collection */ }; const size_t uclogic_rdesc_ugee_v2_pen_template_size = sizeof(uclogic_rdesc_ugee_v2_pen_template_arr); /* Fixed report descriptor template for UGEE v2 frame reports (buttons only) */ const __u8 uclogic_rdesc_ugee_v2_frame_btn_template_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, UCLOGIC_RDESC_V1_FRAME_ID, /* Report ID, */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x08, /* Report Count (8), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ UCLOGIC_RDESC_FRAME_PH_BTN, /* Usage Maximum (PLACEHOLDER), */ 0x95, 0x0A, /* Report Count (10), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x46, /* Report Count (70), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_ugee_v2_frame_btn_template_size = sizeof(uclogic_rdesc_ugee_v2_frame_btn_template_arr); /* Fixed report descriptor template for UGEE v2 frame reports (dial) */ const __u8 uclogic_rdesc_ugee_v2_frame_dial_template_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, UCLOGIC_RDESC_V1_FRAME_ID, /* Report ID, */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x08, /* Report Count (8), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ UCLOGIC_RDESC_FRAME_PH_BTN, /* Usage Maximum (PLACEHOLDER), */ 0x95, 0x0A, /* Report Count (10), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x06, /* Report Count (6), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x38, /* Usage (Wheel), */ 0x95, 0x01, /* Report Count (1), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_ugee_v2_frame_dial_template_size = 
sizeof(uclogic_rdesc_ugee_v2_frame_dial_template_arr); /* Fixed report descriptor template for UGEE v2 frame reports (mouse) */ const __u8 uclogic_rdesc_ugee_v2_frame_mouse_template_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x02, /* Usage (Mouse), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x01, /* Report ID (1), */ 0x05, 0x01, /* Usage Page (Pointer), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x02, /* Report Count (2), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x02, /* Usage Maximum (02h), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x06, /* Report Count (6), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x01, /* Usage Page (Generic Desktop), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x02, /* Report Count (2), */ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_ugee_v2_frame_mouse_template_size = sizeof(uclogic_rdesc_ugee_v2_frame_mouse_template_arr); /* Fixed report descriptor template for UGEE v2 battery reports */ const __u8 uclogic_rdesc_ugee_v2_battery_template_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, UCLOGIC_RDESC_UGEE_V2_BATTERY_ID, /* Report ID, */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x84, /* Usage Page (Power Device), */ 0x05, 0x85, /* Usage Page (Battery System), */ 0x09, 0x65, /* Usage Page (AbsoluteStateOfCharge), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x01, /* Report Count (1), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xff, 0x00, /* Logical Maximum (255), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x01, /* Report Count (1), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x09, 0x44, /* Usage Page (Charging), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x07, /* Report Count (7), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x07, /* Report Count (7), */ 0x81, 0x01, /* Input (Constant), */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_ugee_v2_battery_template_size = sizeof(uclogic_rdesc_ugee_v2_battery_template_arr); /* Fixed report descriptor for Ugee EX07 frame */ const __u8 uclogic_rdesc_ugee_ex07_frame_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x06, /* Report ID (6), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x05, 0x09, /* Usage Page (Button), */ 0x75, 0x01, /* Report Size (1), */ 0x19, 0x03, /* Usage Minimum (03h), */ 0x29, 0x06, /* Usage Maximum (06h), */ 0x95, 0x04, /* Report Count (4), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x1A, /* Report Count (26), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x02, /* Usage Maximum (02h), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t 
uclogic_rdesc_ugee_ex07_frame_size = sizeof(uclogic_rdesc_ugee_ex07_frame_arr); /* Fixed report descriptor for Ugee G5 frame controls */ const __u8 uclogic_rdesc_ugee_g5_frame_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x06, /* Report ID (6), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x05, /* Usage Maximum (05h), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x0A, 0xFF, 0xFF, /* Usage (FFFFh), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x0B, /* Report Count (11), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x38, /* Usage (Wheel), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x02, /* Report Size (2), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_ugee_g5_frame_size = sizeof(uclogic_rdesc_ugee_g5_frame_arr); /* Fixed report descriptor for XP-Pen Deco 01 frame controls */ const __u8 uclogic_rdesc_xppen_deco01_frame_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x06, /* Report ID (6), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x08, /* Usage Maximum (08h), */ 0x95, 0x08, /* Report Count (8), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x15, /* Report Count (21), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_xppen_deco01_frame_size = sizeof(uclogic_rdesc_xppen_deco01_frame_arr); /** * uclogic_rdesc_template_apply() - apply report descriptor parameters to a * report descriptor template, creating a report descriptor. 
Copies the * template over to the new report descriptor and replaces every occurrence of * the template placeholders, followed by an index byte, with the value from the * parameter list at that index. * * @template_ptr: Pointer to the template buffer. * @template_size: Size of the template buffer. * @param_list: List of template parameters. * @param_num: Number of parameters in the list. * * Returns: * Kmalloc-allocated pointer to the created report descriptor, * or NULL if allocation failed. */ __u8 *uclogic_rdesc_template_apply(const __u8 *template_ptr, size_t template_size, const s32 *param_list, size_t param_num) { static const __u8 btn_head[] = {UCLOGIC_RDESC_FRAME_PH_BTN_HEAD}; static const __u8 pen_head[] = {UCLOGIC_RDESC_PEN_PH_HEAD}; __u8 *rdesc_ptr; __u8 *p; s32 v; rdesc_ptr = kmemdup(template_ptr, template_size, GFP_KERNEL); if (rdesc_ptr == NULL) return NULL; for (p = rdesc_ptr; p + sizeof(btn_head) < rdesc_ptr + template_size;) { if (p + sizeof(pen_head) < rdesc_ptr + template_size && memcmp(p, pen_head, sizeof(pen_head)) == 0 && p[sizeof(pen_head)] < param_num) { v = param_list[p[sizeof(pen_head)]]; put_unaligned((__force u32)cpu_to_le32(v), (s32 *)p); p += sizeof(pen_head) + 1; } else if (memcmp(p, btn_head, sizeof(btn_head)) == 0 && p[sizeof(btn_head)] < param_num) { v = param_list[p[sizeof(btn_head)]]; put_unaligned((__u8)0x2A, p); /* Usage Maximum */ put_unaligned((__force u16)cpu_to_le16(v), (s16 *)(p + 1)); p += sizeof(btn_head) + 1; } else { p++; } } return rdesc_ptr; } EXPORT_SYMBOL_IF_KUNIT(uclogic_rdesc_template_apply);
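/*
 * A minimal usage sketch of the template mechanism documented above, not
 * taken from the driver itself: the UCLOGIC_RDESC_PEN_PH_ID_* indices and
 * ARRAY_SIZE() are assumed to be available from the driver headers, and the
 * numeric limits below are made-up example values rather than real device
 * data. A probe path would typically gather the tablet's measured limits
 * into an s32 array ordered by placeholder index and run the pen template
 * through uclogic_rdesc_template_apply().
 */
static __u8 *uclogic_rdesc_example_pen_rdesc(size_t *rdesc_size)
{
	/* Parameter list indexed by the placeholder index bytes in the template */
	s32 params[UCLOGIC_RDESC_PEN_PH_ID_NUM] = { 0 };
	__u8 *rdesc;

	params[UCLOGIC_RDESC_PEN_PH_ID_X_LM] = 32767;		/* example X logical max */
	params[UCLOGIC_RDESC_PEN_PH_ID_X_PM] = 3048;		/* example X physical max */
	params[UCLOGIC_RDESC_PEN_PH_ID_Y_LM] = 32767;		/* example Y logical max */
	params[UCLOGIC_RDESC_PEN_PH_ID_Y_PM] = 1905;		/* example Y physical max */
	params[UCLOGIC_RDESC_PEN_PH_ID_PRESSURE_LM] = 8191;	/* example pressure max */

	/*
	 * The result is a kmalloc'ed copy of the template with every
	 * placeholder replaced; its size equals the template size.
	 */
	rdesc = uclogic_rdesc_template_apply(uclogic_rdesc_ugee_v2_pen_template_arr,
					     uclogic_rdesc_ugee_v2_pen_template_size,
					     params, ARRAY_SIZE(params));
	if (!rdesc)
		return NULL;

	*rdesc_size = uclogic_rdesc_ugee_v2_pen_template_size;
	return rdesc;
}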
/* SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2005 David Brownell */ #ifndef __LINUX_SPI_H #define __LINUX_SPI_H #include <linux/acpi.h> #include <linux/bits.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/gpio/consumer.h> #include <linux/kthread.h> #include <linux/mod_devicetable.h> #include <linux/overflow.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/u64_stats_sync.h> #include <uapi/linux/spi/spi.h> /* Max no. of CS supported per spi device */ #define SPI_CS_CNT_MAX 16 struct dma_chan; struct software_node; struct ptp_system_timestamp; struct spi_controller; struct spi_transfer; struct spi_controller_mem_ops; struct spi_controller_mem_caps; struct spi_message; /* * INTERFACES between SPI master-side drivers and SPI slave protocol handlers, * and SPI infrastructure. */ extern const struct bus_type spi_bus_type; /** * struct spi_statistics - statistics for spi transfers * @syncp: seqcount to protect members in this struct for per-cpu update * on 32-bit systems * * @messages: number of spi-messages handled * @transfers: number of spi_transfers handled * @errors: number of errors during spi_transfer * @timedout: number of timeouts during spi_transfer * * @spi_sync: number of times spi_sync is used * @spi_sync_immediate: * number of times spi_sync is executed immediately * in calling context without queuing and scheduling * @spi_async: number of times spi_async is used * * @bytes: number of bytes transferred to/from device * @bytes_tx: number of bytes sent to device * @bytes_rx: number of bytes received from device * * @transfer_bytes_histo: * transfer bytes histogram * * @transfers_split_maxsize: * number of transfers that have been split because of * maxsize limit */ struct spi_statistics { struct u64_stats_sync syncp; u64_stats_t messages; u64_stats_t transfers; u64_stats_t errors; u64_stats_t timedout; u64_stats_t spi_sync; u64_stats_t spi_sync_immediate; u64_stats_t spi_async; u64_stats_t bytes; u64_stats_t bytes_rx; u64_stats_t bytes_tx; #define SPI_STATISTICS_HISTO_SIZE 17 u64_stats_t transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE]; u64_stats_t transfers_split_maxsize; }; #define SPI_STATISTICS_ADD_TO_FIELD(pcpu_stats, field, count) \ do { \ struct spi_statistics *__lstats; \ get_cpu(); \ __lstats = this_cpu_ptr(pcpu_stats); \ u64_stats_update_begin(&__lstats->syncp); \ u64_stats_add(&__lstats->field, count); \ u64_stats_update_end(&__lstats->syncp); \ put_cpu(); \ } while (0) #define SPI_STATISTICS_INCREMENT_FIELD(pcpu_stats, field) \ do { \ struct spi_statistics *__lstats; \ get_cpu(); \ __lstats = this_cpu_ptr(pcpu_stats); \ u64_stats_update_begin(&__lstats->syncp); \ u64_stats_inc(&__lstats->field); \ u64_stats_update_end(&__lstats->syncp); \ put_cpu(); \ } while (0) /** * struct spi_delay - SPI delay information * @value: Value for the delay * @unit: Unit for the delay */ struct spi_delay { #define SPI_DELAY_UNIT_USECS 0 #define SPI_DELAY_UNIT_NSECS 1 #define SPI_DELAY_UNIT_SCK 2 u16 value; u8 unit; }; extern int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer); extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); extern void spi_transfer_cs_change_delay_exec(struct
spi_message *msg, struct spi_transfer *xfer); /** * struct spi_device - Controller side proxy for an SPI slave device * @dev: Driver model representation of the device. * @controller: SPI controller used with the device. * @max_speed_hz: Maximum clock rate to be used with this chip * (on this board); may be changed by the device's driver. * The spi_transfer.speed_hz can override this for each transfer. * @chip_select: Array of physical chipselect, spi->chipselect[i] gives * the corresponding physical CS for logical CS i. * @mode: The spi mode defines how data is clocked out and in. * This may be changed by the device's driver. * The "active low" default for chipselect mode can be overridden * (by specifying SPI_CS_HIGH) as can the "MSB first" default for * each word in a transfer (by specifying SPI_LSB_FIRST). * @bits_per_word: Data transfers involve one or more words; word sizes * like eight or 12 bits are common. In-memory wordsizes are * powers of two bytes (e.g. 20 bit samples use 32 bits). * This may be changed by the device's driver, or left at the * default (0) indicating protocol words are eight bit bytes. * The spi_transfer.bits_per_word can override this for each transfer. * @rt: Make the pump thread real time priority. * @irq: Negative, or the number passed to request_irq() to receive * interrupts from this device. * @controller_state: Controller's runtime state * @controller_data: Board-specific definitions for controller, such as * FIFO initialization parameters; from board_info.controller_data * @modalias: Name of the driver to use with this device, or an alias * for that name. This appears in the sysfs "modalias" attribute * for driver coldplugging, and in uevents used for hotplugging * @driver_override: If the name of a driver is written to this attribute, then * the device will bind to the named driver and only the named driver. * Do not set directly, because core frees it; use driver_set_override() to * set or clear it. * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines * (optional, NULL when not using a GPIO line) * @word_delay: delay to be inserted between consecutive * words of a transfer * @cs_setup: delay to be introduced by the controller after CS is asserted * @cs_hold: delay to be introduced by the controller before CS is deasserted * @cs_inactive: delay to be introduced by the controller after CS is * deasserted. If @cs_change_delay is used from @spi_transfer, then the * two delays will be added up. * @pcpu_statistics: statistics for the spi_device * @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array * * A @spi_device is used to interchange data between an SPI slave * (usually a discrete chip) and CPU memory. * * In @dev, the platform_data is used to hold information about this * device that's meaningful to the device's protocol driver, but not * to its controller. One example might be an identifier for a chip * variant with slightly different functionality; another might be * information about how this particular board wires the chip's pins. */ struct spi_device { struct device dev; struct spi_controller *controller; u32 max_speed_hz; u8 chip_select[SPI_CS_CNT_MAX]; u8 bits_per_word; bool rt; #define SPI_NO_TX BIT(31) /* No transmit wire */ #define SPI_NO_RX BIT(30) /* No receive wire */ /* * TPM specification defines flow control over SPI. Client device * can insert a wait state on MISO when address is transmitted by * controller on MOSI. 
Detecting the wait state in software is only * possible for full duplex controllers. For controllers that support * only half-duplex, the wait state detection needs to be implemented * in hardware. TPM devices would set this flag when hardware flow * control is expected from SPI controller. */ #define SPI_TPM_HW_FLOW BIT(29) /* TPM HW flow control */ /* * All bits defined above should be covered by SPI_MODE_KERNEL_MASK. * The SPI_MODE_KERNEL_MASK has the SPI_MODE_USER_MASK counterpart, * which is defined in 'include/uapi/linux/spi/spi.h'. * The bits defined here are from bit 31 downwards, while in * SPI_MODE_USER_MASK are from 0 upwards. * These bits must not overlap. A static assert check should make sure of that. * If adding extra bits, make sure to decrease the bit index below as well. */ #define SPI_MODE_KERNEL_MASK (~(BIT(29) - 1)) u32 mode; int irq; void *controller_state; void *controller_data; char modalias[SPI_NAME_SIZE]; const char *driver_override; struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */ struct spi_delay word_delay; /* Inter-word delay */ /* CS delays */ struct spi_delay cs_setup; struct spi_delay cs_hold; struct spi_delay cs_inactive; /* The statistics */ struct spi_statistics __percpu *pcpu_statistics; /* Bit mask of the chipselect(s) that the driver need to use from * the chipselect array.When the controller is capable to handle * multiple chip selects & memories are connected in parallel * then more than one bit need to be set in cs_index_mask. */ u32 cs_index_mask : SPI_CS_CNT_MAX; /* * Likely need more hooks for more protocol options affecting how * the controller talks to each chip, like: * - memory packing (12 bit samples into low bits, others zeroed) * - priority * - chipselect delays * - ... */ }; /* Make sure that SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK don't overlap */ static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0, "SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap"); static inline struct spi_device *to_spi_device(const struct device *dev) { return dev ? container_of(dev, struct spi_device, dev) : NULL; } /* Most drivers won't need to care about device refcounting */ static inline struct spi_device *spi_dev_get(struct spi_device *spi) { return (spi && get_device(&spi->dev)) ? 
spi : NULL; } static inline void spi_dev_put(struct spi_device *spi) { if (spi) put_device(&spi->dev); } /* ctldata is for the bus_controller driver's runtime state */ static inline void *spi_get_ctldata(const struct spi_device *spi) { return spi->controller_state; } static inline void spi_set_ctldata(struct spi_device *spi, void *state) { spi->controller_state = state; } /* Device driver data */ static inline void spi_set_drvdata(struct spi_device *spi, void *data) { dev_set_drvdata(&spi->dev, data); } static inline void *spi_get_drvdata(const struct spi_device *spi) { return dev_get_drvdata(&spi->dev); } static inline u8 spi_get_chipselect(const struct spi_device *spi, u8 idx) { return spi->chip_select[idx]; } static inline void spi_set_chipselect(struct spi_device *spi, u8 idx, u8 chipselect) { spi->chip_select[idx] = chipselect; } static inline struct gpio_desc *spi_get_csgpiod(const struct spi_device *spi, u8 idx) { return spi->cs_gpiod[idx]; } static inline void spi_set_csgpiod(struct spi_device *spi, u8 idx, struct gpio_desc *csgpiod) { spi->cs_gpiod[idx] = csgpiod; } static inline bool spi_is_csgpiod(struct spi_device *spi) { u8 idx; for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { if (spi_get_csgpiod(spi, idx)) return true; } return false; } /** * struct spi_driver - Host side "protocol" driver * @id_table: List of SPI devices supported by this driver * @probe: Binds this driver to the SPI device. Drivers can verify * that the device is actually present, and may need to configure * characteristics (such as bits_per_word) which weren't needed for * the initial configuration done during system setup. * @remove: Unbinds this driver from the SPI device * @shutdown: Standard shutdown callback used during system state * transitions such as powerdown/halt and kexec * @driver: SPI device drivers should initialize the name and owner * field of this structure. * * This represents the kind of device driver that uses SPI messages to * interact with the hardware at the other end of a SPI link. It's called * a "protocol" driver because it works through messages rather than talking * directly to SPI hardware (which is what the underlying SPI controller * driver does to pass those messages). These protocols are defined in the * specification for the device(s) supported by the driver. * * As a rule, those device protocols represent the lowest level interface * supported by a driver, and it will support upper level interfaces too. * Examples of such upper levels include frameworks like MTD, networking, * MMC, RTC, filesystem character device nodes, and hardware monitoring. */ struct spi_driver { const struct spi_device_id *id_table; int (*probe)(struct spi_device *spi); void (*remove)(struct spi_device *spi); void (*shutdown)(struct spi_device *spi); struct device_driver driver; }; #define to_spi_driver(__drv) \ ( __drv ? 
container_of_const(__drv, struct spi_driver, driver) : NULL ) extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv); /** * spi_unregister_driver - reverse effect of spi_register_driver * @sdrv: the driver to unregister * Context: can sleep */ static inline void spi_unregister_driver(struct spi_driver *sdrv) { if (sdrv) driver_unregister(&sdrv->driver); } extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select); /* Use a define to avoid include chaining to get THIS_MODULE */ #define spi_register_driver(driver) \ __spi_register_driver(THIS_MODULE, driver) /** * module_spi_driver() - Helper macro for registering a SPI driver * @__spi_driver: spi_driver struct * * Helper macro for SPI drivers which do not do anything special in module * init/exit. This eliminates a lot of boilerplate. Each module may only * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_spi_driver(__spi_driver) \ module_driver(__spi_driver, spi_register_driver, \ spi_unregister_driver) /** * struct spi_controller - interface to SPI master or slave controller * @dev: device interface to this driver * @list: link with the global spi_controller list * @bus_num: board-specific (and often SOC-specific) identifier for a * given SPI controller. * @num_chipselect: chipselects are used to distinguish individual * SPI slaves, and are numbered from zero to num_chipselects. * each slave has a chipselect signal, but it's common that not * every chipselect is connected to a slave. * @dma_alignment: SPI controller constraint on DMA buffers alignment. * @mode_bits: flags understood by this controller driver * @buswidth_override_bits: flags to override for this controller driver * @bits_per_word_mask: A mask indicating which values of bits_per_word are * supported by the driver. Bit n indicates that a bits_per_word n+1 is * supported. If set, the SPI core will reject any transfer with an * unsupported bits_per_word. If not set, this value is simply ignored, * and it's up to the individual driver to perform any validation. * @min_speed_hz: Lowest supported transfer speed * @max_speed_hz: Highest supported transfer speed * @flags: other constraints relevant to this driver * @slave: indicates that this is an SPI slave controller * @target: indicates that this is an SPI target controller * @devm_allocated: whether the allocation of this struct is devres-managed * @max_transfer_size: function that returns the max transfer size for * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used. * @max_message_size: function that returns the max message size for * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used. * @io_mutex: mutex for physical bus access * @add_lock: mutex to avoid adding devices to the same chipselect * @bus_lock_spinlock: spinlock for SPI bus locking * @bus_lock_mutex: mutex for exclusion of multiple callers * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use * @setup: updates the device mode and clocking records used by a * device's SPI controller; protocol code may call this. This * must fail if an unrecognized or unsupported mode is requested. * It's always safe to call this unless transfers are pending on * the device whose settings are being modified. 
* @set_cs_timing: optional hook for SPI devices to request SPI master * controller for configuring specific CS setup time, hold time and inactive * delay interms of clock counts * @transfer: adds a message to the controller's transfer queue. * @cleanup: frees controller-specific state * @can_dma: determine whether this controller supports DMA * @dma_map_dev: device which can be used for DMA mapping * @cur_rx_dma_dev: device which is currently used for RX DMA mapping * @cur_tx_dma_dev: device which is currently used for TX DMA mapping * @queued: whether this controller is providing an internal message queue * @kworker: pointer to thread struct for message pump * @pump_messages: work struct for scheduling work to the message pump * @queue_lock: spinlock to synchronise access to message queue * @queue: message queue * @cur_msg: the currently in-flight message * @cur_msg_completion: a completion for the current in-flight message * @cur_msg_incomplete: Flag used internally to opportunistically skip * the @cur_msg_completion. This flag is used to check if the driver has * already called spi_finalize_current_message(). * @cur_msg_need_completion: Flag used internally to opportunistically skip * the @cur_msg_completion. This flag is used to signal the context that * is running spi_finalize_current_message() that it needs to complete() * @fallback: fallback to PIO if DMA transfer return failure with * SPI_TRANS_FAIL_NO_START. * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs. * @last_cs: the last chip_select that is recorded by set_cs, -1 on non chip * selected * @last_cs_index_mask: bit mask the last chip selects that were used * @xfer_completion: used by core transfer_one_message() * @busy: message pump is busy * @running: message pump is running * @rt: whether this queue is set to run as a realtime task * @auto_runtime_pm: the core should ensure a runtime PM reference is held * while the hardware is prepared, using the parent * device for the spidev * @max_dma_len: Maximum length of a DMA transfer for the device. * @prepare_transfer_hardware: a message will soon arrive from the queue * so the subsystem requests the driver to prepare the transfer hardware * by issuing this call * @transfer_one_message: the subsystem calls the driver to transfer a single * message while queuing transfers that arrive in the meantime. When the * driver is finished with this message, it must call * spi_finalize_current_message() so the subsystem can issue the next * message * @unprepare_transfer_hardware: there are currently no more messages on the * queue so the subsystem notifies the driver that it may relax the * hardware by issuing this call * * @set_cs: set the logic level of the chip select line. May be called * from interrupt context. * @optimize_message: optimize the message for reuse * @unoptimize_message: release resources allocated by optimize_message * @prepare_message: set up the controller to transfer a single message, * for example doing DMA mapping. Called from threaded * context. * @transfer_one: transfer a single spi_transfer. * * - return 0 if the transfer is finished, * - return 1 if the transfer is still in progress. When * the driver is finished with this transfer it must * call spi_finalize_current_transfer() so the subsystem * can issue the next transfer. If the transfer fails, the * driver must set the flag SPI_TRANS_FAIL_IO to * spi_transfer->error first, before calling * spi_finalize_current_transfer(). 
* Note: transfer_one and transfer_one_message are mutually * exclusive; when both are set, the generic subsystem does * not call your transfer_one callback. * @handle_err: the subsystem calls the driver to handle an error that occurs * in the generic implementation of transfer_one_message(). * @mem_ops: optimized/dedicated operations for interactions with SPI memory. * This field is optional and should only be implemented if the * controller has native support for memory like operations. * @mem_caps: controller capabilities for the handling of memory operations. * @unprepare_message: undo any work done by prepare_message(). * @target_abort: abort the ongoing transfer request on an SPI target controller * @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS * number. Any individual value may be NULL for CS lines that * are not GPIOs (driven by the SPI controller itself). * @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab * GPIO descriptors. This will fill in @cs_gpiods and SPI devices will have * the cs_gpiod assigned if a GPIO line is found for the chipselect. * @unused_native_cs: When cs_gpiods is used, spi_register_controller() will * fill in this field with the first unused native CS, to be used by SPI * controller drivers that need to drive a native CS when using GPIO CS. * @max_native_cs: When cs_gpiods is used, and this field is filled in, * spi_register_controller() will validate all native CS (including the * unused native CS) against this value. * @pcpu_statistics: statistics for the spi_controller * @dma_tx: DMA transmit channel * @dma_rx: DMA receive channel * @dummy_rx: dummy receive buffer for full-duplex devices * @dummy_tx: dummy transmit buffer for full-duplex devices * @fw_translate_cs: If the boot firmware uses different numbering scheme * what Linux expects, this optional hook can be used to translate * between the two. * @ptp_sts_supported: If the driver sets this to true, it must provide a * time snapshot in @spi_transfer->ptp_sts as close as possible to the * moment in time when @spi_transfer->ptp_sts_word_pre and * @spi_transfer->ptp_sts_word_post were transmitted. * If the driver does not set this, the SPI core takes the snapshot as * close to the driver hand-over as possible. * @irq_flags: Interrupt enable state during PTP system timestamping * @queue_empty: signal green light for opportunistically skipping the queue * for spi_sync transfers. * @must_async: disable all fast paths in the core * @defer_optimize_message: set to true if controller cannot pre-optimize messages * and needs to defer the optimization step until the message is actually * being transferred * * Each SPI controller can communicate with one or more @spi_device * children. These make a small bus, sharing MOSI, MISO and SCK signals * but not chip select signals. Each device may be configured to use a * different clock rate, since those shared signals are ignored unless * the chip is selected. * * The driver for an SPI controller manages access to those devices through * a queue of spi_message transactions, copying data between CPU memory and * an SPI slave device. For each such message it queues, it calls the * message's completion function when the transaction completes. */ struct spi_controller { struct device dev; struct list_head list; /* * Other than negative (== assign one dynamically), bus_num is fully * board-specific. Usually that simplifies to being SoC-specific. 
* example: one SoC has three SPI controllers, numbered 0..2, * and one board's schematics might show it using SPI-2. Software * would normally use bus_num=2 for that controller. */ s16 bus_num; /* * Chipselects will be integral to many controllers; some others * might use board-specific GPIOs. */ u16 num_chipselect; /* Some SPI controllers pose alignment requirements on DMAable * buffers; let protocol drivers know about these requirements. */ u16 dma_alignment; /* spi_device.mode flags understood by this controller driver */ u32 mode_bits; /* spi_device.mode flags override flags for this controller */ u32 buswidth_override_bits; /* Bitmask of supported bits_per_word for transfers */ u32 bits_per_word_mask; #define SPI_BPW_MASK(bits) BIT((bits) - 1) #define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1) /* Limits on transfer speed */ u32 min_speed_hz; u32 max_speed_hz; /* Other constraints relevant to this driver */ u16 flags; #define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* Can't do full duplex */ #define SPI_CONTROLLER_NO_RX BIT(1) /* Can't do buffer read */ #define SPI_CONTROLLER_NO_TX BIT(2) /* Can't do buffer write */ #define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */ #define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */ #define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select slave */ #define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */ /* * The spi-controller has multi chip select capability and can * assert/de-assert more than one chip select at once. */ #define SPI_CONTROLLER_MULTI_CS BIT(7) /* Flag indicating if the allocation of this struct is devres-managed */ bool devm_allocated; union { /* Flag indicating this is an SPI slave controller */ bool slave; /* Flag indicating this is an SPI target controller */ bool target; }; /* * On some hardware transfer / message size may be constrained * the limit may depend on device transfer settings. */ size_t (*max_transfer_size)(struct spi_device *spi); size_t (*max_message_size)(struct spi_device *spi); /* I/O mutex */ struct mutex io_mutex; /* Used to avoid adding the same CS twice */ struct mutex add_lock; /* Lock and mutex for SPI bus locking */ spinlock_t bus_lock_spinlock; struct mutex bus_lock_mutex; /* Flag indicating that the SPI bus is locked for exclusive use */ bool bus_lock_flag; /* * Setup mode and clock, etc (SPI driver may call many times). * * IMPORTANT: this may be called when transfers to another * device are active. DO NOT UPDATE SHARED REGISTERS in ways * which could break those transfers. */ int (*setup)(struct spi_device *spi); /* * set_cs_timing() method is for SPI controllers that supports * configuring CS timing. * * This hook allows SPI client drivers to request SPI controllers * to configure specific CS timing through spi_set_cs_timing() after * spi_setup(). */ int (*set_cs_timing)(struct spi_device *spi); /* * Bidirectional bulk transfers * * + The transfer() method may not sleep; its main role is * just to add the message to the queue. * + For now there's no remove-from-queue operation, or * any other request management * + To a given spi_device, message queueing is pure FIFO * * + The controller's main job is to process its message queue, * selecting a chip (for masters), then transferring data * + If there are multiple spi_device children, the i/o queue * arbitration algorithm is unspecified (round robin, FIFO, * priority, reservations, preemption, etc) * * + Chipselect stays active during the entire message * (unless modified by spi_transfer.cs_change != 0). 
* + The message transfers use clock and SPI mode parameters * previously established by setup() for this device */ int (*transfer)(struct spi_device *spi, struct spi_message *mesg); /* Called on release() to free memory provided by spi_controller */ void (*cleanup)(struct spi_device *spi); /* * Used to enable core support for DMA handling, if can_dma() * exists and returns true then the transfer will be mapped * prior to transfer_one() being called. The driver should * not modify or store xfer and dma_tx and dma_rx must be set * while the device is prepared. */ bool (*can_dma)(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer); struct device *dma_map_dev; struct device *cur_rx_dma_dev; struct device *cur_tx_dma_dev; /* * These hooks are for drivers that want to use the generic * controller transfer queueing mechanism. If these are used, the * transfer() function above must NOT be specified by the driver. * Over time we expect SPI drivers to be phased over to this API. */ bool queued; struct kthread_worker *kworker; struct kthread_work pump_messages; spinlock_t queue_lock; struct list_head queue; struct spi_message *cur_msg; struct completion cur_msg_completion; bool cur_msg_incomplete; bool cur_msg_need_completion; bool busy; bool running; bool rt; bool auto_runtime_pm; bool fallback; bool last_cs_mode_high; s8 last_cs[SPI_CS_CNT_MAX]; u32 last_cs_index_mask : SPI_CS_CNT_MAX; struct completion xfer_completion; size_t max_dma_len; int (*optimize_message)(struct spi_message *msg); int (*unoptimize_message)(struct spi_message *msg); int (*prepare_transfer_hardware)(struct spi_controller *ctlr); int (*transfer_one_message)(struct spi_controller *ctlr, struct spi_message *mesg); int (*unprepare_transfer_hardware)(struct spi_controller *ctlr); int (*prepare_message)(struct spi_controller *ctlr, struct spi_message *message); int (*unprepare_message)(struct spi_controller *ctlr, struct spi_message *message); int (*target_abort)(struct spi_controller *ctlr); /* * These hooks are for drivers that use a generic implementation * of transfer_one_message() provided by the core. */ void (*set_cs)(struct spi_device *spi, bool enable); int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *transfer); void (*handle_err)(struct spi_controller *ctlr, struct spi_message *message); /* Optimized handlers for SPI memory-like operations. */ const struct spi_controller_mem_ops *mem_ops; const struct spi_controller_mem_caps *mem_caps; /* GPIO chip select */ struct gpio_desc **cs_gpiods; bool use_gpio_descriptors; s8 unused_native_cs; s8 max_native_cs; /* Statistics */ struct spi_statistics __percpu *pcpu_statistics; /* DMA channels for use with core dmaengine helpers */ struct dma_chan *dma_tx; struct dma_chan *dma_rx; /* Dummy data for full duplex devices */ void *dummy_rx; void *dummy_tx; int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs); /* * Driver sets this field to indicate it is able to snapshot SPI * transfers (needed e.g. 
for reading the time of POSIX clocks) */ bool ptp_sts_supported; /* Interrupt enable state during PTP system timestamping */ unsigned long irq_flags; /* Flag for enabling opportunistic skipping of the queue in spi_sync */ bool queue_empty; bool must_async; bool defer_optimize_message; }; static inline void *spi_controller_get_devdata(struct spi_controller *ctlr) { return dev_get_drvdata(&ctlr->dev); } static inline void spi_controller_set_devdata(struct spi_controller *ctlr, void *data) { dev_set_drvdata(&ctlr->dev, data); } static inline struct spi_controller *spi_controller_get(struct spi_controller *ctlr) { if (!ctlr || !get_device(&ctlr->dev)) return NULL; return ctlr; } static inline void spi_controller_put(struct spi_controller *ctlr) { if (ctlr) put_device(&ctlr->dev); } static inline bool spi_controller_is_target(struct spi_controller *ctlr) { return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->target; } /* PM calls that need to be issued by the driver */ extern int spi_controller_suspend(struct spi_controller *ctlr); extern int spi_controller_resume(struct spi_controller *ctlr); /* Calls the driver make to interact with the message queue */ extern struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr); extern void spi_finalize_current_message(struct spi_controller *ctlr); extern void spi_finalize_current_transfer(struct spi_controller *ctlr); /* Helper calls for driver to timestamp transfer */ void spi_take_timestamp_pre(struct spi_controller *ctlr, struct spi_transfer *xfer, size_t progress, bool irqs_off); void spi_take_timestamp_post(struct spi_controller *ctlr, struct spi_transfer *xfer, size_t progress, bool irqs_off); /* The SPI driver core manages memory for the spi_controller classdev */ extern struct spi_controller *__spi_alloc_controller(struct device *host, unsigned int size, bool slave); static inline struct spi_controller *spi_alloc_master(struct device *host, unsigned int size) { return __spi_alloc_controller(host, size, false); } static inline struct spi_controller *spi_alloc_slave(struct device *host, unsigned int size) { if (!IS_ENABLED(CONFIG_SPI_SLAVE)) return NULL; return __spi_alloc_controller(host, size, true); } static inline struct spi_controller *spi_alloc_host(struct device *dev, unsigned int size) { return __spi_alloc_controller(dev, size, false); } static inline struct spi_controller *spi_alloc_target(struct device *dev, unsigned int size) { if (!IS_ENABLED(CONFIG_SPI_SLAVE)) return NULL; return __spi_alloc_controller(dev, size, true); } struct spi_controller *__devm_spi_alloc_controller(struct device *dev, unsigned int size, bool slave); static inline struct spi_controller *devm_spi_alloc_master(struct device *dev, unsigned int size) { return __devm_spi_alloc_controller(dev, size, false); } static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev, unsigned int size) { if (!IS_ENABLED(CONFIG_SPI_SLAVE)) return NULL; return __devm_spi_alloc_controller(dev, size, true); } static inline struct spi_controller *devm_spi_alloc_host(struct device *dev, unsigned int size) { return __devm_spi_alloc_controller(dev, size, false); } static inline struct spi_controller *devm_spi_alloc_target(struct device *dev, unsigned int size) { if (!IS_ENABLED(CONFIG_SPI_SLAVE)) return NULL; return __devm_spi_alloc_controller(dev, size, true); } extern int spi_register_controller(struct spi_controller *ctlr); extern int devm_spi_register_controller(struct device *dev, struct spi_controller *ctlr); extern void spi_unregister_controller(struct 
spi_controller *ctlr); #if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SPI_MASTER) extern struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev); extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, struct acpi_device *adev, int index); int acpi_spi_count_resources(struct acpi_device *adev); #else static inline struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) { return NULL; } static inline struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, struct acpi_device *adev, int index) { return ERR_PTR(-ENODEV); } static inline int acpi_spi_count_resources(struct acpi_device *adev) { return 0; } #endif /* * SPI resource management while processing a SPI message */ typedef void (*spi_res_release_t)(struct spi_controller *ctlr, struct spi_message *msg, void *res); /** * struct spi_res - SPI resource management structure * @entry: list entry * @release: release code called prior to freeing this resource * @data: extra data allocated for the specific use-case * * This is based on ideas from devres, but focused on life-cycle * management during spi_message processing. */ struct spi_res { struct list_head entry; spi_res_release_t release; unsigned long long data[]; /* Guarantee ull alignment */ }; /*---------------------------------------------------------------------------*/ /* * I/O INTERFACE between SPI controller and protocol drivers * * Protocol drivers use a queue of spi_messages, each transferring data * between the controller and memory buffers. * * The spi_messages themselves consist of a series of read+write transfer * segments. Those segments always read the same number of bits as they * write; but one or the other is easily ignored by passing a NULL buffer * pointer. (This is unlike most types of I/O API, because SPI hardware * is full duplex.) * * NOTE: Allocation of spi_transfer and spi_message memory is entirely * up to the protocol driver, which guarantees the integrity of both (as * well as the data buffers) for as long as the message is queued. */ /** * struct spi_transfer - a read/write buffer pair * @tx_buf: data to be written (DMA-safe memory), or NULL * @rx_buf: data to be read (DMA-safe memory), or NULL * @tx_dma: DMA address of tx_buf, currently not for client use * @rx_dma: DMA address of rx_buf, currently not for client use * @tx_nbits: number of bits used for writing. If 0 the default * (SPI_NBITS_SINGLE) is used. * @rx_nbits: number of bits used for reading. If 0 the default * (SPI_NBITS_SINGLE) is used. * @len: size of rx and tx buffers (in bytes) * @speed_hz: Select a speed other than the device default for this * transfer. If 0 the default (from @spi_device) is used. * @bits_per_word: select a bits_per_word other than the device default * for this transfer. If 0 the default (from @spi_device) is used. * @dummy_data: indicates transfer is dummy bytes transfer. * @cs_off: performs the transfer with chipselect off. * @cs_change: affects chipselect after this transfer completes * @cs_change_delay: delay between cs deassert and assert when * @cs_change is set and @spi_transfer is not the last in @spi_message * @delay: delay to be introduced after this transfer before * (optionally) changing the chipselect status, then starting * the next transfer or completing this @spi_message. * @word_delay: inter word delay to be introduced after each word size * (set by bits_per_word) transmission. * @effective_speed_hz: the effective SCK-speed that was used to * transfer this transfer. 
Set to 0 if the SPI bus driver does * not support it. * @transfer_list: transfers are sequenced through @spi_message.transfers * @tx_sg_mapped: If true, the @tx_sg is mapped for DMA * @rx_sg_mapped: If true, the @rx_sg is mapped for DMA * @tx_sg: Scatterlist for transmit, currently not for client use * @rx_sg: Scatterlist for receive, currently not for client use * @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset * within @tx_buf for which the SPI device is requesting that the time * snapshot for this transfer begins. Upon completing the SPI transfer, * this value may have changed compared to what was requested, depending * on the available snapshotting resolution (DMA transfer, * @ptp_sts_supported is false, etc). * @ptp_sts_word_post: See @ptp_sts_word_post. The two can be equal (meaning * that a single byte should be snapshotted). * If the core takes care of the timestamp (if @ptp_sts_supported is false * for this controller), it will set @ptp_sts_word_pre to 0, and * @ptp_sts_word_post to the length of the transfer. This is done * purposefully (instead of setting to spi_transfer->len - 1) to denote * that a transfer-level snapshot taken from within the driver may still * be of higher quality. * @ptp_sts: Pointer to a memory location held by the SPI slave device where a * PTP system timestamp structure may lie. If drivers use PIO or their * hardware has some sort of assist for retrieving exact transfer timing, * they can (and should) assert @ptp_sts_supported and populate this * structure using the ptp_read_system_*ts helper functions. * The timestamp must represent the time at which the SPI slave device has * processed the word, i.e. the "pre" timestamp should be taken before * transmitting the "pre" word, and the "post" timestamp after receiving * transmit confirmation from the controller for the "post" word. * @timestamped: true if the transfer has been timestamped * @error: Error status logged by SPI controller driver. * * SPI transfers always write the same number of bytes as they read. * Protocol drivers should always provide @rx_buf and/or @tx_buf. * In some cases, they may also want to provide DMA addresses for * the data being transferred; that may reduce overhead, when the * underlying driver uses DMA. * * If the transmit buffer is NULL, zeroes will be shifted out * while filling @rx_buf. If the receive buffer is NULL, the data * shifted in will be discarded. Only "len" bytes shift out (or in). * It's an error to try to shift out a partial word. (For example, by * shifting out three bytes with word size of sixteen or twenty bits; * the former uses two bytes per word, the latter uses four bytes.) * * In-memory data values are always in native CPU byte order, translated * from the wire byte order (big-endian except with SPI_LSB_FIRST). So * for example when bits_per_word is sixteen, buffers are 2N bytes long * (@len = 2N) and hold N sixteen bit words in CPU byte order. * * When the word size of the SPI transfer is not a power-of-two multiple * of eight bits, those in-memory words include extra bits. In-memory * words are always seen by protocol drivers as right-justified, so the * undefined (rx) or unused (tx) bits are always the most significant bits. * * All SPI transfers start with the relevant chipselect active. Normally * it stays selected until after the last transfer in a message. Drivers * can affect the chipselect signal using cs_change. 
* * (i) If the transfer isn't the last one in the message, this flag is * used to make the chipselect briefly go inactive in the middle of the * message. Toggling chipselect in this way may be needed to terminate * a chip command, letting a single spi_message perform all of group of * chip transactions together. * * (ii) When the transfer is the last one in the message, the chip may * stay selected until the next transfer. On multi-device SPI busses * with nothing blocking messages going to other devices, this is just * a performance hint; starting a message to another device deselects * this one. But in other cases, this can be used to ensure correctness. * Some devices need protocol transactions to be built from a series of * spi_message submissions, where the content of one message is determined * by the results of previous messages and where the whole transaction * ends when the chipselect goes inactive. * * When SPI can transfer in 1x,2x or 4x. It can get this transfer information * from device through @tx_nbits and @rx_nbits. In Bi-direction, these * two should both be set. User can set transfer mode with SPI_NBITS_SINGLE(1x) * SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to support these three transfer. * * The code that submits an spi_message (and its spi_transfers) * to the lower layers is responsible for managing its memory. * Zero-initialize every field you don't set up explicitly, to * insulate against future API updates. After you submit a message * and its transfers, ignore them until its completion callback. */ struct spi_transfer { /* * It's okay if tx_buf == rx_buf (right?). * For MicroWire, one buffer must be NULL. * Buffers must work with dma_*map_single() calls. */ const void *tx_buf; void *rx_buf; unsigned len; #define SPI_TRANS_FAIL_NO_START BIT(0) #define SPI_TRANS_FAIL_IO BIT(1) u16 error; bool tx_sg_mapped; bool rx_sg_mapped; struct sg_table tx_sg; struct sg_table rx_sg; dma_addr_t tx_dma; dma_addr_t rx_dma; unsigned dummy_data:1; unsigned cs_off:1; unsigned cs_change:1; unsigned tx_nbits:4; unsigned rx_nbits:4; unsigned timestamped:1; #define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */ #define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */ #define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */ #define SPI_NBITS_OCTAL 0x08 /* 8-bit transfer */ u8 bits_per_word; struct spi_delay delay; struct spi_delay cs_change_delay; struct spi_delay word_delay; u32 speed_hz; u32 effective_speed_hz; unsigned int ptp_sts_word_pre; unsigned int ptp_sts_word_post; struct ptp_system_timestamp *ptp_sts; struct list_head transfer_list; }; /** * struct spi_message - one multi-segment SPI transaction * @transfers: list of transfer segments in this transaction * @spi: SPI device to which the transaction is queued * @pre_optimized: peripheral driver pre-optimized the message * @optimized: the message is in the optimized state * @prepared: spi_prepare_message was called for the this message * @status: zero for success, else negative errno * @complete: called to report transaction completions * @context: the argument to complete() when it's called * @frame_length: the total number of bytes in the message * @actual_length: the total number of bytes that were transferred in all * successful segments * @queue: for use by whichever driver currently owns the message * @state: for use by whichever driver currently owns the message * @opt_state: for use by whichever driver currently owns the message * @resources: for resource management when the SPI message is processed * * A @spi_message is used to execute an 
atomic sequence of data transfers, * each represented by a struct spi_transfer. The sequence is "atomic" * in the sense that no other spi_message may use that SPI bus until that * sequence completes. On some systems, many such sequences can execute as * a single programmed DMA transfer. On all systems, these messages are * queued, and might complete after transactions to other devices. Messages * sent to a given spi_device are always executed in FIFO order. * * The code that submits an spi_message (and its spi_transfers) * to the lower layers is responsible for managing its memory. * Zero-initialize every field you don't set up explicitly, to * insulate against future API updates. After you submit a message * and its transfers, ignore them until its completion callback. */ struct spi_message { struct list_head transfers; struct spi_device *spi; /* spi_optimize_message() was called for this message */ bool pre_optimized; /* __spi_optimize_message() was called for this message */ bool optimized; /* spi_prepare_message() was called for this message */ bool prepared; /* * REVISIT: we might want a flag affecting the behavior of the * last transfer ... allowing things like "read 16 bit length L" * immediately followed by "read L bytes". Basically imposing * a specific message scheduling algorithm. * * Some controller drivers (message-at-a-time queue processing) * could provide that as their default scheduling algorithm. But * others (with multi-message pipelines) could need a flag to * tell them about such special cases. */ /* Completion is reported through a callback */ int status; void (*complete)(void *context); void *context; unsigned frame_length; unsigned actual_length; /* * For optional use by whatever driver currently owns the * spi_message ... between calls to spi_async and then later * complete(), that's the spi_controller controller driver. */ struct list_head queue; void *state; /* * Optional state for use by controller driver between calls to * __spi_optimize_message() and __spi_unoptimize_message(). */ void *opt_state; /* List of spi_res resources when the SPI message is processed */ struct list_head resources; }; static inline void spi_message_init_no_memset(struct spi_message *m) { INIT_LIST_HEAD(&m->transfers); INIT_LIST_HEAD(&m->resources); } static inline void spi_message_init(struct spi_message *m) { memset(m, 0, sizeof *m); spi_message_init_no_memset(m); } static inline void spi_message_add_tail(struct spi_transfer *t, struct spi_message *m) { list_add_tail(&t->transfer_list, &m->transfers); } static inline void spi_transfer_del(struct spi_transfer *t) { list_del(&t->transfer_list); } static inline int spi_transfer_delay_exec(struct spi_transfer *t) { return spi_delay_exec(&t->delay, t); } /** * spi_message_init_with_transfers - Initialize spi_message and append transfers * @m: spi_message to be initialized * @xfers: An array of SPI transfers * @num_xfers: Number of items in the xfer array * * This function initializes the given spi_message and adds each spi_transfer in * the given array to the message. */ static inline void spi_message_init_with_transfers(struct spi_message *m, struct spi_transfer *xfers, unsigned int num_xfers) { unsigned int i; spi_message_init(m); for (i = 0; i < num_xfers; ++i) spi_message_add_tail(&xfers[i], m); } /* * It's fine to embed message and transaction structures in other data * structures so long as you don't free them while they're in use. 
*/ static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags) { struct spi_message_with_transfers { struct spi_message m; struct spi_transfer t[]; } *mwt; unsigned i; mwt = kzalloc(struct_size(mwt, t, ntrans), flags); if (!mwt) return NULL; spi_message_init_no_memset(&mwt->m); for (i = 0; i < ntrans; i++) spi_message_add_tail(&mwt->t[i], &mwt->m); return &mwt->m; } static inline void spi_message_free(struct spi_message *m) { kfree(m); } extern int spi_optimize_message(struct spi_device *spi, struct spi_message *msg); extern void spi_unoptimize_message(struct spi_message *msg); extern int devm_spi_optimize_message(struct device *dev, struct spi_device *spi, struct spi_message *msg); extern int spi_setup(struct spi_device *spi); extern int spi_async(struct spi_device *spi, struct spi_message *message); extern int spi_target_abort(struct spi_device *spi); static inline size_t spi_max_message_size(struct spi_device *spi) { struct spi_controller *ctlr = spi->controller; if (!ctlr->max_message_size) return SIZE_MAX; return ctlr->max_message_size(spi); } static inline size_t spi_max_transfer_size(struct spi_device *spi) { struct spi_controller *ctlr = spi->controller; size_t tr_max = SIZE_MAX; size_t msg_max = spi_max_message_size(spi); if (ctlr->max_transfer_size) tr_max = ctlr->max_transfer_size(spi); /* Transfer size limit must not be greater than message size limit */ return min(tr_max, msg_max); } /** * spi_is_bpw_supported - Check if bits per word is supported * @spi: SPI device * @bpw: Bits per word * * This function checks to see if the SPI controller supports @bpw. * * Returns: * True if @bpw is supported, false otherwise. */ static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw) { u32 bpw_mask = spi->controller->bits_per_word_mask; if (bpw == 8 || (bpw <= 32 && bpw_mask & SPI_BPW_MASK(bpw))) return true; return false; } /** * spi_controller_xfer_timeout - Compute a suitable timeout value * @ctlr: SPI device * @xfer: Transfer descriptor * * Compute a relevant timeout value for the given transfer. We derive the time * that it would take on a single data line and take twice this amount of time * with a minimum of 500ms to avoid false positives on loaded systems. * * Returns: Transfer timeout value in milliseconds. 
*/ static inline unsigned int spi_controller_xfer_timeout(struct spi_controller *ctlr, struct spi_transfer *xfer) { return max(xfer->len * 8 * 2 / (xfer->speed_hz / 1000), 500U); } /*---------------------------------------------------------------------------*/ /* SPI transfer replacement methods which make use of spi_res */ struct spi_replaced_transfers; typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr, struct spi_message *msg, struct spi_replaced_transfers *res); /** * struct spi_replaced_transfers - structure describing the spi_transfer * replacements that have occurred * so that they can get reverted * @release: some extra release code to get executed prior to * releasing this structure * @extradata: pointer to some extra data if requested or NULL * @replaced_transfers: transfers that have been replaced and which need * to get restored * @replaced_after: the transfer after which the @replaced_transfers * are to get re-inserted * @inserted: number of transfers inserted * @inserted_transfers: array of spi_transfers of array-size @inserted, * that have been replacing replaced_transfers * * Note: that @extradata will point to @inserted_transfers[@inserted] * if some extra allocation is requested, so alignment will be the same * as for spi_transfers. */ struct spi_replaced_transfers { spi_replaced_release_t release; void *extradata; struct list_head replaced_transfers; struct list_head *replaced_after; size_t inserted; struct spi_transfer inserted_transfers[]; }; /*---------------------------------------------------------------------------*/ /* SPI transfer transformation methods */ extern int spi_split_transfers_maxsize(struct spi_controller *ctlr, struct spi_message *msg, size_t maxsize); extern int spi_split_transfers_maxwords(struct spi_controller *ctlr, struct spi_message *msg, size_t maxwords); /*---------------------------------------------------------------------------*/ /* * All these synchronous SPI transfer routines are utilities layered * over the core async transfer primitive. Here, "synchronous" means * they will sleep uninterruptibly until the async transfer completes. */ extern int spi_sync(struct spi_device *spi, struct spi_message *message); extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message); extern int spi_bus_lock(struct spi_controller *ctlr); extern int spi_bus_unlock(struct spi_controller *ctlr); /** * spi_sync_transfer - synchronous SPI data transfer * @spi: device with which data will be exchanged * @xfers: An array of spi_transfers * @num_xfers: Number of items in the xfer array * Context: can sleep * * Does a synchronous SPI data transfer of the given spi_transfer array. * * For more specific semantics see spi_sync(). * * Return: zero on success, else a negative error code. */ static inline int spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers, unsigned int num_xfers) { struct spi_message msg; spi_message_init_with_transfers(&msg, xfers, num_xfers); return spi_sync(spi, &msg); } /** * spi_write - SPI synchronous write * @spi: device to which data will be written * @buf: data buffer * @len: data buffer size * Context: can sleep * * This function writes the buffer @buf. * Callable only from contexts that can sleep. * * Return: zero on success, else a negative error code. 
*/ static inline int spi_write(struct spi_device *spi, const void *buf, size_t len) { struct spi_transfer t = { .tx_buf = buf, .len = len, }; return spi_sync_transfer(spi, &t, 1); } /** * spi_read - SPI synchronous read * @spi: device from which data will be read * @buf: data buffer * @len: data buffer size * Context: can sleep * * This function reads the buffer @buf. * Callable only from contexts that can sleep. * * Return: zero on success, else a negative error code. */ static inline int spi_read(struct spi_device *spi, void *buf, size_t len) { struct spi_transfer t = { .rx_buf = buf, .len = len, }; return spi_sync_transfer(spi, &t, 1); } /* This copies txbuf and rxbuf data; for small transfers only! */ extern int spi_write_then_read(struct spi_device *spi, const void *txbuf, unsigned n_tx, void *rxbuf, unsigned n_rx); /** * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read * @spi: device with which data will be exchanged * @cmd: command to be written before data is read back * Context: can sleep * * Callable only from contexts that can sleep. * * Return: the (unsigned) eight bit number returned by the * device, or else a negative error code. */ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) { ssize_t status; u8 result; status = spi_write_then_read(spi, &cmd, 1, &result, 1); /* Return negative errno or unsigned value */ return (status < 0) ? status : result; } /** * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read * @spi: device with which data will be exchanged * @cmd: command to be written before data is read back * Context: can sleep * * The number is returned in wire-order, which is at least sometimes * big-endian. * * Callable only from contexts that can sleep. * * Return: the (unsigned) sixteen bit number returned by the * device, or else a negative error code. */ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) { ssize_t status; u16 result; status = spi_write_then_read(spi, &cmd, 1, &result, 2); /* Return negative errno or unsigned value */ return (status < 0) ? status : result; } /** * spi_w8r16be - SPI synchronous 8 bit write followed by 16 bit big-endian read * @spi: device with which data will be exchanged * @cmd: command to be written before data is read back * Context: can sleep * * This function is similar to spi_w8r16, with the exception that it will * convert the read 16 bit data word from big-endian to native endianness. * * Callable only from contexts that can sleep. * * Return: the (unsigned) sixteen bit number returned by the device in CPU * endianness, or else a negative error code. */ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) { ssize_t status; __be16 result; status = spi_write_then_read(spi, &cmd, 1, &result, 2); if (status < 0) return status; return be16_to_cpu(result); } /*---------------------------------------------------------------------------*/ /* * INTERFACE between board init code and SPI infrastructure. * * No SPI driver ever sees these SPI device table segments, but * it's how the SPI core (or adapters that get hotplugged) grows * the driver model tree. * * As a rule, SPI devices can't be probed. Instead, board init code * provides a table listing the devices which are present, with enough * information to bind and set up the device's driver. There's basic * support for non-static configurations too; enough to handle adding * parport adapters, or microcontrollers acting as USB-to-SPI bridges. 
*/ /** * struct spi_board_info - board-specific template for a SPI device * @modalias: Initializes spi_device.modalias; identifies the driver. * @platform_data: Initializes spi_device.platform_data; the particular * data stored there is driver-specific. * @swnode: Software node for the device. * @controller_data: Initializes spi_device.controller_data; some * controllers need hints about hardware setup, e.g. for DMA. * @irq: Initializes spi_device.irq; depends on how the board is wired. * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits * from the chip datasheet and board-specific signal quality issues. * @bus_num: Identifies which spi_controller parents the spi_device; unused * by spi_new_device(), and otherwise depends on board wiring. * @chip_select: Initializes spi_device.chip_select; depends on how * the board is wired. * @mode: Initializes spi_device.mode; based on the chip datasheet, board * wiring (some devices support both 3WIRE and standard modes), and * possibly presence of an inverter in the chipselect path. * * When adding new SPI devices to the device tree, these structures serve * as a partial device template. They hold information which can't always * be determined by drivers. Information that probe() can establish (such * as the default transfer wordsize) is not included here. * * These structures are used in two places. Their primary role is to * be stored in tables of board-specific device descriptors, which are * declared early in board initialization and then used (much later) to * populate a controller's device tree after the that controller's driver * initializes. A secondary (and atypical) role is as a parameter to * spi_new_device() call, which happens after those controller drivers * are active in some dynamic board configuration models. */ struct spi_board_info { /* * The device name and module name are coupled, like platform_bus; * "modalias" is normally the driver name. * * platform_data goes to spi_device.dev.platform_data, * controller_data goes to spi_device.controller_data, * IRQ is copied too. */ char modalias[SPI_NAME_SIZE]; const void *platform_data; const struct software_node *swnode; void *controller_data; int irq; /* Slower signaling on noisy or low voltage boards */ u32 max_speed_hz; /* * bus_num is board specific and matches the bus_num of some * spi_controller that will probably be registered later. * * chip_select reflects how this chip is wired to that master; * it's less than num_chipselect. */ u16 bus_num; u16 chip_select; /* * mode becomes spi_device.mode, and is essential for chips * where the default of SPI_CS_HIGH = 0 is wrong. */ u32 mode; /* * ... may need additional spi_device chip config data here. * avoid stuff protocol drivers can set; but include stuff * needed to behave without being bound to a driver: * - quirks like clock rate mattering when not selected */ }; #ifdef CONFIG_SPI extern int spi_register_board_info(struct spi_board_info const *info, unsigned n); #else /* Board init code may ignore whether SPI is configured or not */ static inline int spi_register_board_info(struct spi_board_info const *info, unsigned n) { return 0; } #endif /* * If you're hotplugging an adapter with devices (parport, USB, etc) * use spi_new_device() to describe each device. You can also call * spi_unregister_device() to start making that device vanish, but * normally that would be handled by spi_unregister_controller(). 
* * You can also use spi_alloc_device() and spi_add_device() to use a two * stage registration sequence for each spi_device. This gives the caller * some more control over the spi_device structure before it is registered, * but requires that caller to initialize fields that would otherwise * be defined using the board info. */ extern struct spi_device * spi_alloc_device(struct spi_controller *ctlr); extern int spi_add_device(struct spi_device *spi); extern struct spi_device * spi_new_device(struct spi_controller *, struct spi_board_info *); extern void spi_unregister_device(struct spi_device *spi); extern const struct spi_device_id * spi_get_device_id(const struct spi_device *sdev); extern const void * spi_get_device_match_data(const struct spi_device *sdev); static inline bool spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer) { return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers); } #endif /* __LINUX_SPI_H */ |
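/*
 * Illustrative sketch (not part of the header above): how a protocol driver
 * might use the synchronous helpers declared in this header. The device, the
 * register number and the function names are hypothetical.
 */
#include <linux/spi/spi.h>

#define EXAMPLE_REG_CHIP_ID	0x0f	/* hypothetical chip-ID register */

static int example_read_chip_id(struct spi_device *spi)
{
	ssize_t id;

	/* One 8-bit command out, one 8-bit response in; sleeps until done */
	id = spi_w8r8(spi, EXAMPLE_REG_CHIP_ID);
	if (id < 0)
		return id;

	dev_info(&spi->dev, "chip id: 0x%02x\n", (unsigned int)id);
	return 0;
}

static int example_write_block(struct spi_device *spi, const void *buf,
			       size_t len)
{
	/* A single full-duplex segment; rx_buf stays NULL so input is discarded */
	struct spi_transfer xfer = {
		.tx_buf = buf,
		.len = len,
	};

	return spi_sync_transfer(spi, &xfer, 1);
}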
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>

struct percpu_rw_semaphore {
	struct rcu_sync		rss;
	unsigned int __percpu	*read_count;
	struct rcuwait		writer;
	wait_queue_head_t	waiters;
	atomic_t		block;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname },
#else
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __DEFINE_PERCPU_RWSEM(name, is_static)				\
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
is_static struct percpu_rw_semaphore name = {				\
	.rss = __RCU_SYNC_INITIALIZER(name.rss),			\
	.read_count = &__percpu_rwsem_rc_##name,			\
	.writer = __RCUWAIT_INITIALIZER(name.writer),			\
	.waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters),	\
	.block = ATOMIC_INIT(0),					\
	__PERCPU_RWSEM_DEP_MAP_INIT(name)				\
}

#define DEFINE_PERCPU_RWSEM(name)		\
	__DEFINE_PERCPU_RWSEM(name, /* not static */)
#define DEFINE_STATIC_PERCPU_RWSEM(name)	\
	__DEFINE_PERCPU_RWSEM(name, static)

extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	might_sleep();

	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both change sem->state from readers_fast and start checking
	 * counters while we are here. So if we see !sem->state, we know that
	 * the writer won't be checking until we're past the preempt_enable()
	 * and that once the synchronize_rcu() is done, the writer will see
	 * anything we did within this RCU-sched read-side critical section.
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	/*
	 * The preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */
	preempt_enable();
}

static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	bool ret = true;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss))) {
		this_cpu_dec(*sem->read_count);
	} else {
		/*
		 * slowpath; reader will only ever wake a single blocked
		 * writer.
*/ smp_mb(); /* B matches C */ /* * In other words, if they see our decrement (presumably to * aggregate zero, as that is the only time it matters) they * will also see our critical section. */ this_cpu_dec(*sem->read_count); rcuwait_wake_up(&sem->writer); } preempt_enable(); } extern bool percpu_is_read_locked(struct percpu_rw_semaphore *); extern void percpu_down_write(struct percpu_rw_semaphore *); extern void percpu_up_write(struct percpu_rw_semaphore *); static inline bool percpu_is_write_locked(struct percpu_rw_semaphore *sem) { return atomic_read(&sem->block); } extern int __percpu_init_rwsem(struct percpu_rw_semaphore *, const char *, struct lock_class_key *); extern void percpu_free_rwsem(struct percpu_rw_semaphore *); #define percpu_init_rwsem(sem) \ ({ \ static struct lock_class_key rwsem_key; \ __percpu_init_rwsem(sem, #sem, &rwsem_key); \ }) #define percpu_rwsem_is_held(sem) lockdep_is_held(sem) #define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem) static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem, unsigned long ip) { lock_release(&sem->dep_map, ip); } static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem, bool read, unsigned long ip) { lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip); } #endif |
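/*
 * Illustrative sketch (not part of the header above): typical use of the
 * per-CPU rw-semaphore API declared here. The protected variable and the
 * function names are hypothetical.
 */
#include <linux/percpu-rwsem.h>

static DEFINE_STATIC_PERCPU_RWSEM(example_rwsem);
static unsigned long example_value;

/* Hot reader side: normally just a per-CPU counter increment/decrement */
static unsigned long example_read_value(void)
{
	unsigned long v;

	percpu_down_read(&example_rwsem);
	v = example_value;
	percpu_up_read(&example_rwsem);

	return v;
}

/* Rare, expensive writer side: blocks new readers and waits for old ones */
static void example_write_value(unsigned long v)
{
	percpu_down_write(&example_rwsem);
	example_value = v;
	percpu_up_write(&example_rwsem);
}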
// SPDX-License-Identifier: GPL-2.0+
/*
   Siemens ID Mouse driver v0.6

   Copyright (C) 2004-5 by Florian 'Floe' Echtler <echtler@fs.tum.de>
                       and Andreas 'ad' Deresch <aderesch@fs.tum.de>

   Derived from the USB Skeleton driver 1.1,
   Copyright (C) 2003 Greg Kroah-Hartman (greg@kroah.com)

   Additional information provided by Martin Reising
   <Martin.Reising@natural-computing.de>
*/

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>

/* image constants */
#define WIDTH 225
#define HEIGHT 289
#define HEADER "P5 225 289 255 "
#define IMGSIZE ((WIDTH * HEIGHT) + sizeof(HEADER)-1)

#define DRIVER_SHORT "idmouse"
#define DRIVER_AUTHOR "Florian 'Floe' Echtler <echtler@fs.tum.de>"
#define DRIVER_DESC "Siemens ID Mouse FingerTIP Sensor Driver"

/* minor number for misc USB devices */
#define USB_IDMOUSE_MINOR_BASE 132

/* vendor and device IDs */
#define ID_SIEMENS 0x0681
#define ID_IDMOUSE 0x0005
#define ID_CHERRY  0x0010

/* device ID table */
static const struct usb_device_id idmouse_table[] = {
	{USB_DEVICE(ID_SIEMENS, ID_IDMOUSE)}, /* Siemens ID Mouse (Professional) */
	{USB_DEVICE(ID_SIEMENS, ID_CHERRY )}, /* Cherry FingerTIP ID Board */
	{}                                    /* terminating null entry */
};

/* sensor commands */
#define FTIP_RESET   0x20
#define FTIP_ACQUIRE 0x21
#define FTIP_RELEASE 0x22
#define FTIP_BLINK   0x23  /* LSB of value = blink pulse width */
#define FTIP_SCROLL  0x24

#define ftip_command(dev, command, value, index) \
	usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), command, \
	USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, value, index, NULL, 0, 1000)

MODULE_DEVICE_TABLE(usb, idmouse_table);

/* structure to hold all of our device specific stuff */
struct usb_idmouse {

	struct usb_device *udev; /* save off the usb device pointer */
	struct usb_interface *interface; /* the interface for this device */
unsigned char *bulk_in_buffer; /* the buffer to receive data */ size_t bulk_in_size; /* the maximum bulk packet size */ size_t orig_bi_size; /* same as above, but reported by the device */ __u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */ int open; /* if the port is open or not */ int present; /* if the device is not disconnected */ struct mutex lock; /* locks this structure */ }; /* local function prototypes */ static ssize_t idmouse_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos); static int idmouse_open(struct inode *inode, struct file *file); static int idmouse_release(struct inode *inode, struct file *file); static int idmouse_probe(struct usb_interface *interface, const struct usb_device_id *id); static void idmouse_disconnect(struct usb_interface *interface); static int idmouse_suspend(struct usb_interface *intf, pm_message_t message); static int idmouse_resume(struct usb_interface *intf); /* file operation pointers */ static const struct file_operations idmouse_fops = { .owner = THIS_MODULE, .read = idmouse_read, .open = idmouse_open, .release = idmouse_release, .llseek = default_llseek, }; /* class driver information */ static struct usb_class_driver idmouse_class = { .name = "idmouse%d", .fops = &idmouse_fops, .minor_base = USB_IDMOUSE_MINOR_BASE, }; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver idmouse_driver = { .name = DRIVER_SHORT, .probe = idmouse_probe, .disconnect = idmouse_disconnect, .suspend = idmouse_suspend, .resume = idmouse_resume, .reset_resume = idmouse_resume, .id_table = idmouse_table, .supports_autosuspend = 1, }; static int idmouse_create_image(struct usb_idmouse *dev) { int bytes_read; int bulk_read; int result; memcpy(dev->bulk_in_buffer, HEADER, sizeof(HEADER)-1); bytes_read = sizeof(HEADER)-1; /* reset the device and set a fast blink rate */ result = ftip_command(dev, FTIP_RELEASE, 0, 0); if (result < 0) goto reset; result = ftip_command(dev, FTIP_BLINK, 1, 0); if (result < 0) goto reset; /* initialize the sensor - sending this command twice */ /* significantly reduces the rate of failed reads */ result = ftip_command(dev, FTIP_ACQUIRE, 0, 0); if (result < 0) goto reset; result = ftip_command(dev, FTIP_ACQUIRE, 0, 0); if (result < 0) goto reset; /* start the readout - sending this command twice */ /* presumably enables the high dynamic range mode */ result = ftip_command(dev, FTIP_RESET, 0, 0); if (result < 0) goto reset; result = ftip_command(dev, FTIP_RESET, 0, 0); if (result < 0) goto reset; /* loop over a blocking bulk read to get data from the device */ while (bytes_read < IMGSIZE) { result = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr), dev->bulk_in_buffer + bytes_read, dev->bulk_in_size, &bulk_read, 5000); if (result < 0) { /* Maybe this error was caused by the increased packet size? */ /* Reset to the original value and tell userspace to retry. 
*/ if (dev->bulk_in_size != dev->orig_bi_size) { dev->bulk_in_size = dev->orig_bi_size; result = -EAGAIN; } break; } if (signal_pending(current)) { result = -EINTR; break; } bytes_read += bulk_read; } /* check for valid image */ /* right border should be black (0x00) */ for (bytes_read = sizeof(HEADER)-1 + WIDTH-1; bytes_read < IMGSIZE; bytes_read += WIDTH) if (dev->bulk_in_buffer[bytes_read] != 0x00) return -EAGAIN; /* lower border should be white (0xFF) */ for (bytes_read = IMGSIZE-WIDTH; bytes_read < IMGSIZE-1; bytes_read++) if (dev->bulk_in_buffer[bytes_read] != 0xFF) return -EAGAIN; /* reset the device */ reset: ftip_command(dev, FTIP_RELEASE, 0, 0); /* should be IMGSIZE == 65040 */ dev_dbg(&dev->interface->dev, "read %d bytes fingerprint data\n", bytes_read); return result; } /* PM operations are nops as this driver does IO only during open() */ static int idmouse_suspend(struct usb_interface *intf, pm_message_t message) { return 0; } static int idmouse_resume(struct usb_interface *intf) { return 0; } static inline void idmouse_delete(struct usb_idmouse *dev) { kfree(dev->bulk_in_buffer); kfree(dev); } static int idmouse_open(struct inode *inode, struct file *file) { struct usb_idmouse *dev; struct usb_interface *interface; int result; /* get the interface from minor number and driver information */ interface = usb_find_interface(&idmouse_driver, iminor(inode)); if (!interface) return -ENODEV; /* get the device information block from the interface */ dev = usb_get_intfdata(interface); if (!dev) return -ENODEV; /* lock this device */ mutex_lock(&dev->lock); /* check if already open */ if (dev->open) { /* already open, so fail */ result = -EBUSY; } else { /* create a new image and check for success */ result = usb_autopm_get_interface(interface); if (result) goto error; result = idmouse_create_image(dev); usb_autopm_put_interface(interface); if (result) goto error; /* increment our usage count for the driver */ ++dev->open; /* save our object in the file's private structure */ file->private_data = dev; } error: /* unlock this device */ mutex_unlock(&dev->lock); return result; } static int idmouse_release(struct inode *inode, struct file *file) { struct usb_idmouse *dev; dev = file->private_data; if (dev == NULL) return -ENODEV; /* lock our device */ mutex_lock(&dev->lock); --dev->open; if (!dev->present) { /* the device was unplugged before the file was released */ mutex_unlock(&dev->lock); idmouse_delete(dev); } else { mutex_unlock(&dev->lock); } return 0; } static ssize_t idmouse_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos) { struct usb_idmouse *dev = file->private_data; int result; /* lock this object */ mutex_lock(&dev->lock); /* verify that the device wasn't unplugged */ if (!dev->present) { mutex_unlock(&dev->lock); return -ENODEV; } result = simple_read_from_buffer(buffer, count, ppos, dev->bulk_in_buffer, IMGSIZE); /* unlock the device */ mutex_unlock(&dev->lock); return result; } static int idmouse_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct usb_idmouse *dev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int result; /* check if we have gotten the data or the hid interface */ iface_desc = interface->cur_altsetting; if (iface_desc->desc.bInterfaceClass != 0x0A) return -ENODEV; if (iface_desc->desc.bNumEndpoints < 1) return -ENODEV; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), 
GFP_KERNEL); if (dev == NULL) return -ENOMEM; mutex_init(&dev->lock); dev->udev = udev; dev->interface = interface; /* set up the endpoint information - use only the first bulk-in endpoint */ result = usb_find_bulk_in_endpoint(iface_desc, &endpoint); if (result) { dev_err(&interface->dev, "Unable to find bulk-in endpoint.\n"); idmouse_delete(dev); return result; } dev->orig_bi_size = usb_endpoint_maxp(endpoint); dev->bulk_in_size = 0x200; /* works _much_ faster */ dev->bulk_in_endpointAddr = endpoint->bEndpointAddress; dev->bulk_in_buffer = kmalloc(IMGSIZE + dev->bulk_in_size, GFP_KERNEL); if (!dev->bulk_in_buffer) { idmouse_delete(dev); return -ENOMEM; } /* allow device read, write and ioctl */ dev->present = 1; /* we can register the device now, as it is ready */ usb_set_intfdata(interface, dev); result = usb_register_dev(interface, &idmouse_class); if (result) { /* something prevented us from registering this device */ dev_err(&interface->dev, "Unable to allocate minor number.\n"); idmouse_delete(dev); return result; } /* be noisy */ dev_info(&interface->dev,"%s now attached\n",DRIVER_DESC); return 0; } static void idmouse_disconnect(struct usb_interface *interface) { struct usb_idmouse *dev = usb_get_intfdata(interface); /* give back our minor */ usb_deregister_dev(interface, &idmouse_class); /* lock the device */ mutex_lock(&dev->lock); /* prevent device read, write and ioctl */ dev->present = 0; /* if the device is opened, idmouse_release will clean this up */ if (!dev->open) { mutex_unlock(&dev->lock); idmouse_delete(dev); } else { /* unlock */ mutex_unlock(&dev->lock); } dev_info(&interface->dev, "disconnected\n"); } module_usb_driver(idmouse_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); |
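/*
 * Illustrative userspace sketch (not part of the driver above): each open()
 * of the character device acquires one fingerprint image, which read() then
 * returns as a PGM ("P5") file of IMGSIZE bytes. The /dev/idmouse0 node name
 * is an assumption; it depends on udev naming.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define IDMOUSE_IMGSIZE 65040	/* WIDTH * HEIGHT + PGM header, as in the driver */

int main(void)
{
	static unsigned char img[IDMOUSE_IMGSIZE];
	ssize_t n, total = 0;
	int fd;

	fd = open("/dev/idmouse0", O_RDONLY);	/* triggers image acquisition */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	while (total < IDMOUSE_IMGSIZE) {
		n = read(fd, img + total, IDMOUSE_IMGSIZE - total);
		if (n <= 0)
			break;
		total += n;
	}
	close(fd);

	fwrite(img, 1, total, stdout);	/* redirect to a .pgm file to view it */
	return 0;
}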
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for RobotFuzz OSIF
 *
 * Copyright (c) 2013 Andrew Lunn <andrew@lunn.ch>
 * Copyright (c) 2007 Barry Carter <Barry.Carter@robotfuzz.com>
 *
 * Based on the i2c-tiny-usb by
 *
 * Copyright (C) 2006 Til Harbaum (Till@Harbaum.org)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/usb.h>

#define OSIFI2C_READ		20
#define OSIFI2C_WRITE		21
#define OSIFI2C_STOP		22
#define OSIFI2C_STATUS		23
#define OSIFI2C_SET_BIT_RATE	24

#define STATUS_ADDRESS_ACK	0
#define STATUS_ADDRESS_NAK	2

struct osif_priv {
	struct usb_device *usb_dev;
	struct usb_interface *interface;
	struct i2c_adapter adapter;
	unsigned char status;
};

static int osif_usb_read(struct i2c_adapter *adapter, int cmd,
			 int value, int index, void *data, int len)
{
	struct osif_priv *priv = adapter->algo_data;

	return usb_control_msg(priv->usb_dev, usb_rcvctrlpipe(priv->usb_dev, 0),
			       cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
			       USB_DIR_IN, value, index, data, len, 2000);
}

static int osif_usb_write(struct i2c_adapter *adapter, int cmd,
			  int value, int index, void *data, int len)
{
	struct osif_priv *priv = adapter->algo_data;

	return usb_control_msg(priv->usb_dev, usb_sndctrlpipe(priv->usb_dev, 0),
			       cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			       value, index, data, len, 2000);
}

static int osif_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
		     int num)
{
	struct osif_priv *priv = adapter->algo_data;
	struct i2c_msg *pmsg;
	int ret;
	int i;

	for (i = 0; i < num; i++) {
		pmsg = &msgs[i];

		if (pmsg->flags & I2C_M_RD) {
			ret = osif_usb_read(adapter, OSIFI2C_READ,
					    pmsg->flags, pmsg->addr,
					    pmsg->buf, pmsg->len);
			if (ret != pmsg->len) {
				dev_err(&adapter->dev, "failure reading data\n");
				return -EREMOTEIO;
			}
		} else {
			ret = osif_usb_write(adapter, OSIFI2C_WRITE,
					     pmsg->flags, pmsg->addr,
					     pmsg->buf, pmsg->len);
			if (ret != pmsg->len) {
				dev_err(&adapter->dev, "failure writing data\n");
				return -EREMOTEIO;
			}
		}

		ret = osif_usb_write(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
		if (ret) {
			dev_err(&adapter->dev, "failure sending STOP\n");
			return -EREMOTEIO;
		}

		/* read status */
		ret = osif_usb_read(adapter, OSIFI2C_STATUS, 0, 0,
				    &priv->status, 1);
		if (ret != 1) {
			dev_err(&adapter->dev, "failure reading status\n");
			return -EREMOTEIO;
		}

		if (priv->status != STATUS_ADDRESS_ACK) {
			dev_dbg(&adapter->dev, "status = %d\n", priv->status);
			return -EREMOTEIO;
		}
	}

	return i;
}

static u32 osif_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm osif_algorithm = {
	.xfer = osif_xfer,
	.functionality = osif_func,
};

#define USB_OSIF_VENDOR_ID	0x1964
#define USB_OSIF_PRODUCT_ID	0x0001

static const struct usb_device_id osif_table[] = {
	{ USB_DEVICE(USB_OSIF_VENDOR_ID, USB_OSIF_PRODUCT_ID) },
	{ }
};
MODULE_DEVICE_TABLE(usb,
osif_table); static int osif_probe(struct usb_interface *interface, const struct usb_device_id *id) { int ret; struct osif_priv *priv; u16 version; priv = devm_kzalloc(&interface->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->usb_dev = usb_get_dev(interface_to_usbdev(interface)); priv->interface = interface; usb_set_intfdata(interface, priv); priv->adapter.owner = THIS_MODULE; priv->adapter.class = I2C_CLASS_HWMON; priv->adapter.algo = &osif_algorithm; priv->adapter.algo_data = priv; snprintf(priv->adapter.name, sizeof(priv->adapter.name), "OSIF at bus %03d device %03d", priv->usb_dev->bus->busnum, priv->usb_dev->devnum); /* * Set bus frequency. The frequency is: * 120,000,000 / ( 16 + 2 * div * 4^prescale). * Using dev = 52, prescale = 0 give 100KHz */ ret = osif_usb_write(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0, NULL, 0); if (ret) { dev_err(&interface->dev, "failure sending bit rate"); usb_put_dev(priv->usb_dev); return ret; } i2c_add_adapter(&(priv->adapter)); version = le16_to_cpu(priv->usb_dev->descriptor.bcdDevice); dev_info(&interface->dev, "version %x.%02x found at bus %03d address %03d", version >> 8, version & 0xff, priv->usb_dev->bus->busnum, priv->usb_dev->devnum); return 0; } static void osif_disconnect(struct usb_interface *interface) { struct osif_priv *priv = usb_get_intfdata(interface); i2c_del_adapter(&(priv->adapter)); usb_set_intfdata(interface, NULL); usb_put_dev(priv->usb_dev); } static struct usb_driver osif_driver = { .name = "RobotFuzz Open Source InterFace, OSIF", .probe = osif_probe, .disconnect = osif_disconnect, .id_table = osif_table, }; module_usb_driver(osif_driver); MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>"); MODULE_AUTHOR("Barry Carter <barry.carter@robotfuzz.com>"); MODULE_DESCRIPTION("RobotFuzz OSIF driver"); MODULE_LICENSE("GPL v2"); |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HID driver for Holtek gaming mice
 * Copyright (c) 2013 Christian Ohm
 * Heavily inspired by various other HID drivers that adjust the report
 * descriptor.
 */

/*
 */

#include <linux/hid.h>
#include <linux/module.h>
#include <linux/usb.h>

#include "hid-ids.h"

/*
 * The report descriptor of some Holtek based gaming mice specifies an
 * excessively large number of consumer usages (2^15), which is more than
 * HID_MAX_USAGES. This prevents proper parsing of the report descriptor.
 *
 * This driver fixes the report descriptor for:
 * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000
 * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200
 *   and Zalman ZM-GM1
 * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse
 * - USB ID 04d9:a072, sold as LEETGION Hellion Gaming Mouse
 * - USB ID 04d9:a0c2, sold as ETEKCITY Scroll T-140 Gaming Mouse
 */

static const __u8 *holtek_mouse_report_fixup(struct hid_device *hdev,
		__u8 *rdesc, unsigned int *rsize)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);

	if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
		/* Change usage maximum and logical maximum from 0x7fff to
		 * 0x2fff, so they don't exceed HID_MAX_USAGES */
		switch (hdev->product) {
		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067:
		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072:
		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2:
			if (*rsize >= 122 && rdesc[115] == 0xff && rdesc[116] == 0x7f
					&& rdesc[120] == 0xff && rdesc[121] == 0x7f) {
				hid_info(hdev, "Fixing up report descriptor\n");
				rdesc[116] = rdesc[121] = 0x2f;
			}
			break;
		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A:
		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070:
		case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081:
			if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f
					&& rdesc[111] == 0xff && rdesc[112] == 0x7f) {
				hid_info(hdev, "Fixing up report descriptor\n");
				rdesc[107] = rdesc[112] = 0x2f;
			}
			break;
		}
	}
	return rdesc;
}

static int holtek_mouse_probe(struct hid_device *hdev,
			      const struct hid_device_id *id)
{
	int ret;

	if (!hid_is_usb(hdev))
		return -EINVAL;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "hid parse failed: %d\n", ret);
		return ret;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret) {
		hid_err(hdev, "hw start failed: %d\n", ret);
		return ret;
	}

	return 0;
}

static const struct hid_device_id holtek_mouse_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
			USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
	{ }
};
MODULE_DEVICE_TABLE(hid, holtek_mouse_devices);

static struct hid_driver holtek_mouse_driver = {
	.name = "holtek_mouse",
	.id_table = holtek_mouse_devices,
	.report_fixup = holtek_mouse_report_fixup,
	.probe = holtek_mouse_probe,
};

module_hid_driver(holtek_mouse_driver);
MODULE_DESCRIPTION("HID
driver for Holtek gaming mice");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0+
/*
 * module/drivers.c
 * functions for manipulating drivers
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
 * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/comedi/comedidev.h>

#include "comedi_internal.h"

struct comedi_driver *comedi_drivers;
/* protects access to comedi_drivers */
DEFINE_MUTEX(comedi_drivers_list_lock);

/**
 * comedi_set_hw_dev() - Set hardware device associated with COMEDI device
 * @dev: COMEDI device.
 * @hw_dev: Hardware device.
 *
 * For automatically configured COMEDI devices (resulting from a call to
 * comedi_auto_config() or one of its wrappers from the low-level COMEDI
 * driver), comedi_set_hw_dev() is called automatically by the COMEDI core
 * to associate the COMEDI device with the hardware device. It can also be
 * called directly by "legacy" low-level COMEDI drivers that rely on the
 * %COMEDI_DEVCONFIG ioctl to configure the hardware as long as the hardware
 * has a &struct device.
 *
 * If @dev->hw_dev is NULL, it gets a reference to @hw_dev and sets
 * @dev->hw_dev, otherwise, it does nothing. Calling it multiple times
 * with the same hardware device is not considered an error. If it gets
 * a reference to the hardware device, it will be automatically 'put' when
 * the device is detached from COMEDI.
 *
 * Returns 0 if @dev->hw_dev was NULL or the same as @hw_dev, otherwise
 * returns -EEXIST.
*/ int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev) { if (hw_dev == dev->hw_dev) return 0; if (dev->hw_dev) return -EEXIST; dev->hw_dev = get_device(hw_dev); return 0; } EXPORT_SYMBOL_GPL(comedi_set_hw_dev); static void comedi_clear_hw_dev(struct comedi_device *dev) { put_device(dev->hw_dev); dev->hw_dev = NULL; } /** * comedi_alloc_devpriv() - Allocate memory for the device private data * @dev: COMEDI device. * @size: Size of the memory to allocate. * * The allocated memory is zero-filled. @dev->private points to it on * return. The memory will be automatically freed when the COMEDI device is * "detached". * * Returns a pointer to the allocated memory, or NULL on failure. */ void *comedi_alloc_devpriv(struct comedi_device *dev, size_t size) { dev->private = kzalloc(size, GFP_KERNEL); return dev->private; } EXPORT_SYMBOL_GPL(comedi_alloc_devpriv); /** * comedi_alloc_subdevices() - Allocate subdevices for COMEDI device * @dev: COMEDI device. * @num_subdevices: Number of subdevices to allocate. * * Allocates and initializes an array of &struct comedi_subdevice for the * COMEDI device. If successful, sets @dev->subdevices to point to the * first one and @dev->n_subdevices to the number. * * Returns 0 on success, -EINVAL if @num_subdevices is < 1, or -ENOMEM if * failed to allocate the memory. */ int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices) { struct comedi_subdevice *s; int i; if (num_subdevices < 1) return -EINVAL; s = kcalloc(num_subdevices, sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; dev->subdevices = s; dev->n_subdevices = num_subdevices; for (i = 0; i < num_subdevices; ++i) { s = &dev->subdevices[i]; s->device = dev; s->index = i; s->async_dma_dir = DMA_NONE; spin_lock_init(&s->spin_lock); s->minor = -1; } return 0; } EXPORT_SYMBOL_GPL(comedi_alloc_subdevices); /** * comedi_alloc_subdev_readback() - Allocate memory for the subdevice readback * @s: COMEDI subdevice. * * This is called by low-level COMEDI drivers to allocate an array to record * the last values written to a subdevice's analog output channels (at least * by the %INSN_WRITE instruction), to allow them to be read back by an * %INSN_READ instruction. It also provides a default handler for the * %INSN_READ instruction unless one has already been set. * * On success, @s->readback points to the first element of the array, which * is zero-filled. The low-level driver is responsible for updating its * contents. @s->insn_read will be set to comedi_readback_insn_read() * unless it is already non-NULL. * * Returns 0 on success, -EINVAL if the subdevice has no channels, or * -ENOMEM on allocation failure. 
*/ int comedi_alloc_subdev_readback(struct comedi_subdevice *s) { if (!s->n_chan) return -EINVAL; s->readback = kcalloc(s->n_chan, sizeof(*s->readback), GFP_KERNEL); if (!s->readback) return -ENOMEM; if (!s->insn_read) s->insn_read = comedi_readback_insn_read; return 0; } EXPORT_SYMBOL_GPL(comedi_alloc_subdev_readback); static void comedi_device_detach_cleanup(struct comedi_device *dev) { int i; struct comedi_subdevice *s; lockdep_assert_held(&dev->attach_lock); lockdep_assert_held(&dev->mutex); if (dev->subdevices) { for (i = 0; i < dev->n_subdevices; i++) { s = &dev->subdevices[i]; if (comedi_can_auto_free_spriv(s)) kfree(s->private); comedi_free_subdevice_minor(s); if (s->async) { comedi_buf_alloc(dev, s, 0); kfree(s->async); } kfree(s->readback); } kfree(dev->subdevices); dev->subdevices = NULL; dev->n_subdevices = 0; } kfree(dev->private); if (!IS_ERR(dev->pacer)) kfree(dev->pacer); dev->private = NULL; dev->pacer = NULL; dev->driver = NULL; dev->board_name = NULL; dev->board_ptr = NULL; dev->mmio = NULL; dev->iobase = 0; dev->iolen = 0; dev->ioenabled = false; dev->irq = 0; dev->read_subdev = NULL; dev->write_subdev = NULL; dev->open = NULL; dev->close = NULL; comedi_clear_hw_dev(dev); } void comedi_device_detach(struct comedi_device *dev) { lockdep_assert_held(&dev->mutex); comedi_device_cancel_all(dev); down_write(&dev->attach_lock); dev->attached = false; dev->detach_count++; if (dev->driver) dev->driver->detach(dev); comedi_device_detach_cleanup(dev); up_write(&dev->attach_lock); } static int poll_invalid(struct comedi_device *dev, struct comedi_subdevice *s) { return -EINVAL; } static int insn_device_inval(struct comedi_device *dev, struct comedi_insn *insn, unsigned int *data) { return -EINVAL; } static unsigned int get_zero_valid_routes(struct comedi_device *dev, unsigned int n_pairs, unsigned int *pair_data) { return 0; } int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { return -EINVAL; } /** * comedi_readback_insn_read() - A generic (*insn_read) for subdevice readback. * @dev: COMEDI device. * @s: COMEDI subdevice. * @insn: COMEDI instruction. * @data: Pointer to return the readback data. * * Handles the %INSN_READ instruction for subdevices that use the readback * array allocated by comedi_alloc_subdev_readback(). It may be used * directly as the subdevice's handler (@s->insn_read) or called via a * wrapper. * * @insn->n is normally 1, which will read a single value. If higher, the * same element of the readback array will be read multiple times. * * Returns @insn->n on success, or -EINVAL if @s->readback is NULL. */ int comedi_readback_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); int i; if (!s->readback) return -EINVAL; for (i = 0; i < insn->n; i++) data[i] = s->readback[chan]; return insn->n; } EXPORT_SYMBOL_GPL(comedi_readback_insn_read); /** * comedi_timeout() - Busy-wait for a driver condition to occur * @dev: COMEDI device. * @s: COMEDI subdevice. * @insn: COMEDI instruction. * @cb: Callback to check for the condition. * @context: Private context from the driver. * * Busy-waits for up to a second (%COMEDI_TIMEOUT_MS) for the condition or * some error (other than -EBUSY) to occur. 
The parameters @dev, @s, @insn, * and @context are passed to the callback function, which returns -EBUSY to * continue waiting or some other value to stop waiting (generally 0 if the * condition occurred, or some error value). * * Returns -ETIMEDOUT if timed out, otherwise the return value from the * callback function. */ int comedi_timeout(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, int (*cb)(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned long context), unsigned long context) { unsigned long timeout = jiffies + msecs_to_jiffies(COMEDI_TIMEOUT_MS); int ret; while (time_before(jiffies, timeout)) { ret = cb(dev, s, insn, context); if (ret != -EBUSY) return ret; /* success (0) or non EBUSY errno */ cpu_relax(); } return -ETIMEDOUT; } EXPORT_SYMBOL_GPL(comedi_timeout); /** * comedi_dio_insn_config() - Boilerplate (*insn_config) for DIO subdevices * @dev: COMEDI device. * @s: COMEDI subdevice. * @insn: COMEDI instruction. * @data: Instruction parameters and return data. * @mask: io_bits mask for grouped channels, or 0 for single channel. * * If @mask is 0, it is replaced with a single-bit mask corresponding to the * channel number specified by @insn->chanspec. Otherwise, @mask * corresponds to a group of channels (which should include the specified * channel) that are always configured together as inputs or outputs. * * Partially handles the %INSN_CONFIG_DIO_INPUT, %INSN_CONFIG_DIO_OUTPUTS, * and %INSN_CONFIG_DIO_QUERY instructions. The first two update * @s->io_bits to record the directions of the masked channels. The last * one sets @data[1] to the current direction of the group of channels * (%COMEDI_INPUT) or %COMEDI_OUTPUT) as recorded in @s->io_bits. * * The caller is responsible for updating the DIO direction in the hardware * registers if this function returns 0. * * Returns 0 for a %INSN_CONFIG_DIO_INPUT or %INSN_CONFIG_DIO_OUTPUT * instruction, @insn->n (> 0) for a %INSN_CONFIG_DIO_QUERY instruction, or * -EINVAL for some other instruction. */ int comedi_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data, unsigned int mask) { unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec); if (!mask) mask = chan_mask; switch (data[0]) { case INSN_CONFIG_DIO_INPUT: s->io_bits &= ~mask; break; case INSN_CONFIG_DIO_OUTPUT: s->io_bits |= mask; break; case INSN_CONFIG_DIO_QUERY: data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT; return insn->n; default: return -EINVAL; } return 0; } EXPORT_SYMBOL_GPL(comedi_dio_insn_config); /** * comedi_dio_update_state() - Update the internal state of DIO subdevices * @s: COMEDI subdevice. * @data: The channel mask and bits to update. * * Updates @s->state which holds the internal state of the outputs for DIO * or DO subdevices (up to 32 channels). @data[0] contains a bit-mask of * the channels to be updated. @data[1] contains a bit-mask of those * channels to be set to '1'. The caller is responsible for updating the * outputs in hardware according to @s->state. As a minimum, the channels * in the returned bit-mask need to be updated. * * Returns @mask with non-existent channels removed. */ unsigned int comedi_dio_update_state(struct comedi_subdevice *s, unsigned int *data) { unsigned int chanmask = (s->n_chan < 32) ? 
((1 << s->n_chan) - 1) : 0xffffffff; unsigned int mask = data[0] & chanmask; unsigned int bits = data[1]; if (mask) { s->state &= ~mask; s->state |= (bits & mask); } return mask; } EXPORT_SYMBOL_GPL(comedi_dio_update_state); /** * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in * bytes * @s: COMEDI subdevice. * @cmd: COMEDI command. * * Determines the overall scan length according to the subdevice type and the * number of channels in the scan for the specified command. * * For digital input, output or input/output subdevices, samples for * multiple channels are assumed to be packed into one or more unsigned * short or unsigned int values according to the subdevice's %SDF_LSAMPL * flag. For other types of subdevice, samples are assumed to occupy a * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag. * * Returns the overall scan length in bytes. */ unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s, struct comedi_cmd *cmd) { unsigned int num_samples; unsigned int bits_per_sample; switch (s->type) { case COMEDI_SUBD_DI: case COMEDI_SUBD_DO: case COMEDI_SUBD_DIO: bits_per_sample = 8 * comedi_bytes_per_sample(s); num_samples = DIV_ROUND_UP(cmd->scan_end_arg, bits_per_sample); break; default: num_samples = cmd->scan_end_arg; break; } return comedi_samples_to_bytes(s, num_samples); } EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd); /** * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes * @s: COMEDI subdevice. * * Determines the overall scan length according to the subdevice type and the * number of channels in the scan for the current command. * * For digital input, output or input/output subdevices, samples for * multiple channels are assumed to be packed into one or more unsigned * short or unsigned int values according to the subdevice's %SDF_LSAMPL * flag. For other types of subdevice, samples are assumed to occupy a * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag. * * Returns the overall scan length in bytes. */ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) { struct comedi_cmd *cmd = &s->async->cmd; return comedi_bytes_per_scan_cmd(s, cmd); } EXPORT_SYMBOL_GPL(comedi_bytes_per_scan); static unsigned int __comedi_nscans_left(struct comedi_subdevice *s, unsigned int nscans) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; if (cmd->stop_src == TRIG_COUNT) { unsigned int scans_left = 0; if (async->scans_done < cmd->stop_arg) scans_left = cmd->stop_arg - async->scans_done; if (nscans > scans_left) nscans = scans_left; } return nscans; } /** * comedi_nscans_left() - Return the number of scans left in the command * @s: COMEDI subdevice. * @nscans: The expected number of scans or 0 for all available scans. * * If @nscans is 0, it is set to the number of scans available in the * async buffer. * * If the async command has a stop_src of %TRIG_COUNT, the @nscans will be * checked against the number of scans remaining to complete the command. * * The return value will then be either the expected number of scans or the * number of scans remaining to complete the command, whichever is fewer. 
*/ unsigned int comedi_nscans_left(struct comedi_subdevice *s, unsigned int nscans) { if (nscans == 0) { unsigned int nbytes = comedi_buf_read_n_available(s); nscans = nbytes / comedi_bytes_per_scan(s); } return __comedi_nscans_left(s, nscans); } EXPORT_SYMBOL_GPL(comedi_nscans_left); /** * comedi_nsamples_left() - Return the number of samples left in the command * @s: COMEDI subdevice. * @nsamples: The expected number of samples. * * Returns the number of samples remaining to complete the command, or the * specified expected number of samples (@nsamples), whichever is fewer. */ unsigned int comedi_nsamples_left(struct comedi_subdevice *s, unsigned int nsamples) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned long long scans_left; unsigned long long samples_left; if (cmd->stop_src != TRIG_COUNT) return nsamples; scans_left = __comedi_nscans_left(s, cmd->stop_arg); if (!scans_left) return 0; samples_left = scans_left * cmd->scan_end_arg - comedi_bytes_to_samples(s, async->scan_progress); if (samples_left < nsamples) return samples_left; return nsamples; } EXPORT_SYMBOL_GPL(comedi_nsamples_left); /** * comedi_inc_scan_progress() - Update scan progress in asynchronous command * @s: COMEDI subdevice. * @num_bytes: Amount of data in bytes to increment scan progress. * * Increments the scan progress by the number of bytes specified by @num_bytes. * If the scan progress reaches or exceeds the scan length in bytes, reduce * it modulo the scan length in bytes and set the "end of scan" asynchronous * event flag (%COMEDI_CB_EOS) to be processed later. */ void comedi_inc_scan_progress(struct comedi_subdevice *s, unsigned int num_bytes) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int scan_length = comedi_bytes_per_scan(s); /* track the 'cur_chan' for non-SDF_PACKED subdevices */ if (!(s->subdev_flags & SDF_PACKED)) { async->cur_chan += comedi_bytes_to_samples(s, num_bytes); async->cur_chan %= cmd->chanlist_len; } async->scan_progress += num_bytes; if (async->scan_progress >= scan_length) { unsigned int nscans = async->scan_progress / scan_length; if (async->scans_done < (UINT_MAX - nscans)) async->scans_done += nscans; else async->scans_done = UINT_MAX; async->scan_progress %= scan_length; async->events |= COMEDI_CB_EOS; } } EXPORT_SYMBOL_GPL(comedi_inc_scan_progress); /** * comedi_handle_events() - Handle events and possibly stop acquisition * @dev: COMEDI device. * @s: COMEDI subdevice. * * Handles outstanding asynchronous acquisition event flags associated * with the subdevice. Call the subdevice's @s->cancel() handler if the * "end of acquisition", "error" or "overflow" event flags are set in order * to stop the acquisition at the driver level. * * Calls comedi_event() to further process the event flags, which may mark * the asynchronous command as no longer running, possibly terminated with * an error, and may wake up tasks. * * Return a bit-mask of the handled events. 
*/ unsigned int comedi_handle_events(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned int events = s->async->events; if (events == 0) return events; if ((events & COMEDI_CB_CANCEL_MASK) && s->cancel) s->cancel(dev, s); comedi_event(dev, s); return events; } EXPORT_SYMBOL_GPL(comedi_handle_events); static int insn_rw_emulate_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct comedi_insn _insn; unsigned int chan = CR_CHAN(insn->chanspec); unsigned int base_chan = (chan < 32) ? 0 : chan; unsigned int _data[2]; int ret; memset(_data, 0, sizeof(_data)); memset(&_insn, 0, sizeof(_insn)); _insn.insn = INSN_BITS; _insn.chanspec = base_chan; _insn.n = 2; _insn.subdev = insn->subdev; if (insn->insn == INSN_WRITE) { if (!(s->subdev_flags & SDF_WRITABLE)) return -EINVAL; _data[0] = 1 << (chan - base_chan); /* mask */ _data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */ } ret = s->insn_bits(dev, s, &_insn, _data); if (ret < 0) return ret; if (insn->insn == INSN_READ) data[0] = (_data[1] >> (chan - base_chan)) & 1; return 1; } static int __comedi_device_postconfig_async(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async; unsigned int buf_size; int ret; lockdep_assert_held(&dev->mutex); if ((s->subdev_flags & (SDF_CMD_READ | SDF_CMD_WRITE)) == 0) { dev_warn(dev->class_dev, "async subdevices must support SDF_CMD_READ or SDF_CMD_WRITE\n"); return -EINVAL; } if (!s->do_cmdtest) { dev_warn(dev->class_dev, "async subdevices must have a do_cmdtest() function\n"); return -EINVAL; } if (!s->cancel) dev_warn(dev->class_dev, "async subdevices should have a cancel() function\n"); async = kzalloc(sizeof(*async), GFP_KERNEL); if (!async) return -ENOMEM; init_waitqueue_head(&async->wait_head); s->async = async; async->max_bufsize = comedi_default_buf_maxsize_kb * 1024; buf_size = comedi_default_buf_size_kb * 1024; if (buf_size > async->max_bufsize) buf_size = async->max_bufsize; if (comedi_buf_alloc(dev, s, buf_size) < 0) { dev_warn(dev->class_dev, "Buffer allocation failed\n"); return -ENOMEM; } if (s->buf_change) { ret = s->buf_change(dev, s); if (ret < 0) return ret; } comedi_alloc_subdevice_minor(s); return 0; } static int __comedi_device_postconfig(struct comedi_device *dev) { struct comedi_subdevice *s; int ret; int i; lockdep_assert_held(&dev->mutex); if (!dev->insn_device_config) dev->insn_device_config = insn_device_inval; if (!dev->get_valid_routes) dev->get_valid_routes = get_zero_valid_routes; for (i = 0; i < dev->n_subdevices; i++) { s = &dev->subdevices[i]; if (s->type == COMEDI_SUBD_UNUSED) continue; if (s->type == COMEDI_SUBD_DO) { if (s->n_chan < 32) s->io_bits = (1 << s->n_chan) - 1; else s->io_bits = 0xffffffff; } if (s->len_chanlist == 0) s->len_chanlist = 1; if (s->do_cmd) { ret = __comedi_device_postconfig_async(dev, s); if (ret) return ret; } if (!s->range_table && !s->range_table_list) s->range_table = &range_unknown; if (!s->insn_read && s->insn_bits) s->insn_read = insn_rw_emulate_bits; if (!s->insn_write && s->insn_bits) s->insn_write = insn_rw_emulate_bits; if (!s->insn_read) s->insn_read = insn_inval; if (!s->insn_write) s->insn_write = insn_inval; if (!s->insn_bits) s->insn_bits = insn_inval; if (!s->insn_config) s->insn_config = insn_inval; if (!s->poll) s->poll = poll_invalid; } return 0; } /* do a little post-config cleanup */ static int comedi_device_postconfig(struct comedi_device *dev) { int ret; lockdep_assert_held(&dev->mutex); ret = 
__comedi_device_postconfig(dev); if (ret < 0) return ret; down_write(&dev->attach_lock); dev->attached = true; up_write(&dev->attach_lock); return 0; } /* * Generic recognize function for drivers that register their supported * board names. * * 'driv->board_name' points to a 'const char *' member within the * zeroth element of an array of some private board information * structure, say 'struct foo_board' containing a member 'const char * *board_name' that is initialized to point to a board name string that * is one of the candidates matched against this function's 'name' * parameter. * * 'driv->offset' is the size of the private board information * structure, say 'sizeof(struct foo_board)', and 'driv->num_names' is * the length of the array of private board information structures. * * If one of the board names in the array of private board information * structures matches the name supplied to this function, the function * returns a pointer to the pointer to the board name, otherwise it * returns NULL. The return value ends up in the 'board_ptr' member of * a 'struct comedi_device' that the low-level comedi driver's * 'attach()' hook can convert to a point to a particular element of its * array of private board information structures by subtracting the * offset of the member that points to the board name. (No subtraction * is required if the board name pointer is the first member of the * private board information structure, which is generally the case.) */ static void *comedi_recognize(struct comedi_driver *driv, const char *name) { char **name_ptr = (char **)driv->board_name; int i; for (i = 0; i < driv->num_names; i++) { if (strcmp(*name_ptr, name) == 0) return name_ptr; name_ptr = (void *)name_ptr + driv->offset; } return NULL; } static void comedi_report_boards(struct comedi_driver *driv) { unsigned int i; const char *const *name_ptr; pr_info("comedi: valid board names for %s driver are:\n", driv->driver_name); name_ptr = driv->board_name; for (i = 0; i < driv->num_names; i++) { pr_info(" %s\n", *name_ptr); name_ptr = (const char **)((char *)name_ptr + driv->offset); } if (driv->num_names == 0) pr_info(" %s\n", driv->driver_name); } /** * comedi_load_firmware() - Request and load firmware for a device * @dev: COMEDI device. * @device: Hardware device. * @name: The name of the firmware image. * @cb: Callback to the upload the firmware image. * @context: Private context from the driver. * * Sends a firmware request for the hardware device and waits for it. Calls * the callback function to upload the firmware to the device, them releases * the firmware. * * Returns 0 on success, -EINVAL if @cb is NULL, or a negative error number * from the firmware request or the callback function. */ int comedi_load_firmware(struct comedi_device *dev, struct device *device, const char *name, int (*cb)(struct comedi_device *dev, const u8 *data, size_t size, unsigned long context), unsigned long context) { const struct firmware *fw; int ret; if (!cb) return -EINVAL; ret = request_firmware(&fw, name, device); if (ret == 0) { ret = cb(dev, fw->data, fw->size, context); release_firmware(fw); } return min(ret, 0); } EXPORT_SYMBOL_GPL(comedi_load_firmware); /** * __comedi_request_region() - Request an I/O region for a legacy driver * @dev: COMEDI device. * @start: Base address of the I/O region. * @len: Length of the I/O region. * * Requests the specified I/O port region which must start at a non-zero * address. * * Returns 0 on success, -EINVAL if @start is 0, or -EIO if the request * fails. 
*/ int __comedi_request_region(struct comedi_device *dev, unsigned long start, unsigned long len) { if (!start) { dev_warn(dev->class_dev, "%s: a I/O base address must be specified\n", dev->board_name); return -EINVAL; } if (!request_region(start, len, dev->board_name)) { dev_warn(dev->class_dev, "%s: I/O port conflict (%#lx,%lu)\n", dev->board_name, start, len); return -EIO; } return 0; } EXPORT_SYMBOL_GPL(__comedi_request_region); /** * comedi_request_region() - Request an I/O region for a legacy driver * @dev: COMEDI device. * @start: Base address of the I/O region. * @len: Length of the I/O region. * * Requests the specified I/O port region which must start at a non-zero * address. * * On success, @dev->iobase is set to the base address of the region and * @dev->iolen is set to its length. * * Returns 0 on success, -EINVAL if @start is 0, or -EIO if the request * fails. */ int comedi_request_region(struct comedi_device *dev, unsigned long start, unsigned long len) { int ret; ret = __comedi_request_region(dev, start, len); if (ret == 0) { dev->iobase = start; dev->iolen = len; } return ret; } EXPORT_SYMBOL_GPL(comedi_request_region); /** * comedi_legacy_detach() - A generic (*detach) function for legacy drivers * @dev: COMEDI device. * * This is a simple, generic 'detach' handler for legacy COMEDI devices that * just use a single I/O port region and possibly an IRQ and that don't need * any special clean-up for their private device or subdevice storage. It * can also be called by a driver-specific 'detach' handler. * * If @dev->irq is non-zero, the IRQ will be freed. If @dev->iobase and * @dev->iolen are both non-zero, the I/O port region will be released. */ void comedi_legacy_detach(struct comedi_device *dev) { if (dev->irq) { free_irq(dev->irq, dev); dev->irq = 0; } if (dev->iobase && dev->iolen) { release_region(dev->iobase, dev->iolen); dev->iobase = 0; dev->iolen = 0; } } EXPORT_SYMBOL_GPL(comedi_legacy_detach); int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_driver *driv; int ret; lockdep_assert_held(&dev->mutex); if (dev->attached) return -EBUSY; mutex_lock(&comedi_drivers_list_lock); for (driv = comedi_drivers; driv; driv = driv->next) { if (!try_module_get(driv->module)) continue; if (driv->num_names) { dev->board_ptr = comedi_recognize(driv, it->board_name); if (dev->board_ptr) break; } else if (strcmp(driv->driver_name, it->board_name) == 0) { break; } module_put(driv->module); } if (!driv) { /* recognize has failed if we get here */ /* report valid board names before returning error */ for (driv = comedi_drivers; driv; driv = driv->next) { if (!try_module_get(driv->module)) continue; comedi_report_boards(driv); module_put(driv->module); } ret = -EIO; goto out; } if (!driv->attach) { /* driver does not support manual configuration */ dev_warn(dev->class_dev, "driver '%s' does not support attach using comedi_config\n", driv->driver_name); module_put(driv->module); ret = -EIO; goto out; } dev->driver = driv; dev->board_name = dev->board_ptr ? *(const char **)dev->board_ptr : dev->driver->driver_name; ret = driv->attach(dev, it); if (ret >= 0) ret = comedi_device_postconfig(dev); if (ret < 0) { comedi_device_detach(dev); module_put(driv->module); } /* On success, the driver module count has been incremented. */ out: mutex_unlock(&comedi_drivers_list_lock); return ret; } /** * comedi_auto_config() - Create a COMEDI device for a hardware device * @hardware_device: Hardware device. 
* @driver: COMEDI low-level driver for the hardware device. * @context: Driver context for the auto_attach handler. * * Allocates a new COMEDI device for the hardware device and calls the * low-level driver's 'auto_attach' handler to set-up the hardware and * allocate the COMEDI subdevices. Additional "post-configuration" setting * up is performed on successful return from the 'auto_attach' handler. * If the 'auto_attach' handler fails, the low-level driver's 'detach' * handler will be called as part of the clean-up. * * This is usually called from a wrapper function in a bus-specific COMEDI * module, which in turn is usually called from a bus device 'probe' * function in the low-level driver. * * Returns 0 on success, -EINVAL if the parameters are invalid or the * post-configuration determines the driver has set the COMEDI device up * incorrectly, -ENOMEM if failed to allocate memory, -EBUSY if run out of * COMEDI minor device numbers, or some negative error number returned by * the driver's 'auto_attach' handler. */ int comedi_auto_config(struct device *hardware_device, struct comedi_driver *driver, unsigned long context) { struct comedi_device *dev; int ret; if (!hardware_device) { pr_warn("BUG! %s called with NULL hardware_device\n", __func__); return -EINVAL; } if (!driver) { dev_warn(hardware_device, "BUG! %s called with NULL comedi driver\n", __func__); return -EINVAL; } if (!driver->auto_attach) { dev_warn(hardware_device, "BUG! comedi driver '%s' has no auto_attach handler\n", driver->driver_name); return -EINVAL; } dev = comedi_alloc_board_minor(hardware_device); if (IS_ERR(dev)) { dev_warn(hardware_device, "driver '%s' could not create device.\n", driver->driver_name); return PTR_ERR(dev); } /* Note: comedi_alloc_board_minor() locked dev->mutex. */ lockdep_assert_held(&dev->mutex); dev->driver = driver; dev->board_name = dev->driver->driver_name; ret = driver->auto_attach(dev, context); if (ret >= 0) ret = comedi_device_postconfig(dev); if (ret < 0) { dev_warn(hardware_device, "driver '%s' failed to auto-configure device.\n", driver->driver_name); mutex_unlock(&dev->mutex); comedi_release_hardware_device(hardware_device); } else { /* * class_dev should be set properly here * after a successful auto config */ dev_info(dev->class_dev, "driver '%s' has successfully auto-configured '%s'.\n", driver->driver_name, dev->board_name); mutex_unlock(&dev->mutex); } return ret; } EXPORT_SYMBOL_GPL(comedi_auto_config); /** * comedi_auto_unconfig() - Unconfigure auto-allocated COMEDI device * @hardware_device: Hardware device previously passed to * comedi_auto_config(). * * Cleans up and eventually destroys the COMEDI device allocated by * comedi_auto_config() for the same hardware device. As part of this * clean-up, the low-level COMEDI driver's 'detach' handler will be called. * (The COMEDI device itself will persist in an unattached state if it is * still open, until it is released, and any mmapped buffers will persist * until they are munmapped.) * * This is usually called from a wrapper module in a bus-specific COMEDI * module, which in turn is usually set as the bus device 'remove' function * in the low-level COMEDI driver. */ void comedi_auto_unconfig(struct device *hardware_device) { if (!hardware_device) return; comedi_release_hardware_device(hardware_device); } EXPORT_SYMBOL_GPL(comedi_auto_unconfig); /** * comedi_driver_register() - Register a low-level COMEDI driver * @driver: Low-level COMEDI driver. 
* * The low-level COMEDI driver is added to the list of registered COMEDI * drivers. This is used by the handler for the "/proc/comedi" file and is * also used by the handler for the %COMEDI_DEVCONFIG ioctl to configure * "legacy" COMEDI devices (for those low-level drivers that support it). * * Returns 0. */ int comedi_driver_register(struct comedi_driver *driver) { mutex_lock(&comedi_drivers_list_lock); driver->next = comedi_drivers; comedi_drivers = driver; mutex_unlock(&comedi_drivers_list_lock); return 0; } EXPORT_SYMBOL_GPL(comedi_driver_register); /** * comedi_driver_unregister() - Unregister a low-level COMEDI driver * @driver: Low-level COMEDI driver. * * The low-level COMEDI driver is removed from the list of registered COMEDI * drivers. Detaches any COMEDI devices attached to the driver, which will * result in the low-level driver's 'detach' handler being called for those * devices before this function returns. */ void comedi_driver_unregister(struct comedi_driver *driver) { struct comedi_driver *prev; int i; /* unlink the driver */ mutex_lock(&comedi_drivers_list_lock); if (comedi_drivers == driver) { comedi_drivers = driver->next; } else { for (prev = comedi_drivers; prev->next; prev = prev->next) { if (prev->next == driver) { prev->next = driver->next; break; } } } mutex_unlock(&comedi_drivers_list_lock); /* check for devices using this driver */ for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) { struct comedi_device *dev = comedi_dev_get_from_minor(i); if (!dev) continue; mutex_lock(&dev->mutex); if (dev->attached && dev->driver == driver) { if (dev->use_count) dev_warn(dev->class_dev, "BUG! detaching device with use_count=%d\n", dev->use_count); comedi_device_detach(dev); } mutex_unlock(&dev->mutex); comedi_dev_put(dev); } } EXPORT_SYMBOL_GPL(comedi_driver_unregister); |
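/*
 * A minimal usage sketch (not a real in-tree driver) of the comedi helpers
 * documented above.  The "demo" board, its I/O region and register offsets
 * are invented for illustration; the header path is assumed to match mainline.
 */
#include <linux/module.h>
#include <linux/io.h>
#include <linux/comedi/comedidev.h>

static int demo_ao_insn_write(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn, unsigned int *data)
{
	unsigned int chan = CR_CHAN(insn->chanspec);
	int i;

	for (i = 0; i < insn->n; i++) {
		/* hypothetical 8-bit DAC data register per channel */
		outb(data[i] & 0xff, dev->iobase + 0x04 + chan);
		/* record the value so comedi_readback_insn_read() sees it */
		s->readback[chan] = data[i];
	}

	return insn->n;
}

static int demo_dio_insn_bits(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn, unsigned int *data)
{
	/* let the core update s->state, then mirror it to the output port */
	if (comedi_dio_update_state(s, data))
		outb(s->state & 0xff, dev->iobase + 0x00);

	data[1] = inb(dev->iobase + 0x00);	/* sample the port pins */

	return insn->n;
}

static int demo_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	struct comedi_subdevice *s;
	int ret;

	/* legacy drivers take the I/O base from the comedi_config options,
	 * e.g. "comedi_config /dev/comedi0 comedi_demo 0x300"
	 */
	ret = comedi_request_region(dev, it->options[0], 0x08);
	if (ret)
		return ret;

	ret = comedi_alloc_subdevices(dev, 2);
	if (ret)
		return ret;

	/* analog output subdevice with software readback */
	s = &dev->subdevices[0];
	s->type = COMEDI_SUBD_AO;
	s->subdev_flags = SDF_WRITABLE;
	s->n_chan = 2;
	s->maxdata = 0xff;
	s->insn_write = demo_ao_insn_write;
	ret = comedi_alloc_subdev_readback(s);	/* also sets s->insn_read */
	if (ret)
		return ret;

	/* digital I/O subdevice; a real driver would also wrap
	 * comedi_dio_insn_config() in an (*insn_config) handler
	 */
	s = &dev->subdevices[1];
	s->type = COMEDI_SUBD_DIO;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
	s->n_chan = 8;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_bits = demo_dio_insn_bits;

	return 0;
}

static struct comedi_driver demo_driver = {
	.driver_name	= "comedi_demo",
	.module		= THIS_MODULE,
	.attach		= demo_attach,
	.detach		= comedi_legacy_detach,
};

static int __init demo_driver_init(void)
{
	return comedi_driver_register(&demo_driver);
}
module_init(demo_driver_init);

static void __exit demo_driver_exit(void)
{
	comedi_driver_unregister(&demo_driver);
}
module_exit(demo_driver_exit);
MODULE_LICENSE("GPL");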
// SPDX-License-Identifier: GPL-2.0-only
/*
 * NXP Wireless LAN device driver: major functions
 *
 * Copyright 2011-2020 NXP
 */

#include <linux/suspend.h>

#include "main.h"
#include "wmm.h"
#include "cfg80211.h"
#include "11n.h"

#define VERSION	"1.0"
#define MFG_FIRMWARE	"mwifiex_mfg.bin"

static unsigned int debug_mask = MWIFIEX_DEFAULT_DEBUG_MASK;
module_param(debug_mask, uint, 0);
MODULE_PARM_DESC(debug_mask, "bitmap for debug flags");

const char driver_version[] = "mwifiex " VERSION " (%s) ";
static char *cal_data_cfg;
module_param(cal_data_cfg, charp, 0);

static unsigned short driver_mode;
module_param(driver_mode, ushort, 0);
MODULE_PARM_DESC(driver_mode,
		 "station=0x1(default), ap-sta=0x3, station-p2p=0x5, ap-sta-p2p=0x7");

bool mfg_mode;
module_param(mfg_mode, bool, 0);
MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");

bool aggr_ctrl;
module_param(aggr_ctrl, bool, 0000);
MODULE_PARM_DESC(aggr_ctrl, "usb tx aggregation enable:1, disable:0");

const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };

/*
 * This function registers the device and performs all the necessary
 * initializations.
 *
 * The following initialization operations are performed -
 *      - Allocate adapter structure
 *      - Save interface specific operations table in adapter
 *      - Call interface specific initialization routine
 *      - Allocate private structures
 *      - Set default adapter structure parameters
 *      - Initialize locks
 *
 * In case of any errors during initialization, this function also ensures
 * proper cleanup before exiting.
 */
static int mwifiex_register(void *card, struct device *dev,
			    struct mwifiex_if_ops *if_ops, void **padapter)
{
	struct mwifiex_adapter *adapter;
	int i;

	adapter = kzalloc(sizeof(struct mwifiex_adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	*padapter = adapter;
	adapter->dev = dev;
	adapter->card = card;

	/* Save interface specific operations in adapter */
	memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops));
	adapter->debug_mask = debug_mask;

	/* card specific initialization has been deferred until now ..
*/ if (adapter->if_ops.init_if) if (adapter->if_ops.init_if(adapter)) goto error; adapter->priv_num = 0; for (i = 0; i < MWIFIEX_MAX_BSS_NUM; i++) { /* Allocate memory for private structure */ adapter->priv[i] = kzalloc(sizeof(struct mwifiex_private), GFP_KERNEL); if (!adapter->priv[i]) goto error; adapter->priv[i]->adapter = adapter; adapter->priv_num++; } mwifiex_init_lock_list(adapter); timer_setup(&adapter->cmd_timer, mwifiex_cmd_timeout_func, 0); return 0; error: mwifiex_dbg(adapter, ERROR, "info: leave mwifiex_register with error\n"); for (i = 0; i < adapter->priv_num; i++) kfree(adapter->priv[i]); kfree(adapter); return -1; } /* * This function unregisters the device and performs all the necessary * cleanups. * * The following cleanup operations are performed - * - Free the timers * - Free beacon buffers * - Free private structures * - Free adapter structure */ static int mwifiex_unregister(struct mwifiex_adapter *adapter) { s32 i; if (adapter->if_ops.cleanup_if) adapter->if_ops.cleanup_if(adapter); timer_shutdown_sync(&adapter->cmd_timer); /* Free private structures */ for (i = 0; i < adapter->priv_num; i++) { mwifiex_free_curr_bcn(adapter->priv[i]); kfree(adapter->priv[i]); } if (adapter->nd_info) { for (i = 0 ; i < adapter->nd_info->n_matches ; i++) kfree(adapter->nd_info->matches[i]); kfree(adapter->nd_info); adapter->nd_info = NULL; } kfree(adapter->regd); kfree(adapter); return 0; } void mwifiex_queue_main_work(struct mwifiex_adapter *adapter) { unsigned long flags; spin_lock_irqsave(&adapter->main_proc_lock, flags); if (adapter->mwifiex_processing) { adapter->more_task_flag = true; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); } else { spin_unlock_irqrestore(&adapter->main_proc_lock, flags); queue_work(adapter->workqueue, &adapter->main_work); } } EXPORT_SYMBOL_GPL(mwifiex_queue_main_work); static void mwifiex_queue_rx_work(struct mwifiex_adapter *adapter) { spin_lock_bh(&adapter->rx_proc_lock); if (adapter->rx_processing) { spin_unlock_bh(&adapter->rx_proc_lock); } else { spin_unlock_bh(&adapter->rx_proc_lock); queue_work(adapter->rx_workqueue, &adapter->rx_work); } } static int mwifiex_process_rx(struct mwifiex_adapter *adapter) { struct sk_buff *skb; struct mwifiex_rxinfo *rx_info; spin_lock_bh(&adapter->rx_proc_lock); if (adapter->rx_processing || adapter->rx_locked) { spin_unlock_bh(&adapter->rx_proc_lock); goto exit_rx_proc; } else { adapter->rx_processing = true; spin_unlock_bh(&adapter->rx_proc_lock); } /* Check for Rx data */ while ((skb = skb_dequeue(&adapter->rx_data_q))) { atomic_dec(&adapter->rx_pending); if ((adapter->delay_main_work || adapter->iface_type == MWIFIEX_USB) && (atomic_read(&adapter->rx_pending) < LOW_RX_PENDING)) { if (adapter->if_ops.submit_rem_rx_urbs) adapter->if_ops.submit_rem_rx_urbs(adapter); adapter->delay_main_work = false; mwifiex_queue_main_work(adapter); } rx_info = MWIFIEX_SKB_RXCB(skb); if (rx_info->buf_type == MWIFIEX_TYPE_AGGR_DATA) { if (adapter->if_ops.deaggr_pkt) adapter->if_ops.deaggr_pkt(adapter, skb); dev_kfree_skb_any(skb); } else { mwifiex_handle_rx_packet(adapter, skb); } } spin_lock_bh(&adapter->rx_proc_lock); adapter->rx_processing = false; spin_unlock_bh(&adapter->rx_proc_lock); exit_rx_proc: return 0; } static void maybe_quirk_fw_disable_ds(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); struct mwifiex_ver_ext ver_ext; if (test_and_set_bit(MWIFIEX_IS_REQUESTING_FW_VEREXT, &adapter->work_flags)) return; memset(&ver_ext, 0, sizeof(ver_ext)); 
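	/*
	 * Query the extended firmware version string (selector 1, the
	 * hardware revision); the VERSION_EXT command response handler
	 * uses it to decide whether auto deep sleep must be disabled on
	 * affected firmware, hence the "maybe_quirk_fw_disable_ds" name.
	 */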
ver_ext.version_str_sel = 1; if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT, HostCmd_ACT_GEN_GET, 0, &ver_ext, false)) { mwifiex_dbg(priv->adapter, MSG, "Checking hardware revision failed.\n"); } } /* * The main process. * * This function is the main procedure of the driver and handles various driver * operations. It runs in a loop and provides the core functionalities. * * The main responsibilities of this function are - * - Ensure concurrency control * - Handle pending interrupts and call interrupt handlers * - Wake up the card if required * - Handle command responses and call response handlers * - Handle events and call event handlers * - Execute pending commands * - Transmit pending data packets */ int mwifiex_main_process(struct mwifiex_adapter *adapter) { int ret = 0; unsigned long flags; spin_lock_irqsave(&adapter->main_proc_lock, flags); /* Check if already processing */ if (adapter->mwifiex_processing || adapter->main_locked) { adapter->more_task_flag = true; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); return 0; } else { adapter->mwifiex_processing = true; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); } process_start: do { if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY) break; /* For non-USB interfaces, If we process interrupts first, it * would increase RX pending even further. Avoid this by * checking if rx_pending has crossed high threshold and * schedule rx work queue and then process interrupts. * For USB interface, there are no interrupts. We already have * HIGH_RX_PENDING check in usb.c */ if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING && adapter->iface_type != MWIFIEX_USB) { adapter->delay_main_work = true; mwifiex_queue_rx_work(adapter); break; } /* Handle pending interrupt if any */ if (adapter->int_status) { if (adapter->hs_activated) mwifiex_process_hs_config(adapter); if (adapter->if_ops.process_int_status) adapter->if_ops.process_int_status(adapter); } if (adapter->rx_work_enabled && adapter->data_received) mwifiex_queue_rx_work(adapter); /* Need to wake up the card ? 
*/ if ((adapter->ps_state == PS_STATE_SLEEP) && (adapter->pm_wakeup_card_req && !adapter->pm_wakeup_fw_try) && (is_command_pending(adapter) || !skb_queue_empty(&adapter->tx_data_q) || !mwifiex_bypass_txlist_empty(adapter) || !mwifiex_wmm_lists_empty(adapter))) { adapter->pm_wakeup_fw_try = true; mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3)); adapter->if_ops.wakeup(adapter); continue; } if (IS_CARD_RX_RCVD(adapter)) { adapter->data_received = false; adapter->pm_wakeup_fw_try = false; del_timer(&adapter->wakeup_timer); if (adapter->ps_state == PS_STATE_SLEEP) adapter->ps_state = PS_STATE_AWAKE; } else { /* We have tried to wakeup the card already */ if (adapter->pm_wakeup_fw_try) break; if (adapter->ps_state == PS_STATE_PRE_SLEEP) mwifiex_check_ps_cond(adapter); if (adapter->ps_state != PS_STATE_AWAKE) break; if (adapter->tx_lock_flag) { if (adapter->iface_type == MWIFIEX_USB) { if (!adapter->usb_mc_setup) break; } else break; } if ((!adapter->scan_chan_gap_enabled && adapter->scan_processing) || adapter->data_sent || mwifiex_is_tdls_chan_switching (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA)) || (mwifiex_wmm_lists_empty(adapter) && mwifiex_bypass_txlist_empty(adapter) && skb_queue_empty(&adapter->tx_data_q))) { if (adapter->cmd_sent || adapter->curr_cmd || !mwifiex_is_send_cmd_allowed (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA)) || (!is_command_pending(adapter))) break; } } /* Check for event */ if (adapter->event_received) { adapter->event_received = false; mwifiex_process_event(adapter); } /* Check for Cmd Resp */ if (adapter->cmd_resp_received) { adapter->cmd_resp_received = false; mwifiex_process_cmdresp(adapter); /* call mwifiex back when init_fw is done */ if (adapter->hw_status == MWIFIEX_HW_STATUS_INIT_DONE) { adapter->hw_status = MWIFIEX_HW_STATUS_READY; mwifiex_init_fw_complete(adapter); maybe_quirk_fw_disable_ds(adapter); } } /* Check if we need to confirm Sleep Request received previously */ if (adapter->ps_state == PS_STATE_PRE_SLEEP) mwifiex_check_ps_cond(adapter); /* * The ps_state may have been changed during processing of * Sleep Request event. */ if ((adapter->ps_state == PS_STATE_SLEEP) || (adapter->ps_state == PS_STATE_PRE_SLEEP) || (adapter->ps_state == PS_STATE_SLEEP_CFM)) { continue; } if (adapter->tx_lock_flag) { if (adapter->iface_type == MWIFIEX_USB) { if (!adapter->usb_mc_setup) continue; } else continue; } if (!adapter->cmd_sent && !adapter->curr_cmd && mwifiex_is_send_cmd_allowed (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) { if (mwifiex_exec_next_cmd(adapter) == -1) { ret = -1; break; } } /** If USB Multi channel setup ongoing, * wait for ready to tx data. 
*/ if (adapter->iface_type == MWIFIEX_USB && adapter->usb_mc_setup) continue; if ((adapter->scan_chan_gap_enabled || !adapter->scan_processing) && !adapter->data_sent && !skb_queue_empty(&adapter->tx_data_q)) { if (adapter->hs_activated_manually) { mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY), MWIFIEX_ASYNC_CMD); adapter->hs_activated_manually = false; } mwifiex_process_tx_queue(adapter); if (adapter->hs_activated) { clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags); mwifiex_hs_activated_event (mwifiex_get_priv (adapter, MWIFIEX_BSS_ROLE_ANY), false); } } if ((adapter->scan_chan_gap_enabled || !adapter->scan_processing) && !adapter->data_sent && !mwifiex_bypass_txlist_empty(adapter) && !mwifiex_is_tdls_chan_switching (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) { if (adapter->hs_activated_manually) { mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY), MWIFIEX_ASYNC_CMD); adapter->hs_activated_manually = false; } mwifiex_process_bypass_tx(adapter); if (adapter->hs_activated) { clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags); mwifiex_hs_activated_event (mwifiex_get_priv (adapter, MWIFIEX_BSS_ROLE_ANY), false); } } if ((adapter->scan_chan_gap_enabled || !adapter->scan_processing) && !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter) && !mwifiex_is_tdls_chan_switching (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) { if (adapter->hs_activated_manually) { mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY), MWIFIEX_ASYNC_CMD); adapter->hs_activated_manually = false; } mwifiex_wmm_process_tx(adapter); if (adapter->hs_activated) { clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags); mwifiex_hs_activated_event (mwifiex_get_priv (adapter, MWIFIEX_BSS_ROLE_ANY), false); } } if (adapter->delay_null_pkt && !adapter->cmd_sent && !adapter->curr_cmd && !is_command_pending(adapter) && (mwifiex_wmm_lists_empty(adapter) && mwifiex_bypass_txlist_empty(adapter) && skb_queue_empty(&adapter->tx_data_q))) { if (!mwifiex_send_null_packet (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET | MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET)) { adapter->delay_null_pkt = false; adapter->ps_state = PS_STATE_SLEEP; } break; } } while (true); spin_lock_irqsave(&adapter->main_proc_lock, flags); if (adapter->more_task_flag) { adapter->more_task_flag = false; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); goto process_start; } adapter->mwifiex_processing = false; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); return ret; } EXPORT_SYMBOL_GPL(mwifiex_main_process); /* * This function frees the adapter structure. * * Additionally, this closes the netlink socket, frees the timers * and private structures. */ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter) { if (!adapter) { pr_err("%s: adapter is NULL\n", __func__); return; } mwifiex_unregister(adapter); pr_debug("info: %s: free adapter\n", __func__); } /* * This function cancels all works in the queue and destroys * the main workqueue. */ static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter) { if (adapter->workqueue) { destroy_workqueue(adapter->workqueue); adapter->workqueue = NULL; } if (adapter->rx_workqueue) { destroy_workqueue(adapter->rx_workqueue); adapter->rx_workqueue = NULL; } if (adapter->host_mlme_workqueue) { destroy_workqueue(adapter->host_mlme_workqueue); adapter->host_mlme_workqueue = NULL; } } /* * This function gets firmware and initializes it. 
* * The main initialization steps followed are - * - Download the correct firmware to card * - Issue the init commands to firmware */ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context) { int ret; char fmt[64]; struct mwifiex_adapter *adapter = context; struct mwifiex_fw_image fw; bool init_failed = false; struct wireless_dev *wdev; struct completion *fw_done = adapter->fw_done; if (!firmware) { mwifiex_dbg(adapter, ERROR, "Failed to get firmware %s\n", adapter->fw_name); goto err_dnld_fw; } memset(&fw, 0, sizeof(struct mwifiex_fw_image)); adapter->firmware = firmware; fw.fw_buf = (u8 *) adapter->firmware->data; fw.fw_len = adapter->firmware->size; if (adapter->if_ops.dnld_fw) { ret = adapter->if_ops.dnld_fw(adapter, &fw); } else { ret = mwifiex_dnld_fw(adapter, &fw); } if (ret == -1) goto err_dnld_fw; mwifiex_dbg(adapter, MSG, "WLAN FW is active\n"); if (cal_data_cfg) { if ((request_firmware(&adapter->cal_data, cal_data_cfg, adapter->dev)) < 0) mwifiex_dbg(adapter, ERROR, "Cal data request_firmware() failed\n"); } /* enable host interrupt after fw dnld is successful */ if (adapter->if_ops.enable_int) { if (adapter->if_ops.enable_int(adapter)) goto err_dnld_fw; } adapter->init_wait_q_woken = false; ret = mwifiex_init_fw(adapter); if (ret == -1) { goto err_init_fw; } else if (!ret) { adapter->hw_status = MWIFIEX_HW_STATUS_READY; goto done; } /* Wait for mwifiex_init to complete */ if (!adapter->mfg_mode) { wait_event_interruptible(adapter->init_wait_q, adapter->init_wait_q_woken); if (adapter->hw_status != MWIFIEX_HW_STATUS_READY) goto err_init_fw; } if (!adapter->wiphy) { if (mwifiex_register_cfg80211(adapter)) { mwifiex_dbg(adapter, ERROR, "cannot register with cfg80211\n"); goto err_init_fw; } } if (mwifiex_init_channel_scan_gap(adapter)) { mwifiex_dbg(adapter, ERROR, "could not init channel stats table\n"); goto err_init_chan_scan; } if (driver_mode) { driver_mode &= MWIFIEX_DRIVER_MODE_BITMASK; driver_mode |= MWIFIEX_DRIVER_MODE_STA; } rtnl_lock(); wiphy_lock(adapter->wiphy); /* Create station interface by default */ wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM, NL80211_IFTYPE_STATION, NULL); if (IS_ERR(wdev)) { mwifiex_dbg(adapter, ERROR, "cannot create default STA interface\n"); wiphy_unlock(adapter->wiphy); rtnl_unlock(); goto err_add_intf; } if (driver_mode & MWIFIEX_DRIVER_MODE_UAP) { wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", NET_NAME_ENUM, NL80211_IFTYPE_AP, NULL); if (IS_ERR(wdev)) { mwifiex_dbg(adapter, ERROR, "cannot create AP interface\n"); wiphy_unlock(adapter->wiphy); rtnl_unlock(); goto err_add_intf; } } if (driver_mode & MWIFIEX_DRIVER_MODE_P2P) { wdev = mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d", NET_NAME_ENUM, NL80211_IFTYPE_P2P_CLIENT, NULL); if (IS_ERR(wdev)) { mwifiex_dbg(adapter, ERROR, "cannot create p2p client interface\n"); wiphy_unlock(adapter->wiphy); rtnl_unlock(); goto err_add_intf; } } wiphy_unlock(adapter->wiphy); rtnl_unlock(); mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1); mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt); adapter->is_up = true; goto done; err_add_intf: vfree(adapter->chan_stats); err_init_chan_scan: wiphy_unregister(adapter->wiphy); wiphy_free(adapter->wiphy); err_init_fw: if (adapter->if_ops.disable_int) adapter->if_ops.disable_int(adapter); err_dnld_fw: mwifiex_dbg(adapter, ERROR, "info: %s: unregister device\n", __func__); if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); set_bit(MWIFIEX_SURPRISE_REMOVED, 
&adapter->work_flags); mwifiex_terminate_workqueue(adapter); if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { pr_debug("info: %s: shutdown mwifiex\n", __func__); mwifiex_shutdown_drv(adapter); mwifiex_free_cmd_buffers(adapter); } init_failed = true; done: if (adapter->cal_data) { release_firmware(adapter->cal_data); adapter->cal_data = NULL; } if (adapter->firmware) { release_firmware(adapter->firmware); adapter->firmware = NULL; } if (init_failed) { if (adapter->irq_wakeup >= 0) device_init_wakeup(adapter->dev, false); mwifiex_free_adapter(adapter); } /* Tell all current and future waiters we're finished */ complete_all(fw_done); return init_failed ? -EIO : 0; } static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) { _mwifiex_fw_dpc(firmware, context); } /* * This function gets the firmware and (if called asynchronously) kicks off the * HW init when done. */ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter, bool req_fw_nowait) { int ret; /* Override default firmware with manufacturing one if * manufacturing mode is enabled */ if (mfg_mode) strscpy(adapter->fw_name, MFG_FIRMWARE, sizeof(adapter->fw_name)); if (req_fw_nowait) { ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name, adapter->dev, GFP_KERNEL, adapter, mwifiex_fw_dpc); } else { ret = request_firmware(&adapter->firmware, adapter->fw_name, adapter->dev); } if (ret < 0) mwifiex_dbg(adapter, ERROR, "request_firmware%s error %d\n", req_fw_nowait ? "_nowait" : "", ret); return ret; } /* * CFG802.11 network device handler for open. * * Starts the data queue. */ static int mwifiex_open(struct net_device *dev) { netif_carrier_off(dev); return 0; } /* * CFG802.11 network device handler for close. */ static int mwifiex_close(struct net_device *dev) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); if (priv->scan_request) { struct cfg80211_scan_info info = { .aborted = true, }; mwifiex_dbg(priv->adapter, INFO, "aborting scan on ndo_stop\n"); cfg80211_scan_done(priv->scan_request, &info); priv->scan_request = NULL; priv->scan_aborting = true; } if (priv->sched_scanning) { mwifiex_dbg(priv->adapter, INFO, "aborting bgscan on ndo_stop\n"); mwifiex_stop_bg_scan(priv); cfg80211_sched_scan_stopped(priv->wdev.wiphy, 0); } return 0; } static bool mwifiex_bypass_tx_queue(struct mwifiex_private *priv, struct sk_buff *skb) { struct ethhdr *eth_hdr = (struct ethhdr *)skb->data; if (ntohs(eth_hdr->h_proto) == ETH_P_PAE || mwifiex_is_skb_mgmt_frame(skb) || (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA && ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && (ntohs(eth_hdr->h_proto) == ETH_P_TDLS))) { mwifiex_dbg(priv->adapter, DATA, "bypass txqueue; eth type %#x, mgmt %d\n", ntohs(eth_hdr->h_proto), mwifiex_is_skb_mgmt_frame(skb)); if (eth_hdr->h_proto == htons(ETH_P_PAE)) mwifiex_dbg(priv->adapter, MSG, "key: send EAPOL to %pM\n", eth_hdr->h_dest); return true; } return false; } /* * Add buffer into wmm tx queue and queue work to transmit it. 
*/ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb) { struct netdev_queue *txq; int index = mwifiex_1d_to_wmm_queue[skb->priority]; if (atomic_inc_return(&priv->wmm_tx_pending[index]) >= MAX_TX_PENDING) { txq = netdev_get_tx_queue(priv->netdev, index); if (!netif_tx_queue_stopped(txq)) { netif_tx_stop_queue(txq); mwifiex_dbg(priv->adapter, DATA, "stop queue: %d\n", index); } } if (mwifiex_bypass_tx_queue(priv, skb)) { atomic_inc(&priv->adapter->tx_pending); atomic_inc(&priv->adapter->bypass_tx_pending); mwifiex_wmm_add_buf_bypass_txqueue(priv, skb); } else { atomic_inc(&priv->adapter->tx_pending); mwifiex_wmm_add_buf_txqueue(priv, skb); } mwifiex_queue_main_work(priv->adapter); return 0; } struct sk_buff * mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv, struct sk_buff *skb, u8 flag, u64 *cookie) { struct sk_buff *orig_skb = skb; struct mwifiex_txinfo *tx_info, *orig_tx_info; skb = skb_clone(skb, GFP_ATOMIC); if (skb) { int id; spin_lock_bh(&priv->ack_status_lock); id = idr_alloc(&priv->ack_status_frames, orig_skb, 1, 0x10, GFP_ATOMIC); spin_unlock_bh(&priv->ack_status_lock); if (id >= 0) { tx_info = MWIFIEX_SKB_TXCB(skb); tx_info->ack_frame_id = id; tx_info->flags |= flag; orig_tx_info = MWIFIEX_SKB_TXCB(orig_skb); orig_tx_info->ack_frame_id = id; orig_tx_info->flags |= flag; if (flag == MWIFIEX_BUF_FLAG_ACTION_TX_STATUS && cookie) orig_tx_info->cookie = *cookie; } else if (skb_shared(skb)) { kfree_skb(orig_skb); } else { kfree_skb(skb); skb = orig_skb; } } else { /* couldn't clone -- lose tx status ... */ skb = orig_skb; } return skb; } /* * CFG802.11 network device handler for data transmission. */ static netdev_tx_t mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); struct sk_buff *new_skb; struct mwifiex_txinfo *tx_info; bool multicast; mwifiex_dbg(priv->adapter, DATA, "data: %lu BSS(%d-%d): Data <= kernel\n", jiffies, priv->bss_type, priv->bss_num); if (test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags)) { kfree_skb(skb); priv->stats.tx_dropped++; return 0; } if (!skb->len || (skb->len > ETH_FRAME_LEN)) { mwifiex_dbg(priv->adapter, ERROR, "Tx: bad skb len %d\n", skb->len); kfree_skb(skb); priv->stats.tx_dropped++; return 0; } if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) { mwifiex_dbg(priv->adapter, DATA, "data: Tx: insufficient skb headroom %d\n", skb_headroom(skb)); /* Insufficient skb headroom - allocate a new skb */ new_skb = skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN); if (unlikely(!new_skb)) { mwifiex_dbg(priv->adapter, ERROR, "Tx: cannot alloca new_skb\n"); kfree_skb(skb); priv->stats.tx_dropped++; return 0; } kfree_skb(skb); skb = new_skb; mwifiex_dbg(priv->adapter, INFO, "info: new skb headroomd %d\n", skb_headroom(skb)); } tx_info = MWIFIEX_SKB_TXCB(skb); memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; tx_info->pkt_len = skb->len; multicast = is_multicast_ether_addr(skb->data); if (unlikely(!multicast && skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS && priv->adapter->fw_api_ver == MWIFIEX_FW_V15)) skb = mwifiex_clone_skb_for_tx_status(priv, skb, MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS, NULL); /* Record the current time the packet was queued; used to * determine the amount of time the packet was queued in * the driver before it was sent to the firmware. 
* The delay is then sent along with the packet to the * firmware for aggregate delay calculation for stats and * MSDU lifetime expiry. */ __net_timestamp(skb); if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && priv->bss_type == MWIFIEX_BSS_TYPE_STA && !ether_addr_equal_unaligned(priv->cfg_bssid, skb->data)) { if (priv->adapter->auto_tdls && priv->check_tdls_tx) mwifiex_tdls_check_tx(priv, skb); } mwifiex_queue_tx_pkt(priv, skb); return 0; } int mwifiex_set_mac_address(struct mwifiex_private *priv, struct net_device *dev, bool external, u8 *new_mac) { int ret; u64 mac_addr, old_mac_addr; old_mac_addr = ether_addr_to_u64(priv->curr_addr); if (external) { mac_addr = ether_addr_to_u64(new_mac); } else { /* Internal mac address change */ if (priv->bss_type == MWIFIEX_BSS_TYPE_ANY) return -EOPNOTSUPP; mac_addr = old_mac_addr; if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P) { mac_addr |= BIT_ULL(MWIFIEX_MAC_LOCAL_ADMIN_BIT); mac_addr += priv->bss_num; } else if (priv->adapter->priv[0] != priv) { /* Set mac address based on bss_type/bss_num */ mac_addr ^= BIT_ULL(priv->bss_type + 8); mac_addr += priv->bss_num; } } u64_to_ether_addr(mac_addr, priv->curr_addr); /* Send request to firmware */ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_MAC_ADDRESS, HostCmd_ACT_GEN_SET, 0, NULL, true); if (ret) { u64_to_ether_addr(old_mac_addr, priv->curr_addr); mwifiex_dbg(priv->adapter, ERROR, "set mac address failed: ret=%d\n", ret); return ret; } eth_hw_addr_set(dev, priv->curr_addr); return 0; } /* CFG802.11 network device handler for setting MAC address. */ static int mwifiex_ndo_set_mac_address(struct net_device *dev, void *addr) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); struct sockaddr *hw_addr = addr; return mwifiex_set_mac_address(priv, dev, true, hw_addr->sa_data); } /* * CFG802.11 network device handler for setting multicast list. */ static void mwifiex_set_multicast_list(struct net_device *dev) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); struct mwifiex_multicast_list mcast_list; if (dev->flags & IFF_PROMISC) { mcast_list.mode = MWIFIEX_PROMISC_MODE; } else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > MWIFIEX_MAX_MULTICAST_LIST_SIZE) { mcast_list.mode = MWIFIEX_ALL_MULTI_MODE; } else { mcast_list.mode = MWIFIEX_MULTICAST_MODE; mcast_list.num_multicast_addr = mwifiex_copy_mcast_addr(&mcast_list, dev); } mwifiex_request_set_multicast_list(priv, &mcast_list); } /* * CFG802.11 network device handler for transmission timeout. 
*/ static void mwifiex_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); priv->num_tx_timeout++; priv->tx_timeout_cnt++; mwifiex_dbg(priv->adapter, ERROR, "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n", jiffies, priv->tx_timeout_cnt, priv->bss_type, priv->bss_num); mwifiex_set_trans_start(dev); if (priv->tx_timeout_cnt > TX_TIMEOUT_THRESHOLD && priv->adapter->if_ops.card_reset) { mwifiex_dbg(priv->adapter, ERROR, "tx_timeout_cnt exceeds threshold.\t" "Triggering card reset!\n"); priv->adapter->if_ops.card_reset(priv->adapter); } } void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter) { struct usb_card_rec *card = adapter->card; struct mwifiex_private *priv; u16 tx_buf_size; int i, ret; card->mc_resync_flag = true; for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) { if (atomic_read(&card->port[i].tx_data_urb_pending)) { mwifiex_dbg(adapter, WARN, "pending data urb in sys\n"); return; } } card->mc_resync_flag = false; tx_buf_size = 0xffff; priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); ret = mwifiex_send_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF, HostCmd_ACT_GEN_SET, 0, &tx_buf_size, false); if (ret) mwifiex_dbg(adapter, ERROR, "send reconfig tx buf size cmd err\n"); } EXPORT_SYMBOL_GPL(mwifiex_multi_chan_resync); void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter) { /* Dump all the memory data into single file, a userspace script will * be used to split all the memory data to multiple files */ mwifiex_dbg(adapter, MSG, "== mwifiex dump information to /sys/class/devcoredump start\n"); dev_coredumpv(adapter->dev, adapter->devdump_data, adapter->devdump_len, GFP_KERNEL); mwifiex_dbg(adapter, MSG, "== mwifiex dump information to /sys/class/devcoredump end\n"); /* Device dump data will be freed in device coredump release function * after 5 min. Here reset adapter->devdump_data and ->devdump_len * to avoid it been accidentally reused. 
*/ adapter->devdump_data = NULL; adapter->devdump_len = 0; } EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump); void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) { char *p; char drv_version[64]; struct usb_card_rec *cardp; struct sdio_mmc_card *sdio_card; struct mwifiex_private *priv; int i, idx; struct netdev_queue *txq; struct mwifiex_debug_info *debug_info; mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n"); p = adapter->devdump_data; strcpy(p, "========Start dump driverinfo========\n"); p += strlen("========Start dump driverinfo========\n"); p += sprintf(p, "driver_name = " "\"mwifiex\"\n"); mwifiex_drv_get_driver_version(adapter, drv_version, sizeof(drv_version) - 1); p += sprintf(p, "driver_version = %s\n", drv_version); if (adapter->iface_type == MWIFIEX_USB) { cardp = (struct usb_card_rec *)adapter->card; p += sprintf(p, "tx_cmd_urb_pending = %d\n", atomic_read(&cardp->tx_cmd_urb_pending)); p += sprintf(p, "tx_data_urb_pending_port_0 = %d\n", atomic_read(&cardp->port[0].tx_data_urb_pending)); p += sprintf(p, "tx_data_urb_pending_port_1 = %d\n", atomic_read(&cardp->port[1].tx_data_urb_pending)); p += sprintf(p, "rx_cmd_urb_pending = %d\n", atomic_read(&cardp->rx_cmd_urb_pending)); p += sprintf(p, "rx_data_urb_pending = %d\n", atomic_read(&cardp->rx_data_urb_pending)); } p += sprintf(p, "tx_pending = %d\n", atomic_read(&adapter->tx_pending)); p += sprintf(p, "rx_pending = %d\n", atomic_read(&adapter->rx_pending)); if (adapter->iface_type == MWIFIEX_SDIO) { sdio_card = (struct sdio_mmc_card *)adapter->card; p += sprintf(p, "\nmp_rd_bitmap=0x%x curr_rd_port=0x%x\n", sdio_card->mp_rd_bitmap, sdio_card->curr_rd_port); p += sprintf(p, "mp_wr_bitmap=0x%x curr_wr_port=0x%x\n", sdio_card->mp_wr_bitmap, sdio_card->curr_wr_port); } for (i = 0; i < adapter->priv_num; i++) { if (!adapter->priv[i]->netdev) continue; priv = adapter->priv[i]; p += sprintf(p, "\n[interface : \"%s\"]\n", priv->netdev->name); p += sprintf(p, "wmm_tx_pending[0] = %d\n", atomic_read(&priv->wmm_tx_pending[0])); p += sprintf(p, "wmm_tx_pending[1] = %d\n", atomic_read(&priv->wmm_tx_pending[1])); p += sprintf(p, "wmm_tx_pending[2] = %d\n", atomic_read(&priv->wmm_tx_pending[2])); p += sprintf(p, "wmm_tx_pending[3] = %d\n", atomic_read(&priv->wmm_tx_pending[3])); p += sprintf(p, "media_state=\"%s\"\n", !priv->media_connected ? "Disconnected" : "Connected"); p += sprintf(p, "carrier %s\n", (netif_carrier_ok(priv->netdev) ? "on" : "off")); for (idx = 0; idx < priv->netdev->num_tx_queues; idx++) { txq = netdev_get_tx_queue(priv->netdev, idx); p += sprintf(p, "tx queue %d:%s ", idx, netif_tx_queue_stopped(txq) ? "stopped" : "started"); } p += sprintf(p, "\n%s: num_tx_timeout = %d\n", priv->netdev->name, priv->num_tx_timeout); } if (adapter->iface_type == MWIFIEX_SDIO || adapter->iface_type == MWIFIEX_PCIE) { p += sprintf(p, "\n=== %s register dump===\n", adapter->iface_type == MWIFIEX_SDIO ? 
"SDIO" : "PCIE"); if (adapter->if_ops.reg_dump) p += adapter->if_ops.reg_dump(adapter, p); } p += sprintf(p, "\n=== more debug information\n"); debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL); if (debug_info) { for (i = 0; i < adapter->priv_num; i++) { if (!adapter->priv[i]->netdev) continue; priv = adapter->priv[i]; mwifiex_get_debug_info(priv, debug_info); p += mwifiex_debug_info_to_buffer(priv, p, debug_info); break; } kfree(debug_info); } strcpy(p, "\n========End dump========\n"); p += strlen("\n========End dump========\n"); mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n"); adapter->devdump_len = p - (char *)adapter->devdump_data; } EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump); void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter) { u8 idx; char *fw_dump_ptr; u32 dump_len = 0; for (idx = 0; idx < adapter->num_mem_types; idx++) { struct memory_type_mapping *entry = &adapter->mem_type_mapping_tbl[idx]; if (entry->mem_ptr) { dump_len += (strlen("========Start dump ") + strlen(entry->mem_name) + strlen("========\n") + (entry->mem_size + 1) + strlen("\n========End dump========\n")); } } if (dump_len + 1 + adapter->devdump_len > MWIFIEX_FW_DUMP_SIZE) { /* Realloc in case buffer overflow */ fw_dump_ptr = vzalloc(dump_len + 1 + adapter->devdump_len); mwifiex_dbg(adapter, MSG, "Realloc device dump data.\n"); if (!fw_dump_ptr) { vfree(adapter->devdump_data); mwifiex_dbg(adapter, ERROR, "vzalloc devdump data failure!\n"); return; } memmove(fw_dump_ptr, adapter->devdump_data, adapter->devdump_len); vfree(adapter->devdump_data); adapter->devdump_data = fw_dump_ptr; } fw_dump_ptr = (char *)adapter->devdump_data + adapter->devdump_len; for (idx = 0; idx < adapter->num_mem_types; idx++) { struct memory_type_mapping *entry = &adapter->mem_type_mapping_tbl[idx]; if (entry->mem_ptr) { strcpy(fw_dump_ptr, "========Start dump "); fw_dump_ptr += strlen("========Start dump "); strcpy(fw_dump_ptr, entry->mem_name); fw_dump_ptr += strlen(entry->mem_name); strcpy(fw_dump_ptr, "========\n"); fw_dump_ptr += strlen("========\n"); memcpy(fw_dump_ptr, entry->mem_ptr, entry->mem_size); fw_dump_ptr += entry->mem_size; strcpy(fw_dump_ptr, "\n========End dump========\n"); fw_dump_ptr += strlen("\n========End dump========\n"); } } adapter->devdump_len = fw_dump_ptr - (char *)adapter->devdump_data; for (idx = 0; idx < adapter->num_mem_types; idx++) { struct memory_type_mapping *entry = &adapter->mem_type_mapping_tbl[idx]; vfree(entry->mem_ptr); entry->mem_ptr = NULL; entry->mem_size = 0; } } EXPORT_SYMBOL_GPL(mwifiex_prepare_fw_dump_info); /* * CFG802.11 network device handler for statistics retrieval. */ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); return &priv->stats; } static u16 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { skb->priority = cfg80211_classify8021d(skb, NULL); return mwifiex_1d_to_wmm_queue[skb->priority]; } /* Network device handlers */ static const struct net_device_ops mwifiex_netdev_ops = { .ndo_open = mwifiex_open, .ndo_stop = mwifiex_close, .ndo_start_xmit = mwifiex_hard_start_xmit, .ndo_set_mac_address = mwifiex_ndo_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = mwifiex_tx_timeout, .ndo_get_stats = mwifiex_get_stats, .ndo_set_rx_mode = mwifiex_set_multicast_list, .ndo_select_queue = mwifiex_netdev_select_wmm_queue, }; /* * This function initializes the private structure parameters. 
* * The following wait queues are initialized - * - IOCTL wait queue * - Command wait queue * - Statistics wait queue * * ...and the following default parameters are set - * - Current key index : Set to 0 * - Rate index : Set to auto * - Media connected : Set to disconnected * - Adhoc link sensed : Set to false * - Nick name : Set to null * - Number of Tx timeout : Set to 0 * - Device address : Set to current address * - Rx histogram statistc : Set to 0 * * In addition, the CFG80211 work queue is also created. */ void mwifiex_init_priv_params(struct mwifiex_private *priv, struct net_device *dev) { dev->netdev_ops = &mwifiex_netdev_ops; dev->needs_free_netdev = true; /* Initialize private structure */ priv->current_key_index = 0; priv->media_connected = false; memset(priv->mgmt_ie, 0, sizeof(struct mwifiex_ie) * MAX_MGMT_IE_INDEX); priv->beacon_idx = MWIFIEX_AUTO_IDX_MASK; priv->proberesp_idx = MWIFIEX_AUTO_IDX_MASK; priv->assocresp_idx = MWIFIEX_AUTO_IDX_MASK; priv->gen_idx = MWIFIEX_AUTO_IDX_MASK; priv->num_tx_timeout = 0; if (is_valid_ether_addr(dev->dev_addr)) ether_addr_copy(priv->curr_addr, dev->dev_addr); else ether_addr_copy(priv->curr_addr, priv->adapter->perm_addr); if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA || GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { priv->hist_data = kmalloc(sizeof(*priv->hist_data), GFP_KERNEL); if (priv->hist_data) mwifiex_hist_data_reset(priv); } } /* * This function check if command is pending. */ int is_command_pending(struct mwifiex_adapter *adapter) { int is_cmd_pend_q_empty; spin_lock_bh(&adapter->cmd_pending_q_lock); is_cmd_pend_q_empty = list_empty(&adapter->cmd_pending_q); spin_unlock_bh(&adapter->cmd_pending_q_lock); return !is_cmd_pend_q_empty; } /* This is the host mlme work queue function. * It handles the host mlme operations. */ static void mwifiex_host_mlme_work_queue(struct work_struct *work) { struct mwifiex_adapter *adapter = container_of(work, struct mwifiex_adapter, host_mlme_work); if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) return; /* Check for host mlme disconnection */ if (adapter->host_mlme_link_lost) { if (adapter->priv_link_lost) { mwifiex_reset_connect_state(adapter->priv_link_lost, WLAN_REASON_DEAUTH_LEAVING, true); adapter->priv_link_lost = NULL; } adapter->host_mlme_link_lost = false; } /* Check for host mlme Assoc Resp */ if (adapter->assoc_resp_received) { mwifiex_process_assoc_resp(adapter); adapter->assoc_resp_received = false; } } /* * This is the RX work queue function. * * It handles the RX operations. */ static void mwifiex_rx_work_queue(struct work_struct *work) { struct mwifiex_adapter *adapter = container_of(work, struct mwifiex_adapter, rx_work); if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) return; mwifiex_process_rx(adapter); } /* * This is the main work queue function. * * It handles the main process, which in turn handles the complete * driver operations. */ static void mwifiex_main_work_queue(struct work_struct *work) { struct mwifiex_adapter *adapter = container_of(work, struct mwifiex_adapter, main_work); if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) return; mwifiex_main_process(adapter); } /* Common teardown code used for both device removal and reset */ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv; int i; /* We can no longer handle interrupts once we start doing the teardown * below. 
*/ if (adapter->if_ops.disable_int) adapter->if_ops.disable_int(adapter); set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags); mwifiex_terminate_workqueue(adapter); adapter->int_status = 0; /* Stop data */ for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; if (priv->netdev) { mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); netif_device_detach(priv->netdev); } } mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n"); mwifiex_shutdown_drv(adapter); mwifiex_dbg(adapter, CMD, "cmd: mwifiex_shutdown_drv done\n"); if (atomic_read(&adapter->rx_pending) || atomic_read(&adapter->tx_pending) || atomic_read(&adapter->cmd_pending)) { mwifiex_dbg(adapter, ERROR, "rx_pending=%d, tx_pending=%d,\t" "cmd_pending=%d\n", atomic_read(&adapter->rx_pending), atomic_read(&adapter->tx_pending), atomic_read(&adapter->cmd_pending)); } for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; rtnl_lock(); if (priv->netdev && priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) { /* * Close the netdev now, because if we do it later, the * netdev notifiers will need to acquire the wiphy lock * again --> deadlock. */ dev_close(priv->wdev.netdev); wiphy_lock(adapter->wiphy); mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev); wiphy_unlock(adapter->wiphy); } rtnl_unlock(); } wiphy_unregister(adapter->wiphy); wiphy_free(adapter->wiphy); adapter->wiphy = NULL; vfree(adapter->chan_stats); mwifiex_free_cmd_buffers(adapter); } /* * This function can be used for shutting down the adapter SW. */ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv; if (!adapter) return 0; wait_for_completion(adapter->fw_done); /* Caller should ensure we aren't suspending while this happens */ reinit_completion(adapter->fw_done); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); mwifiex_deauthenticate(priv, NULL); mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN); mwifiex_uninit_sw(adapter); adapter->is_up = false; if (adapter->if_ops.down_dev) adapter->if_ops.down_dev(adapter); return 0; } EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw); /* This function can be used for reinitting the adapter SW. 
Required * code is extracted from mwifiex_add_card() */ int mwifiex_reinit_sw(struct mwifiex_adapter *adapter) { int ret; mwifiex_init_lock_list(adapter); if (adapter->if_ops.up_dev) adapter->if_ops.up_dev(adapter); adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING; clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags); init_waitqueue_head(&adapter->init_wait_q); clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); adapter->hs_activated = false; clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags); init_waitqueue_head(&adapter->hs_activate_wait_q); init_waitqueue_head(&adapter->cmd_wait_q.wait); adapter->cmd_wait_q.status = 0; adapter->scan_wait_q_woken = false; if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB) adapter->rx_work_enabled = true; adapter->workqueue = alloc_workqueue("MWIFIEX_WORK_QUEUE", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!adapter->workqueue) goto err_kmalloc; INIT_WORK(&adapter->main_work, mwifiex_main_work_queue); if (adapter->rx_work_enabled) { adapter->rx_workqueue = alloc_workqueue("MWIFIEX_RX_WORK_QUEUE", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!adapter->rx_workqueue) goto err_kmalloc; INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue); } if (adapter->host_mlme_enabled) { adapter->host_mlme_workqueue = alloc_workqueue("MWIFIEX_HOST_MLME_WORK_QUEUE", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!adapter->host_mlme_workqueue) goto err_kmalloc; INIT_WORK(&adapter->host_mlme_work, mwifiex_host_mlme_work_queue); } /* Register the device. Fill up the private data structure with * relevant information from the card. Some code extracted from * mwifiex_register_dev() */ mwifiex_dbg(adapter, INFO, "%s, mwifiex_init_hw_fw()...\n", __func__); if (mwifiex_init_hw_fw(adapter, false)) { mwifiex_dbg(adapter, ERROR, "%s: firmware init failed\n", __func__); goto err_init_fw; } /* _mwifiex_fw_dpc() does its own cleanup */ ret = _mwifiex_fw_dpc(adapter->firmware, adapter); if (ret) { pr_err("Failed to bring up adapter: %d\n", ret); return ret; } mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); return 0; err_init_fw: mwifiex_dbg(adapter, ERROR, "info: %s: unregister device\n", __func__); if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); err_kmalloc: set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags); mwifiex_terminate_workqueue(adapter); if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { mwifiex_dbg(adapter, ERROR, "info: %s: shutdown mwifiex\n", __func__); mwifiex_shutdown_drv(adapter); mwifiex_free_cmd_buffers(adapter); } complete_all(adapter->fw_done); mwifiex_dbg(adapter, INFO, "%s, error\n", __func__); return -1; } EXPORT_SYMBOL_GPL(mwifiex_reinit_sw); static irqreturn_t mwifiex_irq_wakeup_handler(int irq, void *priv) { struct mwifiex_adapter *adapter = priv; dev_dbg(adapter->dev, "%s: wake by wifi", __func__); adapter->wake_by_wifi = true; disable_irq_nosync(irq); /* Notify PM core we are wakeup source */ pm_wakeup_event(adapter->dev, 0); pm_system_wakeup(); return IRQ_HANDLED; } static void mwifiex_probe_of(struct mwifiex_adapter *adapter) { int ret; struct device *dev = adapter->dev; if (!dev->of_node) goto err_exit; adapter->dt_node = dev->of_node; adapter->irq_wakeup = irq_of_parse_and_map(adapter->dt_node, 0); if (!adapter->irq_wakeup) { dev_dbg(dev, "fail to parse irq_wakeup from device tree\n"); goto err_exit; } ret = devm_request_irq(dev, adapter->irq_wakeup, mwifiex_irq_wakeup_handler, IRQF_TRIGGER_LOW, "wifi_wake", adapter); if (ret) { dev_err(dev, "Failed to request 
irq_wakeup %d (%d)\n", adapter->irq_wakeup, ret); goto err_exit; } disable_irq(adapter->irq_wakeup); if (device_init_wakeup(dev, true)) { dev_err(dev, "fail to init wakeup for mwifiex\n"); goto err_exit; } return; err_exit: adapter->irq_wakeup = -1; } /* * This function adds the card. * * This function follows the following major steps to set up the device - * - Initialize software. This includes probing the card, registering * the interface operations table, and allocating/initializing the * adapter structure * - Set up the netlink socket * - Create and start the main work queue * - Register the device * - Initialize firmware and hardware * - Add logical interfaces */ int mwifiex_add_card(void *card, struct completion *fw_done, struct mwifiex_if_ops *if_ops, u8 iface_type, struct device *dev) { struct mwifiex_adapter *adapter; if (mwifiex_register(card, dev, if_ops, (void **)&adapter)) { pr_err("%s: software init failed\n", __func__); goto err_init_sw; } mwifiex_probe_of(adapter); adapter->iface_type = iface_type; adapter->fw_done = fw_done; adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING; clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags); init_waitqueue_head(&adapter->init_wait_q); clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); adapter->hs_activated = false; init_waitqueue_head(&adapter->hs_activate_wait_q); init_waitqueue_head(&adapter->cmd_wait_q.wait); adapter->cmd_wait_q.status = 0; adapter->scan_wait_q_woken = false; if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB) adapter->rx_work_enabled = true; adapter->workqueue = alloc_workqueue("MWIFIEX_WORK_QUEUE", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!adapter->workqueue) goto err_kmalloc; INIT_WORK(&adapter->main_work, mwifiex_main_work_queue); if (adapter->rx_work_enabled) { adapter->rx_workqueue = alloc_workqueue("MWIFIEX_RX_WORK_QUEUE", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!adapter->rx_workqueue) goto err_kmalloc; INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue); } /* Register the device. Fill up the private data structure with relevant information from the card. */ if (adapter->if_ops.register_dev(adapter)) { pr_err("%s: failed to register mwifiex device\n", __func__); goto err_registerdev; } if (adapter->host_mlme_enabled) { adapter->host_mlme_workqueue = alloc_workqueue("MWIFIEX_HOST_MLME_WORK_QUEUE", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!adapter->host_mlme_workqueue) goto err_kmalloc; INIT_WORK(&adapter->host_mlme_work, mwifiex_host_mlme_work_queue); } if (mwifiex_init_hw_fw(adapter, true)) { pr_err("%s: firmware init failed\n", __func__); goto err_init_fw; } return 0; err_init_fw: pr_debug("info: %s: unregister device\n", __func__); if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); err_registerdev: set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags); mwifiex_terminate_workqueue(adapter); if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { pr_debug("info: %s: shutdown mwifiex\n", __func__); mwifiex_shutdown_drv(adapter); mwifiex_free_cmd_buffers(adapter); } err_kmalloc: if (adapter->irq_wakeup >= 0) device_init_wakeup(adapter->dev, false); mwifiex_free_adapter(adapter); err_init_sw: return -1; } EXPORT_SYMBOL_GPL(mwifiex_add_card); /* * This function removes the card. 
* * This function follows the following major steps to remove the device - * - Stop data traffic * - Shutdown firmware * - Remove the logical interfaces * - Terminate the work queue * - Unregister the device * - Free the adapter structure */ int mwifiex_remove_card(struct mwifiex_adapter *adapter) { if (!adapter) return 0; if (adapter->is_up) mwifiex_uninit_sw(adapter); if (adapter->irq_wakeup >= 0) device_init_wakeup(adapter->dev, false); /* Unregister device */ mwifiex_dbg(adapter, INFO, "info: unregister device\n"); if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); /* Free adapter structure */ mwifiex_dbg(adapter, INFO, "info: free adapter\n"); mwifiex_free_adapter(adapter); return 0; } EXPORT_SYMBOL_GPL(mwifiex_remove_card); void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask, const char *fmt, ...) { struct va_format vaf; va_list args; if (!(adapter->debug_mask & mask)) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (adapter->dev) dev_info(adapter->dev, "%pV", &vaf); else pr_info("%pV", &vaf); va_end(args); } EXPORT_SYMBOL_GPL(_mwifiex_dbg); /* * This function initializes the module. * * The debug FS is also initialized if configured. */ static int mwifiex_init_module(void) { #ifdef CONFIG_DEBUG_FS mwifiex_debugfs_init(); #endif return 0; } /* * This function cleans up the module. * * The debug FS is removed if available. */ static void mwifiex_cleanup_module(void) { #ifdef CONFIG_DEBUG_FS mwifiex_debugfs_remove(); #endif } module_init(mwifiex_init_module); module_exit(mwifiex_cleanup_module); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex Driver version " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL v2"); |
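/*
 * Illustrative note (not part of the driver source above or below): the
 * internal MAC-address derivation in mwifiex_set_mac_address() packs the
 * current address into a u64, then, for non-primary interfaces, flips bits
 * based on bss_type/bss_num so each virtual interface gets a distinct
 * address. The standalone userspace sketch below mirrors that arithmetic so
 * the bit manipulation can be checked in isolation. The helper names
 * addr_to_u64()/u64_to_addr() stand in for the kernel's ether_addr_to_u64()
 * and u64_to_ether_addr(), and LOCAL_ADMIN_BIT = 41 is an assumption: it is
 * the locally-administered bit of the first octet when the six address bytes
 * are packed MSB-first, matching the name MWIFIEX_MAC_LOCAL_ADMIN_BIT, whose
 * actual value is not shown here. The example address and indexes are
 * made up.
 */
#include <stdio.h>
#include <stdint.h>

#define LOCAL_ADMIN_BIT 41	/* assumed position of the local-admin bit */

static uint64_t addr_to_u64(const uint8_t a[6])
{
	uint64_t v = 0;
	int i;

	/* pack a[0] as the most significant byte, like ether_addr_to_u64() */
	for (i = 0; i < 6; i++)
		v = (v << 8) | a[i];
	return v;
}

static void u64_to_addr(uint64_t v, uint8_t a[6])
{
	int i;

	for (i = 5; i >= 0; i--) {
		a[i] = v & 0xff;
		v >>= 8;
	}
}

int main(void)
{
	/* example "current" address and interface indexes; arbitrary values */
	uint8_t curr[6] = { 0x00, 0x50, 0x43, 0x21, 0x0a, 0xbc };
	uint8_t derived[6];
	unsigned int bss_type = 1, bss_num = 2;
	int is_p2p = 0;
	uint64_t mac = addr_to_u64(curr);

	if (is_p2p) {
		/* P2P branch: mark as locally administered, offset by bss_num */
		mac |= 1ULL << LOCAL_ADMIN_BIT;
		mac += bss_num;
	} else {
		/* other non-primary interfaces: fold bss_type into the address,
		 * then offset by bss_num
		 */
		mac ^= 1ULL << (bss_type + 8);
		mac += bss_num;
	}

	u64_to_addr(mac, derived);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       derived[0], derived[1], derived[2],
	       derived[3], derived[4], derived[5]);
	return 0;
}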
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SQ905C subdriver
 *
 * Copyright (C) 2009 Theodore Kilgore
 */

/*
 *
 * This driver uses work done in
 * libgphoto2/camlibs/digigr8, Copyright (C) Theodore Kilgore.
 *
 * This driver has also used as a base the sq905c driver
 * and may contain code fragments from it.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define MODULE_NAME "sq905c"

#include <linux/workqueue.h>
#include <linux/slab.h>
#include "gspca.h"

MODULE_AUTHOR("Theodore Kilgore <kilgota@auburn.edu>");
MODULE_DESCRIPTION("GSPCA/SQ905C USB Camera Driver");
MODULE_LICENSE("GPL");

/* Default timeouts, in ms */
#define SQ905C_CMD_TIMEOUT 500
#define SQ905C_DATA_TIMEOUT 1000

/* Maximum transfer size to use. */
#define SQ905C_MAX_TRANSFER 0x8000
#define FRAME_HEADER_LEN 0x50

/* Commands. These go in the "value" slot. */
#define SQ905C_CLEAR       0xa0	/* clear everything */
#define SQ905C_GET_ID      0x14f4	/* Read version number */
#define SQ905C_CAPTURE_LOW 0xa040	/* Starts capture at 160x120 */
#define SQ905C_CAPTURE_MED 0x1440	/* Starts capture at 320x240 */
#define SQ905C_CAPTURE_HI  0x2840	/* Starts capture at 640x480 */

/* For capture, this must go in the "index" slot. */
#define SQ905C_CAPTURE_INDEX 0x110f

/* Structure to hold all of our device specific stuff */
struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */
	const struct v4l2_pix_format *cap_mode;
	/* Driver stuff */
	struct work_struct work_struct;
	struct workqueue_struct *work_thread;
};

/*
 * Most of these cameras will do 640x480 and 320x240. 160x120 works
 * in theory but gives very poor output. Therefore, not supported.
 * The 0x2770:0x9050 cameras have max resolution of 320x240.
 */
static struct v4l2_pix_format sq905c_mode[] = {
	{ 320, 240, V4L2_PIX_FMT_SQ905C, V4L2_FIELD_NONE,
		.bytesperline = 320,
		.sizeimage = 320 * 240,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 0},
	{ 640, 480, V4L2_PIX_FMT_SQ905C, V4L2_FIELD_NONE,
		.bytesperline = 640,
		.sizeimage = 640 * 480,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 0}
};

/* Send a command to the camera.
*/ static int sq905c_command(struct gspca_dev *gspca_dev, u16 command, u16 index) { int ret; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, command, index, NULL, 0, SQ905C_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } return 0; } static int sq905c_read(struct gspca_dev *gspca_dev, u16 command, u16 index, int size) { int ret; ret = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, command, index, gspca_dev->usb_buf, size, SQ905C_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } return 0; } /* * This function is called as a workqueue function and runs whenever the camera * is streaming data. Because it is a workqueue function it is allowed to sleep * so we can use synchronous USB calls. To avoid possible collisions with other * threads attempting to use gspca_dev->usb_buf we take the usb_lock when * performing USB operations using it. In practice we don't really need this * as the camera doesn't provide any controls. */ static void sq905c_dostream(struct work_struct *work) { struct sd *dev = container_of(work, struct sd, work_struct); struct gspca_dev *gspca_dev = &dev->gspca_dev; int bytes_left; /* bytes remaining in current frame. */ int data_len; /* size to use for the next read. */ int act_len; int packet_type; int ret; u8 *buffer; buffer = kmalloc(SQ905C_MAX_TRANSFER, GFP_KERNEL); if (!buffer) { pr_err("Couldn't allocate USB buffer\n"); goto quit_stream; } while (gspca_dev->present && gspca_dev->streaming) { #ifdef CONFIG_PM if (gspca_dev->frozen) break; #endif /* Request the header, which tells the size to download */ ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x81), buffer, FRAME_HEADER_LEN, &act_len, SQ905C_DATA_TIMEOUT); gspca_dbg(gspca_dev, D_STREAM, "Got %d bytes out of %d for header\n", act_len, FRAME_HEADER_LEN); if (ret < 0 || act_len < FRAME_HEADER_LEN) goto quit_stream; /* size is read from 4 bytes starting 0x40, little endian */ bytes_left = buffer[0x40]|(buffer[0x41]<<8)|(buffer[0x42]<<16) |(buffer[0x43]<<24); gspca_dbg(gspca_dev, D_STREAM, "bytes_left = 0x%x\n", bytes_left); /* We keep the header. It has other information, too. */ packet_type = FIRST_PACKET; gspca_frame_add(gspca_dev, packet_type, buffer, FRAME_HEADER_LEN); while (bytes_left > 0 && gspca_dev->present) { data_len = bytes_left > SQ905C_MAX_TRANSFER ? 
SQ905C_MAX_TRANSFER : bytes_left; ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x81), buffer, data_len, &act_len, SQ905C_DATA_TIMEOUT); if (ret < 0 || act_len < data_len) goto quit_stream; gspca_dbg(gspca_dev, D_STREAM, "Got %d bytes out of %d for frame\n", data_len, bytes_left); bytes_left -= data_len; if (bytes_left == 0) packet_type = LAST_PACKET; else packet_type = INTER_PACKET; gspca_frame_add(gspca_dev, packet_type, buffer, data_len); } } quit_stream: if (gspca_dev->present) { mutex_lock(&gspca_dev->usb_lock); sq905c_command(gspca_dev, SQ905C_CLEAR, 0); mutex_unlock(&gspca_dev->usb_lock); } kfree(buffer); } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; struct sd *dev = (struct sd *) gspca_dev; int ret; gspca_dbg(gspca_dev, D_PROBE, "SQ9050 camera detected (vid/pid 0x%04X:0x%04X)\n", id->idVendor, id->idProduct); ret = sq905c_command(gspca_dev, SQ905C_GET_ID, 0); if (ret < 0) { gspca_err(gspca_dev, "Get version command failed\n"); return ret; } ret = sq905c_read(gspca_dev, 0xf5, 0, 20); if (ret < 0) { gspca_err(gspca_dev, "Reading version command failed\n"); return ret; } /* Note we leave out the usb id and the manufacturing date */ gspca_dbg(gspca_dev, D_PROBE, "SQ9050 ID string: %02x - %*ph\n", gspca_dev->usb_buf[3], 6, gspca_dev->usb_buf + 14); cam->cam_mode = sq905c_mode; cam->nmodes = 2; if (gspca_dev->usb_buf[15] == 0) cam->nmodes = 1; /* We don't use the buffer gspca allocates so make it small. */ cam->bulk_size = 32; cam->bulk = 1; INIT_WORK(&dev->work_struct, sq905c_dostream); return 0; } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* wait for the work queue to terminate */ mutex_unlock(&gspca_dev->usb_lock); /* This waits for sq905c_dostream to finish */ destroy_workqueue(dev->work_thread); dev->work_thread = NULL; mutex_lock(&gspca_dev->usb_lock); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { /* connect to the camera and reset it. */ return sq905c_command(gspca_dev, SQ905C_CLEAR, 0); } /* Set up for getting frames. 
*/ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; int ret; dev->cap_mode = gspca_dev->cam.cam_mode; /* "Open the shutter" and set size, to start capture */ switch (gspca_dev->pixfmt.width) { case 640: gspca_dbg(gspca_dev, D_STREAM, "Start streaming at high resolution\n"); dev->cap_mode++; ret = sq905c_command(gspca_dev, SQ905C_CAPTURE_HI, SQ905C_CAPTURE_INDEX); break; default: /* 320 */ gspca_dbg(gspca_dev, D_STREAM, "Start streaming at medium resolution\n"); ret = sq905c_command(gspca_dev, SQ905C_CAPTURE_MED, SQ905C_CAPTURE_INDEX); } if (ret < 0) { gspca_err(gspca_dev, "Start streaming command failed\n"); return ret; } /* Start the workqueue function to do the streaming */ dev->work_thread = create_singlethread_workqueue(MODULE_NAME); if (!dev->work_thread) return -ENOMEM; queue_work(dev->work_thread, &dev->work_struct); return 0; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x2770, 0x905c)}, {USB_DEVICE(0x2770, 0x9050)}, {USB_DEVICE(0x2770, 0x9051)}, {USB_DEVICE(0x2770, 0x9052)}, {USB_DEVICE(0x2770, 0x913d)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stop0 = sd_stop0, }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); |
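/*
 * Illustrative note (not part of the driver source above or below):
 * sq905c_dostream() learns the size of each frame from a 0x50-byte header,
 * reading a 32-bit little-endian length from offset 0x40, and then pulls the
 * payload over the bulk endpoint in chunks of at most SQ905C_MAX_TRANSFER
 * bytes. The standalone userspace sketch below replays that parsing on a
 * plain byte buffer so the offsets and the chunking loop can be checked
 * without hardware; the header contents are made-up example data.
 */
#include <stdio.h>
#include <stdint.h>

#define FRAME_HEADER_LEN 0x50
#define MAX_TRANSFER     0x8000

/* Decode the frame length the same way the driver does: four bytes,
 * least significant first, starting at offset 0x40 of the header.
 */
static uint32_t frame_len(const uint8_t *hdr)
{
	return (uint32_t)hdr[0x40] |
	       ((uint32_t)hdr[0x41] << 8) |
	       ((uint32_t)hdr[0x42] << 16) |
	       ((uint32_t)hdr[0x43] << 24);
}

int main(void)
{
	uint8_t header[FRAME_HEADER_LEN] = { 0 };
	uint32_t bytes_left, chunk;

	/* pretend the camera reported a 0x12345-byte frame */
	header[0x40] = 0x45;
	header[0x41] = 0x23;
	header[0x42] = 0x01;

	bytes_left = frame_len(header);
	printf("frame size: 0x%x bytes\n", (unsigned int)bytes_left);

	/* mirror the driver's bulk-read loop: full-size chunks first,
	 * then the smaller remainder
	 */
	while (bytes_left > 0) {
		chunk = bytes_left > MAX_TRANSFER ? MAX_TRANSFER : bytes_left;
		printf("read %u bytes\n", (unsigned int)chunk);
		bytes_left -= chunk;
	}
	return 0;
}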
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/net/netconsole.c
 *
 * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
 *
 * This file contains the implementation of an IRQ-safe, crash-safe
 * kernel console implementation that outputs kernel messages to the
 * network.
 *
 * Modification history:
 *
 * 2001-09-17 started by Ingo Molnar.
* 2003-08-11 2.6 port by Matt Mackall * simplified options * generic card hooks * works non-modular * 2003-09-07 rewritten with netpoll api */ /**************************************************************** * ****************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mm.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/console.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/netpoll.h> #include <linux/inet.h> #include <linux/configfs.h> #include <linux/etherdevice.h> #include <linux/utsname.h> #include <linux/rtnetlink.h> MODULE_AUTHOR("Matt Mackall <mpm@selenic.com>"); MODULE_DESCRIPTION("Console driver for network interfaces"); MODULE_LICENSE("GPL"); #define MAX_PARAM_LENGTH 256 #define MAX_USERDATA_ENTRY_LENGTH 256 #define MAX_USERDATA_VALUE_LENGTH 200 /* The number 3 comes from userdata entry format characters (' ', '=', '\n') */ #define MAX_USERDATA_NAME_LENGTH (MAX_USERDATA_ENTRY_LENGTH - \ MAX_USERDATA_VALUE_LENGTH - 3) #define MAX_USERDATA_ITEMS 16 #define MAX_PRINT_CHUNK 1000 static char config[MAX_PARAM_LENGTH]; module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0); MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]"); static bool oops_only; module_param(oops_only, bool, 0600); MODULE_PARM_DESC(oops_only, "Only log oops messages"); #define NETCONSOLE_PARAM_TARGET_PREFIX "cmdline" #ifndef MODULE static int __init option_setup(char *opt) { strscpy(config, opt, MAX_PARAM_LENGTH); return 1; } __setup("netconsole=", option_setup); #endif /* MODULE */ /* Linked list of all configured targets */ static LIST_HEAD(target_list); /* target_cleanup_list is used to track targets that need to be cleaned outside * of target_list_lock. It should be cleaned in the same function it is * populated. */ static LIST_HEAD(target_cleanup_list); /* This needs to be a spinlock because write_msg() cannot sleep */ static DEFINE_SPINLOCK(target_list_lock); /* This needs to be a mutex because netpoll_cleanup might sleep */ static DEFINE_MUTEX(target_cleanup_list_lock); /* * Console driver for extended netconsoles. Registered on the first use to * avoid unnecessarily enabling ext message formatting. */ static struct console netconsole_ext; /** * struct netconsole_target - Represents a configured netconsole target. * @list: Links this target into the target_list. * @group: Links us into the configfs subsystem hierarchy. * @userdata_group: Links to the userdata configfs hierarchy * @userdata_complete: Cached, formatted string of append * @userdata_length: String length of userdata_complete * @enabled: On / off knob to enable / disable target. * Visible from userspace (read-write). * We maintain a strict 1:1 correspondence between this and * whether the corresponding netpoll is active or inactive. * Also, other parameters of a target may be modified at * runtime only when it is disabled (enabled == 0). * @extended: Denotes whether console is extended or not. * @release: Denotes whether kernel release version should be prepended * to the message. Depends on extended console. * @np: The netpoll structure for this target. 
* Contains the other userspace visible parameters: * dev_name (read-write) * local_port (read-write) * remote_port (read-write) * local_ip (read-write) * remote_ip (read-write) * local_mac (read-only) * remote_mac (read-write) */ struct netconsole_target { struct list_head list; #ifdef CONFIG_NETCONSOLE_DYNAMIC struct config_group group; struct config_group userdata_group; char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS]; size_t userdata_length; #endif bool enabled; bool extended; bool release; struct netpoll np; }; #ifdef CONFIG_NETCONSOLE_DYNAMIC static struct configfs_subsystem netconsole_subsys; static DEFINE_MUTEX(dynamic_netconsole_mutex); static int __init dynamic_netconsole_init(void) { config_group_init(&netconsole_subsys.su_group); mutex_init(&netconsole_subsys.su_mutex); return configfs_register_subsystem(&netconsole_subsys); } static void __exit dynamic_netconsole_exit(void) { configfs_unregister_subsystem(&netconsole_subsys); } /* * Targets that were created by parsing the boot/module option string * do not exist in the configfs hierarchy (and have NULL names) and will * never go away, so make these a no-op for them. */ static void netconsole_target_get(struct netconsole_target *nt) { if (config_item_name(&nt->group.cg_item)) config_group_get(&nt->group); } static void netconsole_target_put(struct netconsole_target *nt) { if (config_item_name(&nt->group.cg_item)) config_group_put(&nt->group); } #else /* !CONFIG_NETCONSOLE_DYNAMIC */ static int __init dynamic_netconsole_init(void) { return 0; } static void __exit dynamic_netconsole_exit(void) { } /* * No danger of targets going away from under us when dynamic * reconfigurability is off. */ static void netconsole_target_get(struct netconsole_target *nt) { } static void netconsole_target_put(struct netconsole_target *nt) { } static void populate_configfs_item(struct netconsole_target *nt, int cmdline_count) { } #endif /* CONFIG_NETCONSOLE_DYNAMIC */ /* Allocate and initialize with defaults. * Note that these targets get their config_item fields zeroed-out. */ static struct netconsole_target *alloc_and_init(void) { struct netconsole_target *nt; nt = kzalloc(sizeof(*nt), GFP_KERNEL); if (!nt) return nt; if (IS_ENABLED(CONFIG_NETCONSOLE_EXTENDED_LOG)) nt->extended = true; if (IS_ENABLED(CONFIG_NETCONSOLE_PREPEND_RELEASE)) nt->release = true; nt->np.name = "netconsole"; strscpy(nt->np.dev_name, "eth0", IFNAMSIZ); nt->np.local_port = 6665; nt->np.remote_port = 6666; eth_broadcast_addr(nt->np.remote_mac); return nt; } /* Clean up every target in the cleanup_list and move the clean targets back to * the main target_list. */ static void netconsole_process_cleanups_core(void) { struct netconsole_target *nt, *tmp; unsigned long flags; /* The cleanup needs RTNL locked */ ASSERT_RTNL(); mutex_lock(&target_cleanup_list_lock); list_for_each_entry_safe(nt, tmp, &target_cleanup_list, list) { /* all entries in the cleanup_list needs to be disabled */ WARN_ON_ONCE(nt->enabled); do_netpoll_cleanup(&nt->np); /* moved the cleaned target to target_list. 
Need to hold both * locks */ spin_lock_irqsave(&target_list_lock, flags); list_move(&nt->list, &target_list); spin_unlock_irqrestore(&target_list_lock, flags); } WARN_ON_ONCE(!list_empty(&target_cleanup_list)); mutex_unlock(&target_cleanup_list_lock); } #ifdef CONFIG_NETCONSOLE_DYNAMIC /* * Our subsystem hierarchy is: * * /sys/kernel/config/netconsole/ * | * <target>/ * | enabled * | release * | dev_name * | local_port * | remote_port * | local_ip * | remote_ip * | local_mac * | remote_mac * | userdata/ * | <key>/ * | value * | ... * | * <target>/... */ static struct netconsole_target *to_target(struct config_item *item) { struct config_group *cfg_group; cfg_group = to_config_group(item); if (!cfg_group) return NULL; return container_of(to_config_group(item), struct netconsole_target, group); } /* Do the list cleanup with the rtnl lock hold. rtnl lock is necessary because * netdev might be cleaned-up by calling __netpoll_cleanup(), */ static void netconsole_process_cleanups(void) { /* rtnl lock is called here, because it has precedence over * target_cleanup_list_lock mutex and target_cleanup_list */ rtnl_lock(); netconsole_process_cleanups_core(); rtnl_unlock(); } /* Get rid of possible trailing newline, returning the new length */ static void trim_newline(char *s, size_t maxlen) { size_t len; len = strnlen(s, maxlen); if (s[len - 1] == '\n') s[len - 1] = '\0'; } /* * Attribute operations for netconsole_target. */ static ssize_t enabled_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%d\n", to_target(item)->enabled); } static ssize_t extended_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%d\n", to_target(item)->extended); } static ssize_t release_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%d\n", to_target(item)->release); } static ssize_t dev_name_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%s\n", to_target(item)->np.dev_name); } static ssize_t local_port_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%d\n", to_target(item)->np.local_port); } static ssize_t remote_port_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%d\n", to_target(item)->np.remote_port); } static ssize_t local_ip_show(struct config_item *item, char *buf) { struct netconsole_target *nt = to_target(item); if (nt->np.ipv6) return sysfs_emit(buf, "%pI6c\n", &nt->np.local_ip.in6); else return sysfs_emit(buf, "%pI4\n", &nt->np.local_ip); } static ssize_t remote_ip_show(struct config_item *item, char *buf) { struct netconsole_target *nt = to_target(item); if (nt->np.ipv6) return sysfs_emit(buf, "%pI6c\n", &nt->np.remote_ip.in6); else return sysfs_emit(buf, "%pI4\n", &nt->np.remote_ip); } static ssize_t local_mac_show(struct config_item *item, char *buf) { struct net_device *dev = to_target(item)->np.dev; static const u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; return sysfs_emit(buf, "%pM\n", dev ? dev->dev_addr : bcast); } static ssize_t remote_mac_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%pM\n", to_target(item)->np.remote_mac); } /* * This one is special -- targets created through the configfs interface * are not enabled (and the corresponding netpoll activated) by default. * The user is expected to set the desired parameters first (which * would enable him to dynamically add new netpoll targets for new * network interfaces as and when they come up). 
*/ static ssize_t enabled_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); unsigned long flags; bool enabled; ssize_t ret; mutex_lock(&dynamic_netconsole_mutex); ret = kstrtobool(buf, &enabled); if (ret) goto out_unlock; ret = -EINVAL; if (enabled == nt->enabled) { pr_info("network logging has already %s\n", nt->enabled ? "started" : "stopped"); goto out_unlock; } if (enabled) { /* true */ if (nt->release && !nt->extended) { pr_err("Not enabling netconsole. Release feature requires extended log message"); goto out_unlock; } if (nt->extended && !console_is_registered(&netconsole_ext)) register_console(&netconsole_ext); /* * Skip netpoll_parse_options() -- all the attributes are * already configured via configfs. Just print them out. */ netpoll_print_options(&nt->np); ret = netpoll_setup(&nt->np); if (ret) goto out_unlock; nt->enabled = true; pr_info("network logging started\n"); } else { /* false */ /* We need to disable the netconsole before cleaning it up * otherwise we might end up in write_msg() with * nt->np.dev == NULL and nt->enabled == true */ mutex_lock(&target_cleanup_list_lock); spin_lock_irqsave(&target_list_lock, flags); nt->enabled = false; /* Remove the target from the list, while holding * target_list_lock */ list_move(&nt->list, &target_cleanup_list); spin_unlock_irqrestore(&target_list_lock, flags); mutex_unlock(&target_cleanup_list_lock); } ret = strnlen(buf, count); /* Deferred cleanup */ netconsole_process_cleanups(); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t release_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); bool release; ssize_t ret; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); ret = -EINVAL; goto out_unlock; } ret = kstrtobool(buf, &release); if (ret) goto out_unlock; nt->release = release; ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t extended_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); bool extended; ssize_t ret; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); ret = -EINVAL; goto out_unlock; } ret = kstrtobool(buf, &extended); if (ret) goto out_unlock; nt->extended = extended; ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t dev_name_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); mutex_unlock(&dynamic_netconsole_mutex); return -EINVAL; } strscpy(nt->np.dev_name, buf, IFNAMSIZ); trim_newline(nt->np.dev_name, IFNAMSIZ); mutex_unlock(&dynamic_netconsole_mutex); return strnlen(buf, count); } static ssize_t local_port_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); goto out_unlock; } ret = kstrtou16(buf, 10, 
&nt->np.local_port); if (ret < 0) goto out_unlock; ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t remote_port_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); goto out_unlock; } ret = kstrtou16(buf, 10, &nt->np.remote_port); if (ret < 0) goto out_unlock; ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t local_ip_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); goto out_unlock; } if (strnchr(buf, count, ':')) { const char *end; if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) { if (*end && *end != '\n') { pr_err("invalid IPv6 address at: <%c>\n", *end); goto out_unlock; } nt->np.ipv6 = true; } else goto out_unlock; } else { if (!nt->np.ipv6) nt->np.local_ip.ip = in_aton(buf); else goto out_unlock; } ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t remote_ip_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); goto out_unlock; } if (strnchr(buf, count, ':')) { const char *end; if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) { if (*end && *end != '\n') { pr_err("invalid IPv6 address at: <%c>\n", *end); goto out_unlock; } nt->np.ipv6 = true; } else goto out_unlock; } else { if (!nt->np.ipv6) nt->np.remote_ip.ip = in_aton(buf); else goto out_unlock; } ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t remote_mac_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); u8 remote_mac[ETH_ALEN]; ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); goto out_unlock; } if (!mac_pton(buf, remote_mac)) goto out_unlock; if (buf[3 * ETH_ALEN - 1] && buf[3 * ETH_ALEN - 1] != '\n') goto out_unlock; memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN); ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } struct userdatum { struct config_item item; char value[MAX_USERDATA_VALUE_LENGTH]; }; static struct |