Total coverage: 223736 of 1897811 (12%)
File 5: hid-lg4ff.c (1500 lines)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Force feedback support for Logitech Gaming Wheels
 *
 * Including G27, G25, DFP, DFGT, FFEX, Momo, Momo2 &
 * Speed Force Wireless (WiiWheel)
 *
 * Copyright (c) 2010 Simon Wood <simon@mungewell.org>
 */

#include <linux/input.h>
#include <linux/usb.h>
#include <linux/hid.h>

#include "usbhid/usbhid.h"
#include "hid-lg.h"
#include "hid-lg4ff.h"
#include "hid-ids.h"

#define LG4FF_MMODE_IS_MULTIMODE 0
#define LG4FF_MMODE_SWITCHED 1
#define LG4FF_MMODE_NOT_MULTIMODE 2

#define LG4FF_MODE_NATIVE_IDX 0
#define LG4FF_MODE_DFEX_IDX 1
#define LG4FF_MODE_DFP_IDX 2
#define LG4FF_MODE_G25_IDX 3
#define LG4FF_MODE_DFGT_IDX 4
#define LG4FF_MODE_G27_IDX 5
#define LG4FF_MODE_G29_IDX 6
#define LG4FF_MODE_MAX_IDX 7

#define LG4FF_MODE_NATIVE BIT(LG4FF_MODE_NATIVE_IDX)
#define LG4FF_MODE_DFEX BIT(LG4FF_MODE_DFEX_IDX)
#define LG4FF_MODE_DFP BIT(LG4FF_MODE_DFP_IDX)
#define LG4FF_MODE_G25 BIT(LG4FF_MODE_G25_IDX)
#define LG4FF_MODE_DFGT BIT(LG4FF_MODE_DFGT_IDX)
#define LG4FF_MODE_G27 BIT(LG4FF_MODE_G27_IDX)
#define LG4FF_MODE_G29 BIT(LG4FF_MODE_G29_IDX)

#define LG4FF_DFEX_TAG "DF-EX"
#define LG4FF_DFEX_NAME "Driving Force / Formula EX"
#define LG4FF_DFP_TAG "DFP"
#define LG4FF_DFP_NAME "Driving Force Pro"
#define LG4FF_G25_TAG "G25"
#define LG4FF_G25_NAME "G25 Racing Wheel"
#define LG4FF_G27_TAG "G27"
#define LG4FF_G27_NAME "G27 Racing Wheel"
#define LG4FF_G29_TAG "G29"
#define LG4FF_G29_NAME "G29 Racing Wheel"
#define LG4FF_DFGT_TAG "DFGT"
#define LG4FF_DFGT_NAME "Driving Force GT"

#define LG4FF_FFEX_REV_MAJ 0x21
#define LG4FF_FFEX_REV_MIN 0x00

static void lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
static void lg4ff_set_range_g25(struct hid_device *hid, u16 range);

struct lg4ff_wheel_data {
	const u32 product_id;
	u16 combine;
	u16 range;
	const u16 min_range;
	const u16 max_range;
#ifdef CONFIG_LEDS_CLASS
	u8 led_state;
	struct led_classdev *led[5];
#endif
	const u32 alternate_modes;
	const char * const real_tag;
	const char * const real_name;
	const u16 real_product_id;

	void (*set_range)(struct hid_device *hid, u16 range);
};

struct lg4ff_device_entry {
	spinlock_t report_lock;	/* Protect output HID report */
	struct hid_report *report;
	struct lg4ff_wheel_data wdata;
};

static const signed short lg4ff_wheel_effects[] = {
	FF_CONSTANT,
	FF_AUTOCENTER,
	-1
};

static const signed short no_wheel_effects[] = {
	-1
};

struct lg4ff_wheel {
	const u32 product_id;
	const signed short *ff_effects;
	const u16 min_range;
	const u16 max_range;
	void (*set_range)(struct hid_device *hid, u16 range);
};

struct lg4ff_compat_mode_switch {
	const u8 cmd_count;	/* Number of commands to send */
	const u8 cmd[];
};

struct lg4ff_wheel_ident_info {
	const u32 modes;
	const u16 mask;
	const u16 result;
	const u16 real_product_id;
};

struct lg4ff_multimode_wheel {
	const u16 product_id;
	const u32 alternate_modes;
	const char *real_tag;
	const char *real_name;
};

struct lg4ff_alternate_mode {
	const u16 product_id;
	const char *tag;
	const char *name;
};

static const struct lg4ff_wheel lg4ff_devices[] = {
	{USB_DEVICE_ID_LOGITECH_WINGMAN_FG, no_wheel_effects, 40, 180, NULL},
	{USB_DEVICE_ID_LOGITECH_WINGMAN_FFG, lg4ff_wheel_effects, 40, 180, NULL},
	{USB_DEVICE_ID_LOGITECH_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
	{USB_DEVICE_ID_LOGITECH_MOMO_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
	{USB_DEVICE_ID_LOGITECH_DFP_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_dfp},
	{USB_DEVICE_ID_LOGITECH_G25_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25},
	{USB_DEVICE_ID_LOGITECH_DFGT_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25},
	{USB_DEVICE_ID_LOGITECH_G27_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25},
	{USB_DEVICE_ID_LOGITECH_G29_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25},
	{USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2, lg4ff_wheel_effects, 40, 270, NULL},
	{USB_DEVICE_ID_LOGITECH_WII_WHEEL, lg4ff_wheel_effects, 40, 270, NULL}
};

static const struct lg4ff_multimode_wheel lg4ff_multimode_wheels[] = {
	{USB_DEVICE_ID_LOGITECH_DFP_WHEEL,
	 LG4FF_MODE_NATIVE | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	 LG4FF_DFP_TAG, LG4FF_DFP_NAME},
	{USB_DEVICE_ID_LOGITECH_G25_WHEEL,
	 LG4FF_MODE_NATIVE | LG4FF_MODE_G25 | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	 LG4FF_G25_TAG, LG4FF_G25_NAME},
	{USB_DEVICE_ID_LOGITECH_DFGT_WHEEL,
	 LG4FF_MODE_NATIVE | LG4FF_MODE_DFGT | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	 LG4FF_DFGT_TAG, LG4FF_DFGT_NAME},
	{USB_DEVICE_ID_LOGITECH_G27_WHEEL,
	 LG4FF_MODE_NATIVE | LG4FF_MODE_G27 | LG4FF_MODE_G25 | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	 LG4FF_G27_TAG, LG4FF_G27_NAME},
	{USB_DEVICE_ID_LOGITECH_G29_WHEEL,
	 LG4FF_MODE_NATIVE | LG4FF_MODE_G29 | LG4FF_MODE_G27 | LG4FF_MODE_G25 | LG4FF_MODE_DFGT | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	 LG4FF_G29_TAG, LG4FF_G29_NAME},
};

static const struct lg4ff_alternate_mode lg4ff_alternate_modes[] = {
	[LG4FF_MODE_NATIVE_IDX] = {0, "native", ""},
	[LG4FF_MODE_DFEX_IDX] = {USB_DEVICE_ID_LOGITECH_WHEEL, LG4FF_DFEX_TAG, LG4FF_DFEX_NAME},
	[LG4FF_MODE_DFP_IDX] = {USB_DEVICE_ID_LOGITECH_DFP_WHEEL, LG4FF_DFP_TAG, LG4FF_DFP_NAME},
	[LG4FF_MODE_G25_IDX] = {USB_DEVICE_ID_LOGITECH_G25_WHEEL, LG4FF_G25_TAG, LG4FF_G25_NAME},
	[LG4FF_MODE_DFGT_IDX] = {USB_DEVICE_ID_LOGITECH_DFGT_WHEEL, LG4FF_DFGT_TAG, LG4FF_DFGT_NAME},
	[LG4FF_MODE_G27_IDX] = {USB_DEVICE_ID_LOGITECH_G27_WHEEL, LG4FF_G27_TAG, LG4FF_G27_NAME},
	[LG4FF_MODE_G29_IDX] = {USB_DEVICE_ID_LOGITECH_G29_WHEEL, LG4FF_G29_TAG, LG4FF_G29_NAME},
};

/* Multimode wheel identifiers */
static const struct lg4ff_wheel_ident_info lg4ff_dfp_ident_info = {
	LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	0xf000,
	0x1000,
	USB_DEVICE_ID_LOGITECH_DFP_WHEEL
};

static const struct lg4ff_wheel_ident_info lg4ff_g25_ident_info = {
	LG4FF_MODE_G25 | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	0xff00,
	0x1200,
	USB_DEVICE_ID_LOGITECH_G25_WHEEL
};

static const struct lg4ff_wheel_ident_info lg4ff_g27_ident_info = {
	LG4FF_MODE_G27 | LG4FF_MODE_G25 | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	0xfff0,
	0x1230,
	USB_DEVICE_ID_LOGITECH_G27_WHEEL
};

static const struct lg4ff_wheel_ident_info lg4ff_dfgt_ident_info = {
	LG4FF_MODE_DFGT | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	0xff00,
	0x1300,
	USB_DEVICE_ID_LOGITECH_DFGT_WHEEL
};

static const struct lg4ff_wheel_ident_info lg4ff_g29_ident_info = {
	LG4FF_MODE_G29 | LG4FF_MODE_G27 | LG4FF_MODE_G25 | LG4FF_MODE_DFGT | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	0xfff8,
	0x1350,
	USB_DEVICE_ID_LOGITECH_G29_WHEEL
};

static const struct lg4ff_wheel_ident_info lg4ff_g29_ident_info2 = {
	LG4FF_MODE_G29 | LG4FF_MODE_G27 | LG4FF_MODE_G25 | LG4FF_MODE_DFGT | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	0xff00,
	0x8900,
	USB_DEVICE_ID_LOGITECH_G29_WHEEL
};

/* Multimode wheel identification checklists */
static const struct lg4ff_wheel_ident_info *lg4ff_main_checklist[] = {
	&lg4ff_g29_ident_info,
	&lg4ff_g29_ident_info2,
	&lg4ff_dfgt_ident_info,
	&lg4ff_g27_ident_info,
	&lg4ff_g25_ident_info,
	&lg4ff_dfp_ident_info
};

/* Compatibility mode switching commands */
/* EXT_CMD9 - Understood by G27 and DFGT */
static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext09_dfex = {
	2,
	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* Revert mode upon USB reset */
	 0xf8, 0x09, 0x00, 0x01, 0x00, 0x00, 0x00}	/* Switch mode to DF-EX with detach */
};

static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext09_dfp = {
	2,
	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* Revert mode upon USB reset */
	 0xf8, 0x09, 0x01, 0x01, 0x00, 0x00, 0x00}	/* Switch mode to DFP with detach */
};

static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext09_g25 = {
	2,
	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* Revert mode upon USB reset */
	 0xf8, 0x09, 0x02, 0x01, 0x00, 0x00, 0x00}	/* Switch mode to G25 with detach */
};

static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext09_dfgt = {
	2,
	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* Revert mode upon USB reset */
	 0xf8, 0x09, 0x03, 0x01, 0x00, 0x00, 0x00}	/* Switch mode to DFGT with detach */
};

static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext09_g27 = {
	2,
	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* Revert mode upon USB reset */
	 0xf8, 0x09, 0x04, 0x01, 0x00, 0x00, 0x00}	/* Switch mode to G27 with detach */
};

static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext09_g29 = {
	2,
	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* Revert mode upon USB reset */
	 0xf8, 0x09, 0x05, 0x01, 0x01, 0x00, 0x00}	/* Switch mode to G29 with detach */
};

/* EXT_CMD1 - Understood by DFP, G25, G27 and DFGT */
static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext01_dfp = {
	1,
	{0xf8, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00}
};

/* EXT_CMD16 - Understood by G25 and G27 */
static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext16_g25 = {
	1,
	{0xf8, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00}
};

/* Recalculate the X axis value according to the currently selected range */
static s32 lg4ff_adjust_dfp_x_axis(s32 value, u16 range)
{
	u16 max_range;
	s32 new_value;

	if (range == 900)
		return value;
	else if (range == 200)
		return value;
	else if (range < 200)
		max_range = 200;
	else
		max_range = 900;

	/* The DFP reports a 14-bit X axis (0..16383) centred at 8192;
	 * rescale the value around that centre */
	new_value = 8192 + mult_frac(value - 8192, max_range, range);
	if (new_value < 0)
		return 0;
	else if (new_value > 16383)
		return 16383;
	else
		return new_value;
}

int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field,
			     struct hid_usage *usage, s32 value, struct lg_drv_data *drv_data)
{
	struct lg4ff_device_entry *entry = drv_data->device_props;
	s32 new_value = 0;

	if (!entry) {
		hid_err(hid, "Device properties not found");
		return 0;
	}

	switch (entry->wdata.product_id) {
	case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
		switch (usage->code) {
		case ABS_X:
			new_value = lg4ff_adjust_dfp_x_axis(value, entry->wdata.range);
			input_event(field->hidinput->input, usage->type, usage->code, new_value);
			return 1;
		default:
			return 0;
		}
	default:
		return 0;
	}
}

int lg4ff_raw_event(struct hid_device *hdev, struct hid_report *report,
		    u8 *rd, int size, struct lg_drv_data *drv_data)
{
	int offset;
	struct lg4ff_device_entry *entry = drv_data->device_props;

	if (!entry)
		return 0;

	/* Adjust the HID report to present combined pedals data */
	if (entry->wdata.combine) {
		switch (entry->wdata.product_id) {
		case USB_DEVICE_ID_LOGITECH_WHEEL:
			rd[5] = rd[3];
			rd[6] = 0x7F;
			return 1;
		case USB_DEVICE_ID_LOGITECH_WINGMAN_FG:
		case USB_DEVICE_ID_LOGITECH_WINGMAN_FFG:
		case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
		case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
			rd[4] = rd[3];
			rd[5] = 0x7F;
			return 1;
		case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
			rd[5] = rd[4];
			rd[6] = 0x7F;
			return 1;
		case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
		case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
			offset = 5;
			break;
		case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
		case USB_DEVICE_ID_LOGITECH_G29_WHEEL:
			offset = 6;
			break;
		case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
			offset = 3;
			break;
		default:
			return 0;
		}

		/* Compute a combined axis when wheel does not supply it */
		rd[offset] = (0xFF + rd[offset] - rd[offset+1]) >> 1;
		rd[offset+1] = 0x7F;
		return 1;
	}

	return 0;
}

static void lg4ff_init_wheel_data(struct lg4ff_wheel_data * const wdata, const struct lg4ff_wheel *wheel,
				  const struct lg4ff_multimode_wheel *mmode_wheel,
				  const u16 real_product_id)
{
	u32 alternate_modes = 0;
	const char *real_tag = NULL;
	const char *real_name = NULL;

	if (mmode_wheel) {
		alternate_modes = mmode_wheel->alternate_modes;
		real_tag = mmode_wheel->real_tag;
		real_name = mmode_wheel->real_name;
	}

	{
		struct lg4ff_wheel_data t_wdata = { .product_id = wheel->product_id,
						    .real_product_id = real_product_id,
						    .combine = 0,
						    .min_range = wheel->min_range,
						    .max_range = wheel->max_range,
						    .set_range = wheel->set_range,
						    .alternate_modes = alternate_modes,
						    .real_tag = real_tag,
						    .real_name = real_name };

		memcpy(wdata, &t_wdata, sizeof(t_wdata));
	}
}

static int lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	unsigned long flags;
	s32 *value;
	int x;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return -EINVAL;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return -EINVAL;
	}
	value = entry->report->field[0]->value;

#define CLAMP(x) do { if (x < 0) x = 0; else if (x > 0xff) x = 0xff; } while (0)

	switch (effect->type) {
	case FF_CONSTANT:
		/* u.ramp.start_level overlays u.constant.level in the union */
		x = effect->u.ramp.start_level + 0x80;	/* 0x80 is no force */
		CLAMP(x);

		spin_lock_irqsave(&entry->report_lock, flags);
		if (x == 0x80) {
			/* De-activate force in slot 1 */
			value[0] = 0x13;
			value[1] = 0x00;
			value[2] = 0x00;
			value[3] = 0x00;
			value[4] = 0x00;
			value[5] = 0x00;
			value[6] = 0x00;

			hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
			spin_unlock_irqrestore(&entry->report_lock, flags);
			return 0;
		}

		value[0] = 0x11;	/* Slot 1 */
		value[1] = 0x08;
		value[2] = x;
		value[3] = 0x80;
		value[4] = 0x00;
		value[5] = 0x00;
		value[6] = 0x00;

		hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
		spin_unlock_irqrestore(&entry->report_lock, flags);
		break;
	}
	return 0;
}

/* Sends default autocentering command compatible with
 * all wheels except Formula Force EX */
static void lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude)
{
	struct hid_device *hid = input_get_drvdata(dev);
	s32 *value;
	u32 expand_a, expand_b;
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	unsigned long flags;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return;
	}
	value = entry->report->field[0]->value;

	/* De-activate Auto-Center */
	spin_lock_irqsave(&entry->report_lock, flags);
	if (magnitude == 0) {
		value[0] = 0xf5;
		value[1] = 0x00;
		value[2] = 0x00;
		value[3] = 0x00;
		value[4] = 0x00;
		value[5] = 0x00;
		value[6] = 0x00;

		hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
		spin_unlock_irqrestore(&entry->report_lock, flags);
		return;
	}

	if (magnitude <= 0xaaaa) {
		expand_a = 0x0c * magnitude;
		expand_b = 0x80 * magnitude;
	} else {
		expand_a = (0x0c * 0xaaaa) + 0x06 * (magnitude - 0xaaaa);
		expand_b = (0x80 * 0xaaaa) + 0xff * (magnitude - 0xaaaa);
	}

	/* Adjust for non-MOMO wheels */
	switch (entry->wdata.product_id) {
	case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
	case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
		break;
	default:
		expand_a = expand_a >> 1;
		break;
	}

	value[0] = 0xfe;
	value[1] = 0x0d;
	value[2] = expand_a / 0xaaaa;
	value[3] = expand_a / 0xaaaa;
	value[4] = expand_b / 0xaaaa;
	value[5] = 0x00;
	value[6] = 0x00;

	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);

	/* Activate Auto-Center */
	value[0] = 0x14;
	value[1] = 0x00;
	value[2] = 0x00;
	value[3] = 0x00;
	value[4] = 0x00;
	value[5] = 0x00;
	value[6] = 0x00;

	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
	spin_unlock_irqrestore(&entry->report_lock, flags);
}

/* Sends autocentering command compatible with Formula Force EX */
static void lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	unsigned long flags;
	s32 *value;
	magnitude = magnitude * 90 / 65535;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return;
	}
	value = entry->report->field[0]->value;

	spin_lock_irqsave(&entry->report_lock, flags);
	value[0] = 0xfe;
	value[1] = 0x03;
	value[2] = magnitude >> 14;
	value[3] = magnitude >> 14;
	value[4] = magnitude;
	value[5] = 0x00;
	value[6] = 0x00;

	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
	spin_unlock_irqrestore(&entry->report_lock, flags);
}

/* Sends command to set range compatible with G25/G27/Driving Force GT */
static void lg4ff_set_range_g25(struct hid_device *hid, u16 range)
{
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	unsigned long flags;
	s32 *value;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return;
	}
	value = entry->report->field[0]->value;
	dbg_hid("G25/G27/DFGT: setting range to %u\n", range);

	spin_lock_irqsave(&entry->report_lock, flags);
	value[0] = 0xf8;
	value[1] = 0x81;
	value[2] = range & 0x00ff;
	value[3] = (range & 0xff00) >> 8;
	value[4] = 0x00;
	value[5] = 0x00;
	value[6] = 0x00;

	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
	spin_unlock_irqrestore(&entry->report_lock, flags);
}

/* Sends commands to set range compatible with Driving Force Pro wheel */
static void lg4ff_set_range_dfp(struct hid_device *hid, u16 range)
{
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	unsigned long flags;
	int start_left, start_right, full_range;
	s32 *value;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return;
	}
	value = entry->report->field[0]->value;
	dbg_hid("Driving Force Pro: setting range to %u\n", range);

	/* Prepare "coarse" limit command */
	spin_lock_irqsave(&entry->report_lock, flags);
	value[0] = 0xf8;
	value[1] = 0x00;	/* Set later */
	value[2] = 0x00;
	value[3] = 0x00;
	value[4] = 0x00;
	value[5] = 0x00;
	value[6] = 0x00;

	if (range > 200) {
		value[1] = 0x03;
		full_range = 900;
	} else {
		value[1] = 0x02;
		full_range = 200;
	}
	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);

	/* Prepare "fine" limit command */
	value[0] = 0x81;
	value[1] = 0x0b;
	value[2] = 0x00;
	value[3] = 0x00;
	value[4] = 0x00;
	value[5] = 0x00;
	value[6] = 0x00;

	if (range == 200 || range == 900) {	/* Do not apply any fine limit */
		hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
		spin_unlock_irqrestore(&entry->report_lock, flags);
		return;
	}

	/* Construct fine limit command */
	start_left = (((full_range - range + 1) * 2047) / full_range);
	start_right = 0xfff - start_left;

	value[2] = start_left >> 4;
	value[3] = start_right >> 4;
	value[4] = 0xff;
	value[5] = (start_right & 0xe) << 4 | (start_left & 0xe);
	value[6] = 0xff;

	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
	spin_unlock_irqrestore(&entry->report_lock, flags);
}

static const struct lg4ff_compat_mode_switch *lg4ff_get_mode_switch_command(const u16 real_product_id, const u16 target_product_id)
{
	switch (real_product_id) {
	case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
		switch (target_product_id) {
		case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
			return &lg4ff_mode_switch_ext01_dfp;
		/* DFP can only be switched to its native mode */
		default:
			return NULL;
		}
		break;
	case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
		switch (target_product_id) {
		case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
			return &lg4ff_mode_switch_ext01_dfp;
		case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
			return &lg4ff_mode_switch_ext16_g25;
		/* G25 can only be switched to DFP mode or its native mode */
		default:
			return NULL;
		}
		break;
	case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
		switch (target_product_id) {
		case USB_DEVICE_ID_LOGITECH_WHEEL:
			return &lg4ff_mode_switch_ext09_dfex;
		case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
			return &lg4ff_mode_switch_ext09_dfp;
		case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
			return &lg4ff_mode_switch_ext09_g25;
		case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
			return &lg4ff_mode_switch_ext09_g27;
		/* G27 can only be switched to DF-EX, DFP, G25 or its native mode */
		default:
			return NULL;
		}
		break;
	case USB_DEVICE_ID_LOGITECH_G29_WHEEL:
		switch (target_product_id) {
		case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
			return &lg4ff_mode_switch_ext09_dfp;
		case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
			return &lg4ff_mode_switch_ext09_dfgt;
		case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
			return &lg4ff_mode_switch_ext09_g25;
		case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
			return &lg4ff_mode_switch_ext09_g27;
		case USB_DEVICE_ID_LOGITECH_G29_WHEEL:
			return &lg4ff_mode_switch_ext09_g29;
		/* G29 can only be switched to DF-EX, DFP, DFGT, G25, G27 or its native mode */
		default:
			return NULL;
		}
		break;
	case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
		switch (target_product_id) {
		case USB_DEVICE_ID_LOGITECH_WHEEL:
			return &lg4ff_mode_switch_ext09_dfex;
		case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
			return &lg4ff_mode_switch_ext09_dfp;
		case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
			return &lg4ff_mode_switch_ext09_dfgt;
		/* DFGT can only be switched to DF-EX, DFP or its native mode */
		default:
			return NULL;
		}
		break;
	/* No other wheels have multiple modes */
	default:
		return NULL;
	}
}

static int lg4ff_switch_compatibility_mode(struct hid_device *hid, const struct lg4ff_compat_mode_switch *s)
{
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	unsigned long flags;
	s32 *value;
	u8 i;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return -EINVAL;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return -EINVAL;
	}
	value = entry->report->field[0]->value;

	spin_lock_irqsave(&entry->report_lock, flags);
	for (i = 0; i < s->cmd_count; i++) {
		u8 j;

		for (j = 0; j < 7; j++)
			value[j] = s->cmd[j + (7*i)];

		hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
	}
	spin_unlock_irqrestore(&entry->report_lock, flags);
	hid_hw_wait(hid);
	return 0;
}

static ssize_t lg4ff_alternate_modes_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	ssize_t count = 0;
	int i;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return 0;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return 0;
	}

	if (!entry->wdata.real_name) {
		hid_err(hid, "NULL pointer to string\n");
		return 0;
	}

	for (i = 0; i < LG4FF_MODE_MAX_IDX; i++) {
		if (entry->wdata.alternate_modes & BIT(i)) {
			/* Print tag and full name */
			count += sysfs_emit_at(buf, count, "%s: %s",
					       lg4ff_alternate_modes[i].tag,
					       !lg4ff_alternate_modes[i].product_id ?
						entry->wdata.real_name : lg4ff_alternate_modes[i].name);

			if (count >= PAGE_SIZE - 1)
				return count;

			/* Mark the currently active mode with an asterisk */
			if (lg4ff_alternate_modes[i].product_id == entry->wdata.product_id ||
			    (lg4ff_alternate_modes[i].product_id == 0 &&
			     entry->wdata.product_id == entry->wdata.real_product_id))
				count += sysfs_emit_at(buf, count, " *\n");
			else
				count += sysfs_emit_at(buf, count, "\n");

			if (count >= PAGE_SIZE - 1)
				return count;
		}
	}

	return count;
}

static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	const struct lg4ff_compat_mode_switch *s;
	u16 target_product_id = 0;
	int i, ret;
	char *lbuf;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return -EINVAL;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return -EINVAL;
	}

	/* Allow \n at the end of the input parameter */
	lbuf = kasprintf(GFP_KERNEL, "%s", buf);
	if (!lbuf)
		return -ENOMEM;

	i = strlen(lbuf);
	if (i == 0) {
		kfree(lbuf);
		return -EINVAL;
	}

	if (lbuf[i-1] == '\n') {
		if (i == 1) {
			kfree(lbuf);
			return -EINVAL;
		}
		lbuf[i-1] = '\0';
	}

	for (i = 0; i < LG4FF_MODE_MAX_IDX; i++) {
		const u16 mode_product_id = lg4ff_alternate_modes[i].product_id;
		const char *tag = lg4ff_alternate_modes[i].tag;

		if (entry->wdata.alternate_modes & BIT(i)) {
			if (!strcmp(tag, lbuf)) {
				if (!mode_product_id)
					target_product_id = entry->wdata.real_product_id;
				else
					target_product_id = mode_product_id;
				break;
			}
		}
	}

	if (i == LG4FF_MODE_MAX_IDX) {
		hid_info(hid, "Requested mode \"%s\" is not supported by the device\n", lbuf);
		kfree(lbuf);
		return -EINVAL;
	}
	kfree(lbuf);	/* Not needed anymore */

	if (target_product_id == entry->wdata.product_id)	/* Nothing to do */
		return count;

	/* Automatic switching has to be disabled for the switch to DF-EX mode to work correctly */
	if (target_product_id == USB_DEVICE_ID_LOGITECH_WHEEL && !lg4ff_no_autoswitch) {
		hid_info(hid, "\"%s\" cannot be switched to \"DF-EX\" mode. Load the \"hid_logitech\" module with \"lg4ff_no_autoswitch=1\" parameter set and try again\n",
			 entry->wdata.real_name);
		return -EINVAL;
	}

	/* Take care of hardware limitations */
	if ((entry->wdata.real_product_id == USB_DEVICE_ID_LOGITECH_DFP_WHEEL ||
	     entry->wdata.real_product_id == USB_DEVICE_ID_LOGITECH_G25_WHEEL) &&
	    entry->wdata.product_id > target_product_id) {
		hid_info(hid, "\"%s\" cannot be switched back into \"%s\" mode\n",
			 entry->wdata.real_name, lg4ff_alternate_modes[i].name);
		return -EINVAL;
	}

	s = lg4ff_get_mode_switch_command(entry->wdata.real_product_id, target_product_id);
	if (!s) {
		hid_err(hid, "Invalid target product ID %X\n", target_product_id);
		return -EINVAL;
	}

	ret = lg4ff_switch_compatibility_mode(hid, s);
	return (ret == 0 ? count : ret);
}
static DEVICE_ATTR(alternate_modes, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, lg4ff_alternate_modes_show, lg4ff_alternate_modes_store);

static ssize_t lg4ff_combine_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	size_t count;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return 0;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return 0;
	}

	count = sysfs_emit(buf, "%u\n", entry->wdata.combine);
	return count;
}

static ssize_t lg4ff_combine_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	u16 combine = simple_strtoul(buf, NULL, 10);

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return -EINVAL;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return -EINVAL;
	}

	if (combine > 1)
		combine = 1;

	entry->wdata.combine = combine;
	return count;
}
static DEVICE_ATTR(combine_pedals, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, lg4ff_combine_show, lg4ff_combine_store);

/* Export the currently set range of the wheel */
static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	size_t count;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return 0;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return 0;
	}

	count = sysfs_emit(buf, "%u\n", entry->wdata.range);
	return count;
}

/* Set range to user specified value, call appropriate function
 * according to the type of the wheel */
static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	u16 range = simple_strtoul(buf, NULL, 10);

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return -EINVAL;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return -EINVAL;
	}

	if (range == 0)
		range = entry->wdata.max_range;

	/* Check if the wheel supports range setting
	 * and that the range is within limits for the wheel */
	if (entry->wdata.set_range && range >= entry->wdata.min_range && range <= entry->wdata.max_range) {
		entry->wdata.set_range(hid, range);
		entry->wdata.range = range;
	}

	return count;
}
static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, lg4ff_range_show, lg4ff_range_store);

static ssize_t lg4ff_real_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	size_t count;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return 0;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return 0;
	}

	if (!entry->wdata.real_tag || !entry->wdata.real_name) {
		hid_err(hid, "NULL pointer to string\n");
		return 0;
	}
"%s: %s\n", entry->wdata.real_tag, entry->wdata.real_name); return count; } static ssize_t lg4ff_real_id_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { /* Real ID is a read-only value */ return -EPERM; } static DEVICE_ATTR(real_id, S_IRUGO, lg4ff_real_id_show, lg4ff_real_id_store); #ifdef CONFIG_LEDS_CLASS static void lg4ff_set_leds(struct hid_device *hid, u8 leds) { struct lg_drv_data *drv_data; struct lg4ff_device_entry *entry; unsigned long flags; s32 *value; drv_data = hid_get_drvdata(hid); if (!drv_data) { hid_err(hid, "Private driver data not found!\n"); return; } entry = drv_data->device_props; if (!entry) { hid_err(hid, "Device properties not found!\n"); return; } value = entry->report->field[0]->value; spin_lock_irqsave(&entry->report_lock, flags); value[0] = 0xf8; value[1] = 0x12; value[2] = leds; value[3] = 0x00; value[4] = 0x00; value[5] = 0x00; value[6] = 0x00; hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); spin_unlock_irqrestore(&entry->report_lock, flags); } static void lg4ff_led_set_brightness(struct led_classdev *led_cdev, enum led_brightness value) { struct device *dev = led_cdev->dev->parent; struct hid_device *hid = to_hid_device(dev); struct lg_drv_data *drv_data = hid_get_drvdata(hid); struct lg4ff_device_entry *entry; int i, state = 0; if (!drv_data) { hid_err(hid, "Device data not found."); return; } entry = drv_data->device_props; if (!entry) { hid_err(hid, "Device properties not found."); return; } for (i = 0; i < 5; i++) { if (led_cdev != entry->wdata.led[i]) continue; state = (entry->wdata.led_state >> i) & 1; if (value == LED_OFF && state) { entry->wdata.led_state &= ~(1 << i); lg4ff_set_leds(hid, entry->wdata.led_state); } else if (value != LED_OFF && !state) { entry->wdata.led_state |= 1 << i; lg4ff_set_leds(hid, entry->wdata.led_state); } break; } } static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cdev) { struct device *dev = led_cdev->dev->parent; struct hid_device *hid = to_hid_device(dev); struct lg_drv_data *drv_data = hid_get_drvdata(hid); struct lg4ff_device_entry *entry; int i, value = 0; if (!drv_data) { hid_err(hid, "Device data not found."); return LED_OFF; } entry = drv_data->device_props; if (!entry) { hid_err(hid, "Device properties not found."); return LED_OFF; } for (i = 0; i < 5; i++) if (led_cdev == entry->wdata.led[i]) { value = (entry->wdata.led_state >> i) & 1; break; } return value ? LED_FULL : LED_OFF; } #endif static u16 lg4ff_identify_multimode_wheel(struct hid_device *hid, const u16 reported_product_id, const u16 bcdDevice) { u32 current_mode; int i; /* identify current mode from USB PID */ for (i = 1; i < ARRAY_SIZE(lg4ff_alternate_modes); i++) { dbg_hid("Testing whether PID is %X\n", lg4ff_alternate_modes[i].product_id); if (reported_product_id == lg4ff_alternate_modes[i].product_id) break; } if (i == ARRAY_SIZE(lg4ff_alternate_modes)) return 0; current_mode = BIT(i); for (i = 0; i < ARRAY_SIZE(lg4ff_main_checklist); i++) { const u16 mask = lg4ff_main_checklist[i]->mask; const u16 result = lg4ff_main_checklist[i]->result; const u16 real_product_id = lg4ff_main_checklist[i]->real_product_id; if ((current_mode & lg4ff_main_checklist[i]->modes) && \ (bcdDevice & mask) == result) { dbg_hid("Found wheel with real PID %X whose reported PID is %X\n", real_product_id, reported_product_id); return real_product_id; } } /* No match found. 
	/* No match found. This is either Driving Force or an unknown
	 * wheel model, do not touch it */
	dbg_hid("Wheel with bcdDevice %X was not recognized as multimode wheel, leaving in its current mode\n", bcdDevice);
	return 0;
}

static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_product_id, const u16 bcdDevice)
{
	const u16 reported_product_id = hid->product;
	int ret;

	*real_product_id = lg4ff_identify_multimode_wheel(hid, reported_product_id, bcdDevice);
	/* Probed wheel is not a multimode wheel */
	if (!*real_product_id) {
		*real_product_id = reported_product_id;
		dbg_hid("Wheel is not a multimode wheel\n");
		return LG4FF_MMODE_NOT_MULTIMODE;
	}

	/* Switch from "Driving Force" mode to native mode automatically.
	 * Otherwise keep the wheel in its current mode */
	if (reported_product_id == USB_DEVICE_ID_LOGITECH_WHEEL &&
	    reported_product_id != *real_product_id &&
	    !lg4ff_no_autoswitch) {
		const struct lg4ff_compat_mode_switch *s = lg4ff_get_mode_switch_command(*real_product_id, *real_product_id);

		if (!s) {
			hid_err(hid, "Invalid product id %X\n", *real_product_id);
			return LG4FF_MMODE_NOT_MULTIMODE;
		}

		ret = lg4ff_switch_compatibility_mode(hid, s);
		if (ret) {
			/* Wheel could not have been switched to native mode,
			 * leave it in "Driving Force" mode and continue */
			hid_err(hid, "Unable to switch wheel mode, errno %d\n", ret);
			return LG4FF_MMODE_IS_MULTIMODE;
		}
		return LG4FF_MMODE_SWITCHED;
	}

	return LG4FF_MMODE_IS_MULTIMODE;
}

int lg4ff_init(struct hid_device *hid)
{
	struct hid_input *hidinput;
	struct input_dev *dev;
	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
	struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
	const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
	const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
	const struct lg4ff_multimode_wheel *mmode_wheel = NULL;
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	int error, i, j;
	int mmode_ret, mmode_idx = -1;
	u16 real_product_id;

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;
	}
	hidinput = list_entry(hid->inputs.next, struct hid_input, list);
	dev = hidinput->input;

	/* Check that the report looks ok */
	if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
		return -1;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Cannot add device, private driver data not allocated\n");
		return -1;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	spin_lock_init(&entry->report_lock);
	entry->report = report;
	drv_data->device_props = entry;

	/* Check if a multimode wheel has been connected and
	 * handle it appropriately */
	mmode_ret = lg4ff_handle_multimode_wheel(hid, &real_product_id, bcdDevice);

	/* Wheel has been told to switch to native mode. There is no point in going on
	 * with the initialization as the wheel will do a USB reset when it switches mode */
	if (mmode_ret == LG4FF_MMODE_SWITCHED)
		return 0;
	else if (mmode_ret < 0) {
		hid_err(hid, "Unable to switch device mode during initialization, errno %d\n", mmode_ret);
		error = mmode_ret;
		goto err_init;
	}

	/* Check what wheel has been connected */
	for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
		if (hid->product == lg4ff_devices[i].product_id) {
			dbg_hid("Found compatible device, product ID %04X\n", lg4ff_devices[i].product_id);
			break;
		}
	}

	if (i == ARRAY_SIZE(lg4ff_devices)) {
" "Please report this as a bug to LKML, Simon Wood <simon@mungewell.org> or " "Michal Maly <madcatxster@devoid-pointer.net>\n"); error = -1; goto err_init; } if (mmode_ret == LG4FF_MMODE_IS_MULTIMODE) { for (mmode_idx = 0; mmode_idx < ARRAY_SIZE(lg4ff_multimode_wheels); mmode_idx++) { if (real_product_id == lg4ff_multimode_wheels[mmode_idx].product_id) break; } if (mmode_idx == ARRAY_SIZE(lg4ff_multimode_wheels)) { hid_err(hid, "Device product ID %X is not listed as a multimode wheel", real_product_id); error = -1; goto err_init; } } /* Set supported force feedback capabilities */ for (j = 0; lg4ff_devices[i].ff_effects[j] >= 0; j++) set_bit(lg4ff_devices[i].ff_effects[j], dev->ffbit); error = input_ff_create_memless(dev, NULL, lg4ff_play); if (error) goto err_init; /* Initialize device properties */ if (mmode_ret == LG4FF_MMODE_IS_MULTIMODE) { if (WARN_ON(mmode_idx == -1)) return -EINVAL; mmode_wheel = &lg4ff_multimode_wheels[mmode_idx]; } lg4ff_init_wheel_data(&entry->wdata, &lg4ff_devices[i], mmode_wheel, real_product_id); /* Check if autocentering is available and * set the centering force to zero by default */ if (test_bit(FF_AUTOCENTER, dev->ffbit)) { /* Formula Force EX expects different autocentering command */ if ((bcdDevice >> 8) == LG4FF_FFEX_REV_MAJ && (bcdDevice & 0xff) == LG4FF_FFEX_REV_MIN) dev->ff->set_autocenter = lg4ff_set_autocenter_ffex; else dev->ff->set_autocenter = lg4ff_set_autocenter_default; dev->ff->set_autocenter(dev, 0); } /* Create sysfs interface */ error = device_create_file(&hid->dev, &dev_attr_combine_pedals); if (error) hid_warn(hid, "Unable to create sysfs interface for \"combine\", errno %d\n", error); error = device_create_file(&hid->dev, &dev_attr_range); if (error) hid_warn(hid, "Unable to create sysfs interface for \"range\", errno %d\n", error); if (mmode_ret == LG4FF_MMODE_IS_MULTIMODE) { error = device_create_file(&hid->dev, &dev_attr_real_id); if (error) hid_warn(hid, "Unable to create sysfs interface for \"real_id\", errno %d\n", error); error = device_create_file(&hid->dev, &dev_attr_alternate_modes); if (error) hid_warn(hid, "Unable to create sysfs interface for \"alternate_modes\", errno %d\n", error); } dbg_hid("sysfs interface created\n"); /* Set the maximum range to start with */ entry->wdata.range = entry->wdata.max_range; if (entry->wdata.set_range) entry->wdata.set_range(hid, entry->wdata.range); #ifdef CONFIG_LEDS_CLASS /* register led subsystem - G27/G29 only */ entry->wdata.led_state = 0; for (j = 0; j < 5; j++) entry->wdata.led[j] = NULL; if (lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_G27_WHEEL || lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_G29_WHEEL) { struct led_classdev *led; size_t name_sz; char *name; lg4ff_set_leds(hid, 0); name_sz = strlen(dev_name(&hid->dev)) + 8; for (j = 0; j < 5; j++) { led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL); if (!led) { hid_err(hid, "can't allocate memory for LED %d\n", j); goto err_leds; } name = (void *)(&led[1]); snprintf(name, name_sz, "%s::RPM%d", dev_name(&hid->dev), j+1); led->name = name; led->brightness = 0; led->max_brightness = 1; led->brightness_get = lg4ff_led_get_brightness; led->brightness_set = lg4ff_led_set_brightness; entry->wdata.led[j] = led; error = led_classdev_register(&hid->dev, led); if (error) { hid_err(hid, "failed to register LED %d. 
Aborting.\n", j); err_leds: /* Deregister LEDs (if any) */ for (j = 0; j < 5; j++) { led = entry->wdata.led[j]; entry->wdata.led[j] = NULL; if (!led) continue; led_classdev_unregister(led); kfree(led); } goto out; /* Let the driver continue without LEDs */ } } } out: #endif hid_info(hid, "Force feedback support for Logitech Gaming Wheels\n"); return 0; err_init: drv_data->device_props = NULL; kfree(entry); return error; } int lg4ff_deinit(struct hid_device *hid) { struct lg4ff_device_entry *entry; struct lg_drv_data *drv_data; drv_data = hid_get_drvdata(hid); if (!drv_data) { hid_err(hid, "Error while deinitializing device, no private driver data.\n"); return -1; } entry = drv_data->device_props; if (!entry) goto out; /* Nothing more to do */ /* Multimode devices will have at least the "MODE_NATIVE" bit set */ if (entry->wdata.alternate_modes) { device_remove_file(&hid->dev, &dev_attr_real_id); device_remove_file(&hid->dev, &dev_attr_alternate_modes); } device_remove_file(&hid->dev, &dev_attr_combine_pedals); device_remove_file(&hid->dev, &dev_attr_range); #ifdef CONFIG_LEDS_CLASS { int j; struct led_classdev *led; /* Deregister LEDs (if any) */ for (j = 0; j < 5; j++) { led = entry->wdata.led[j]; entry->wdata.led[j] = NULL; if (!led) continue; led_classdev_unregister(led); kfree(led); } } #endif drv_data->device_props = NULL; kfree(entry); out: dbg_hid("Device successfully unregistered\n"); return 0; }
File 6: fs/dax.c (2247 lines)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <linux/rmap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static struct folio *dax_to_folio(void *entry)
{
	return page_folio(pfn_to_page(dax_to_pfn(entry)));
}

static void *dax_make_entry(unsigned long pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}
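/*
 * Editor's illustrative sketch (not part of the original file): how the
 * value-entry encoding above round-trips. With DAX_SHIFT == 4, a PMD entry
 * for pfn 0x1234 stores the value (0x1234 << 4) | DAX_PMD, so the pfn,
 * the size bit and the (clear) lock bit all decode back out.
 */
static void __maybe_unused dax_entry_encoding_example(void)
{
	void *entry = dax_make_entry(0x1234, DAX_PMD);

	WARN_ON(dax_to_pfn(entry) != 0x1234);
	WARN_ON(!dax_is_pmd_entry(entry));
	WARN_ON(dax_is_locked(entry));
}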
/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
	WAKE_ALL,
	WAKE_NEXT,
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
			   enum dax_wake_mode mode)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.  The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_next_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * Wait for the given entry to become unlocked. Caller must hold the i_pages
 * lock and call either put_unlocked_entry() if it did not lock the entry or
 * dax_unlock_entry() if it did. Returns an unlocked entry if still present.
 */
static void *wait_entry_unlocked_exclusive(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	while (unlikely(dax_is_locked(entry))) {
		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_reset(xas);
		xas_unlock_irq(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
		entry = xas_load(xas);
	}

	if (xa_is_internal(entry))
		return NULL;

	return entry;
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages).
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_next_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry,
			       enum dax_wake_mode mode)
{
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, mode);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, WAKE_NEXT);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);

	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

/*
 * A DAX folio is considered shared if it has no mapping set and ->share (which
 * shares the ->index field) is non-zero. Note this may return false even if the
 * page is shared between multiple files but has not yet actually been mapped
 * into multiple address spaces.
 */
static inline bool dax_folio_is_shared(struct folio *folio)
{
	return !folio->mapping && folio->share;
}

/*
 * When it is called by dax_insert_entry(), the shared flag will indicate
 * whether this entry is shared by multiple files. If the page has not
 * previously been associated with any mappings the ->mapping and ->index
 * fields will be set. If it has already been associated with a mapping
 * the mapping will be cleared and the share count set. It's then up to
 * reverse map users like memory_failure() to call back into the filesystem to
 * recover ->mapping and ->index information, for example by implementing
 * dax_holder_operations.
 */
static void dax_folio_make_shared(struct folio *folio)
{
	/*
	 * folio is not currently shared so mark it as shared by clearing
	 * folio->mapping.
	 */
	folio->mapping = NULL;

	/*
	 * folio has previously been mapped into one address space so set the
	 * share count.
	 */
	folio->share = 1;
}

static inline unsigned long dax_folio_put(struct folio *folio)
{
	unsigned long ref;
	int order, i;

	if (!dax_folio_is_shared(folio))
		ref = 0;
	else
		ref = --folio->share;

	if (ref)
		return ref;

	folio->mapping = NULL;
	order = folio_order(folio);
	if (!order)
		return 0;
	folio_reset_order(folio);

	for (i = 0; i < (1UL << order); i++) {
		struct dev_pagemap *pgmap = page_pgmap(&folio->page);
		struct page *page = folio_page(folio, i);
		struct folio *new_folio = (struct folio *)page;

		ClearPageHead(page);
		clear_compound_head(page);

		new_folio->mapping = NULL;
		/*
		 * Reset pgmap which was over-written by
		 * prep_compound_page().
		 */
		new_folio->pgmap = pgmap;
		new_folio->share = 0;
		WARN_ON_ONCE(folio_ref_count(new_folio));
	}

	return ref;
}

static void dax_folio_init(void *entry)
{
	struct folio *folio = dax_to_folio(entry);
	int order = dax_entry_order(entry);

	/*
	 * Folio should have been split back to order-0 pages in
	 * dax_folio_put() when they were removed from their
	 * final mapping.
	 */
	WARN_ON_ONCE(folio_order(folio));

	if (order > 0) {
		prep_compound_page(&folio->page, order);
		if (order > 1)
			INIT_LIST_HEAD(&folio->_deferred_list);
		WARN_ON_ONCE(folio_ref_count(folio));
	}
}

static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address, bool shared)
{
	unsigned long size = dax_entry_size(entry), index;
	struct folio *folio = dax_to_folio(entry);

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	if (shared && (folio->mapping || dax_folio_is_shared(folio))) {
		if (folio->mapping)
			dax_folio_make_shared(folio);

		WARN_ON_ONCE(!folio->share);
		WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio));
		folio->share++;
	} else {
		WARN_ON_ONCE(folio->mapping);
		dax_folio_init(entry);
		folio = dax_to_folio(entry);
		folio->mapping = mapping;
		folio->index = index;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	struct folio *folio = dax_to_folio(entry);

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
		return;

	dax_folio_put(folio);
}

static struct page *dax_busy_page(void *entry)
{
	struct folio *folio = dax_to_folio(entry);

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
		return NULL;

	if (folio_ref_count(folio) - folio_mapcount(folio))
		return &folio->page;
	else
		return NULL;
}

/**
 * dax_lock_folio - Lock the DAX entry corresponding to a folio
 * @folio: The folio whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_folio(struct folio *folio)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure folio->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(folio->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != folio->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, folio->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
{
	struct address_space *mapping = folio->mapping;
	XA_STATE(xas, &mapping->i_pages, folio->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
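/*
 * Editor's illustrative sketch (not part of the original file): the
 * lock/unlock pairing expected by callers such as the memory-failure path.
 * A zero cookie means the entry could not be locked.
 */
static void __maybe_unused dax_lock_folio_example(struct folio *folio)
{
	dax_entry_t cookie = dax_lock_folio(folio);

	if (cookie) {
		/* folio->mapping and folio->index are stable here */
		dax_unlock_folio(folio, cookie);
	}
}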
/*
 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
 * @mapping: the file's mapping whose entry we want to lock
 * @index: the offset within this file
 * @page: output the dax page corresponding to this dax entry
 *
 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
 * could not be locked.
 */
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
		struct page **page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	rcu_read_lock();
	for (;;) {
		entry = NULL;
		if (!dax_mapping(mapping))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		xas_set(&xas, index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		if (!entry ||
		    dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
			/*
			 * Because we look the entry up by the file's mapping
			 * and index, it may not have been inserted yet, or
			 * may be a zero/empty entry. This is not an error
			 * case, so return a special value and do not output
			 * @page.
			 */
			entry = (void *)~0UL;
		} else {
			*page = pfn_to_page(dax_to_pfn(entry));
			dax_lock_entry(&xas, entry);
		}
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
		dax_entry_t cookie)
{
	XA_STATE(xas, &mapping->i_pages, index);

	if (cookie == ~0UL)
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	pmd_downgrade = false;
	xas_lock_irq(xas);
	entry = get_next_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, -EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, WAKE_ALL);
		mapping->nrpages -= PG_PMD_NR;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(0, flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrpages += 1UL << order;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}

/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' till the end of file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
					loff_t start, loff_t end)
{
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	if (!dax_mapping(mapping))
		return NULL;

	/* If end == LLONG_MAX, all pages from start till the end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;
	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_pages() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		entry = wait_entry_unlocked_exclusive(&xas, entry);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);

struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

static int __dax_invalidate_entry(struct address_space *mapping,
				  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_next_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrpages -= 1UL << dax_entry_order(entry);
	ret = 1;
out:
	put_unlocked_entry(&xas, entry, WAKE_ALL);
	xas_unlock_irq(&xas);
	return ret;
}

static int __dax_clear_dirty_range(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	unsigned int scanned = 0;
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end) {
		entry = wait_entry_unlocked_exclusive(&xas, entry);
		if (!entry)
			continue;
		xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
		xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);

		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);

	return 0;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

void dax_delete_mapping_range(struct address_space *mapping,
				loff_t start, loff_t end)
{
	void *entry;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	/* If end == LLONG_MAX, all pages from start till the end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (!xa_is_value(entry))
			continue;
		entry = wait_entry_unlocked_exclusive(&xas, entry);
		if (!entry)
			continue;
		dax_disassociate_entry(entry, mapping, true);
		xas_store(&xas, NULL);
		mapping->nrpages -= 1UL << dax_entry_order(entry);
		put_unlocked_entry(&xas, entry, WAKE_ALL);
	}
	xas_unlock_irq(&xas);
}
EXPORT_SYMBOL_GPL(dax_delete_mapping_range);

static int wait_page_idle(struct page *page,
			  void (cb)(struct inode *),
			  struct inode *inode)
{
	return ___wait_var_event(page, dax_page_is_idle(page),
				 TASK_INTERRUPTIBLE, 0, 0, cb(inode));
}

static void wait_page_idle_uninterruptible(struct page *page,
					   struct inode *inode)
{
	___wait_var_event(page, dax_page_is_idle(page),
			  TASK_UNINTERRUPTIBLE, 0, 0, schedule());
}

/*
 * Unmaps the inode and waits for any DMA to complete prior to deleting the
 * DAX mapping entries for the range.
 *
 * For NOWAIT behavior, pass @cb as NULL to early-exit on the first busy
 * page found.
 */
int dax_break_layout(struct inode *inode, loff_t start, loff_t end,
		void (cb)(struct inode *))
{
	struct page *page;
	int error = 0;

	if (!dax_mapping(inode->i_mapping))
		return 0;

	do {
		page = dax_layout_busy_page_range(inode->i_mapping, start, end);
		if (!page)
			break;
		if (!cb) {
			error = -ERESTARTSYS;
			break;
		}

		error = wait_page_idle(page, cb, inode);
	} while (error == 0);

	if (!page)
		dax_delete_mapping_range(inode->i_mapping, start, end);

	return error;
}
EXPORT_SYMBOL_GPL(dax_break_layout);

void dax_break_layout_final(struct inode *inode)
{
	struct page *page;

	if (!dax_mapping(inode->i_mapping))
		return;

	do {
		page = dax_layout_busy_page_range(inode->i_mapping, 0,
						  LLONG_MAX);
		if (!page)
			break;

		wait_page_idle_uninterruptible(page, inode);
	} while (true);

	if (!page)
		dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_break_layout_final);

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
{
	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
}
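/*
 * Editor's worked example (illustrative, not from the original file): with
 * 4K pages, an extent at iomap->addr = 0x100000 covering file offset
 * iomap->offset = 0x2000, a position pos = 0x3800 first rounds down to the
 * page boundary 0x3000; the backing address is then
 * 0x100000 + 0x3000 - 0x2000 = 0x101000, which PHYS_PFN() turns into
 * pfn 0x101.
 */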
static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
	void *vto, *kaddr;
	long rc;
	int id;

	id = dax_read_lock();
	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
			       &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(vmf->cow_page);
	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
		struct vm_area_struct *vma)
{
	return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
		(iter->iomap.flags & IOMAP_F_DIRTY);
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void *entry, unsigned long pfn,
		unsigned long flags)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *new_entry = dax_make_entry(pfn, flags);
	bool write = iter->flags & IOMAP_WRITE;
	bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
	bool shared = iter->iomap.flags & IOMAP_F_SHARED;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma,
				vmf->address, shared);

		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	if (write && shared)
		xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);

	xas_unlock_irq(xas);
	return entry;
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count, end;
	long ret = 0;
	struct vm_area_struct *vma;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_next_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);
	end = index + count - 1;

	/* Walk all mappings of a given index of a file and writeprotect them */
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
		pfn_mkclean_range(pfn, count, index, vma);
		cond_resched();
	}
	i_mmap_unlock_read(mapping);

	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, WAKE_NEXT);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

put_unlocked:
	put_unlocked_entry(xas, entry, WAKE_NEXT);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
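/*
 * Editor's illustrative sketch (not part of the original file): how a
 * filesystem typically wires this into its ->writepages method for DAX
 * inodes. example_get_daxdev() is a hypothetical stand-in for however the
 * filesystem looks up the dax_device backing the inode (xfs, for instance,
 * passes the dax_device of its data device here).
 */
extern struct dax_device *example_get_daxdev(struct inode *inode); /* hypothetical */

static int __maybe_unused example_dax_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	return dax_writeback_mapping_range(mapping,
			example_get_daxdev(mapping->host), wbc);
}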
static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
		size_t size, void **kaddr, unsigned long *pfnp)
{
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	int id, rc = 0;
	long length;

	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   DAX_ACCESS, kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	if (!pfnp)
		goto out_check_addr;
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (*pfnp & (PHYS_PFN(size)-1))
		goto out;

	rc = 0;

out_check_addr:
	if (!kaddr)
		goto out;
	if (!*kaddr)
		rc = -EFAULT;
out:
	dax_read_unlock(id);
	return rc;
}

/**
 * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page
 * by copying the data before and after the range to be written.
 * @pos:	address to do copy from.
 * @length:	size of copy operation.
 * @align_size:	aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE)
 * @srcmap:	iomap srcmap
 * @daddr:	destination address to copy to.
 *
 * This can be called from two places. Either during DAX write fault (page
 * aligned), to copy the length size data to daddr. Or, while doing normal DAX
 * write operation, dax_iomap_iter() might call this to do the copy of either
 * the start or the end unaligned address. In the latter case the copy of the
 * remaining aligned ranges is taken care of by dax_iomap_iter() itself.
 * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the
 * area to make sure no old data remains.
 */
static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
		const struct iomap *srcmap, void *daddr)
{
	loff_t head_off = pos & (align_size - 1);
	size_t size = ALIGN(head_off + length, align_size);
	loff_t end = pos + length;
	loff_t pg_end = round_up(end, align_size);
	/* copy_all is usually in page fault case */
	bool copy_all = head_off == 0 && end == pg_end;
	/* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */
	bool zero_edge = srcmap->flags & IOMAP_F_SHARED ||
			 srcmap->type == IOMAP_UNWRITTEN;
	void *saddr = NULL;
	int ret = 0;

	if (!zero_edge) {
		ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
		if (ret)
			return dax_mem2blk_err(ret);
	}

	if (copy_all) {
		if (zero_edge)
			memset(daddr, 0, size);
		else
			ret = copy_mc_to_kernel(daddr, saddr, length);
		goto out;
	}

	/* Copy the head part of the range */
	if (head_off) {
		if (zero_edge)
			memset(daddr, 0, head_off);
		else {
			ret = copy_mc_to_kernel(daddr, saddr, head_off);
			if (ret)
				return -EIO;
		}
	}

	/* Copy the tail part of the range */
	if (end < pg_end) {
		loff_t tail_off = head_off + length;
		loff_t tail_len = pg_end - end;

		if (zero_edge)
			memset(daddr + tail_off, 0, tail_len);
		else {
			ret = copy_mc_to_kernel(daddr + tail_off,
					saddr + tail_off, tail_len);
			if (ret)
				return -EIO;
		}
	}
out:
	if (zero_edge)
		dax_flush(srcmap->dax_dev, daddr, size);
	return ret ? -EIO : 0;
}
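/*
 * Editor's worked example (illustrative, not from the original file): with
 * align_size = 4096, pos = 0x1100 and length = 0x200: head_off = 0x100,
 * end = 0x1300, pg_end = 0x2000 and size = 0x1000. The head copy fills
 * [0, 0x100) of the destination page, the caller's write lands in
 * [0x100, 0x300), and the tail copy fills tail_off = 0x300 for
 * tail_len = 0xd00 bytes, reconstructing the whole page around the write.
 */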
/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void **entry)
{
	struct inode *inode = iter->inode;
	unsigned long vaddr = vmf->address;
	unsigned long pfn = my_zero_pfn(vaddr);
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);

	ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), false);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	struct folio *zero_folio;
	vm_fault_t ret;

	zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);

	if (unlikely(!zero_folio)) {
		trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
		return VM_FAULT_FALLBACK;
	}

	*entry = dax_insert_entry(xas, vmf, iter, *entry, folio_pfn(zero_folio),
				  DAX_PMD | DAX_ZERO_PAGE);

	ret = vmf_insert_folio_pmd(vmf, zero_folio, false);
	if (ret == VM_FAULT_NOPAGE)
		trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
	return ret;
}
#else
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void **entry)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

static int dax_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t copy_pos = iter->pos;
	u64 copy_len = iomap_length(iter);
	u32 mod;
	int id = 0;
	s64 ret;
	void *daddr = NULL, *saddr = NULL;

	if (!iomap_want_unshare_iter(iter))
		return iomap_iter_advance_full(iter);

	/*
	 * Extend the file range to be aligned to fsblock/pagesize, because
	 * we need to copy entire blocks, not just the byte range specified.
	 * Invalidate the mapping because we're about to CoW.
	 */
	mod = offset_in_page(copy_pos);
	if (mod) {
		copy_len += mod;
		copy_pos -= mod;
	}

	mod = offset_in_page(copy_pos + copy_len);
	if (mod)
		copy_len += PAGE_SIZE - mod;

	invalidate_inode_pages2_range(iter->inode->i_mapping,
				      copy_pos >> PAGE_SHIFT,
				      (copy_pos + copy_len - 1) >> PAGE_SHIFT);

	id = dax_read_lock();
	ret = dax_iomap_direct_access(iomap, copy_pos, copy_len, &daddr, NULL);
	if (ret < 0)
		goto out_unlock;

	ret = dax_iomap_direct_access(srcmap, copy_pos, copy_len, &saddr, NULL);
	if (ret < 0)
		goto out_unlock;

	if (copy_mc_to_kernel(daddr, saddr, copy_len) != 0)
		ret = -EIO;

out_unlock:
	dax_read_unlock(id);
	if (ret < 0)
		return dax_mem2blk_err(ret);
	return iomap_iter_advance_full(iter);
}

int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
	};
	loff_t size = i_size_read(inode);
	int ret;

	if (pos < 0 || pos >= size)
		return 0;

	iter.len = min(len, size - pos);
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = dax_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_file_unshare);

static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	unsigned offset = offset_in_page(pos);
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	void *kaddr;
	long ret;

	ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
				NULL);
	if (ret < 0)
		return dax_mem2blk_err(ret);

	memset(kaddr + offset, 0, size);
	if (iomap->flags & IOMAP_F_SHARED)
		ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap,
					    kaddr);
	else
		dax_flush(iomap->dax_dev, kaddr + offset, size);
	return ret;
}

static int dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	u64 length = iomap_length(iter);
	int ret;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return iomap_iter_advance(iter, &length);

	/*
	 * invalidate the pages whose sharing state is to be changed
	 * because of CoW.
	 */
	if (iomap->flags & IOMAP_F_SHARED)
		invalidate_inode_pages2_range(iter->inode->i_mapping,
					      iter->pos >> PAGE_SHIFT,
					      (iter->pos + length - 1) >> PAGE_SHIFT);

	do {
		loff_t pos = iter->pos;
		unsigned offset = offset_in_page(pos);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		int id;

		length = min_t(u64, PAGE_SIZE - offset, length);

		id = dax_read_lock();
		if (IS_ALIGNED(pos, PAGE_SIZE) && length == PAGE_SIZE)
			ret = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
		else
			ret = dax_memzero(iter, pos, length);
		dax_read_unlock(id);

		if (ret < 0)
			return ret;

		ret = iomap_iter_advance(iter, &length);
		if (ret)
			return ret;
	} while (length > 0);

	if (did_zero)
		*did_zero = true;
	return ret;
}

int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_DAX | IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = dax_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_zero_range);

int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);

static int dax_iomap_iter(struct iomap_iter *iomi, struct iov_iter *iter)
{
	const struct iomap *iomap = &iomi->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iomi);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	struct dax_device *dax_dev = iomap->dax_dev;
	loff_t end = pos + length, done = 0;
	bool write = iov_iter_rw(iter) == WRITE;
	bool cow = write && iomap->flags & IOMAP_F_SHARED;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (!write) {
		end = min(end, i_size_read(iomi->inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) {
			done = iov_iter_zero(min(length, end - pos), iter);
			return iomap_iter_advance(iomi, &done);
		}
	}

	/*
	 * In DAX mode, enforce either pure overwrites of written extents, or
	 * writes to unwritten extents as part of a copy-on-write operation.
	 */
	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
			!(iomap->flags & IOMAP_F_SHARED)))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW || cow) {
		/*
		 * Filesystem allows CoW on non-shared extents. The src extents
		 * may have been mmapped with dirty mark before. To be able to
		 * invalidate its dax entries, we need to clear the dirty mark
		 * in advance.
		 */
		if (cow)
			__dax_clear_dirty_range(iomi->inode->i_mapping,
						pos >> PAGE_SHIFT,
						(end - 1) >> PAGE_SHIFT);
		invalidate_inode_pages2_range(iomi->inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while ((pos = iomi->pos) < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		ssize_t map_len;
		bool recovery = false;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				DAX_ACCESS, &kaddr, NULL);
		if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) {
			map_len = dax_direct_access(dax_dev, pgoff,
					PHYS_PFN(size), DAX_RECOVERY_WRITE,
					&kaddr, NULL);
			if (map_len > 0)
				recovery = true;
		}
		if (map_len < 0) {
			ret = dax_mem2blk_err(map_len);
			break;
		}

		if (cow) {
			ret = dax_iomap_copy_around(pos, length, PAGE_SIZE,
						    srcmap, kaddr);
			if (ret)
				break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (recovery)
			xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
					map_len, iter);
		else if (write)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		length = xfer;
		ret = iomap_iter_advance(iomi, &length);
		if (!ret && xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct iomap_iter iomi = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DAX,
	};
	loff_t done = 0;
	int ret;

	if (WARN_ON_ONCE(iocb->ki_flags & IOCB_ATOMIC))
		return -EIO;

	if (!iomi.len)
		return 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_write(&iomi.inode->i_rwsem);
		iomi.flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&iomi.inode->i_rwsem);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		iomi.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iomi, ops)) > 0)
		iomi.status = dax_iomap_iter(&iomi, iter);

	done = iomi.pos - iocb->ki_pos;
	iocb->ki_pos = iomi.pos;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	return vmf_error(error);
}

/*
 * When handling a synchronous page fault and the inode needs an fsync, we can
 * insert the PTE/PMD into page tables only after that fsync happened. Skip
 * insertion for now and return the pfn so that the caller can insert it after
 * the fsync is done.
 */
static vm_fault_t dax_fault_synchronous_pfnp(unsigned long *pfnp,
					     unsigned long pfn)
{
	if (WARN_ON_ONCE(!pfnp))
		return VM_FAULT_SIGBUS;
	*pfnp = pfn;
	return VM_FAULT_NEEDDSYNC;
}

static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
		const struct iomap_iter *iter)
{
	vm_fault_t ret;
	int error = 0;

	switch (iter->iomap.type) {
	case IOMAP_HOLE:
	case IOMAP_UNWRITTEN:
		clear_user_highpage(vmf->cow_page, vmf->address);
		break;
	case IOMAP_MAPPED:
		error = copy_cow_page_dax(vmf, iter);
		break;
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

	if (error)
		return dax_fault_return(error);

	__SetPageUptodate(vmf->cow_page);
	ret = finish_fault(vmf);
	if (!ret)
		return VM_FAULT_DONE_COW;
	return ret;
}

/**
 * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
 * @vmf:	vm fault instance
 * @iter:	iomap iter
 * @pfnp:	pfn to be returned
 * @xas:	the dax mapping tree of a file
 * @entry:	an unlocked dax entry to be inserted
 * @pmd:	distinguish whether it is a pmd fault
 */
static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
		const struct iomap_iter *iter, unsigned long *pfnp,
		struct xa_state *xas, void **entry, bool pmd)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
	bool write = iter->flags & IOMAP_WRITE;
	unsigned long entry_flags = pmd ? DAX_PMD : 0;
	struct folio *folio;
	int ret, err = 0;
	unsigned long pfn;
	void *kaddr;

	if (!pmd && vmf->cow_page)
		return dax_fault_cow_page(vmf, iter);

	/* if we are reading UNWRITTEN and HOLE, return a hole. */
	if (!write &&
	    (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
		if (!pmd)
			return dax_load_hole(xas, vmf, iter, entry);
		return dax_pmd_load_hole(xas, vmf, iter, entry);
	}

	if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
		WARN_ON_ONCE(1);
		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
	}

	err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
	if (err)
		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);

	if (write && iomap->flags & IOMAP_F_SHARED) {
		err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr);
		if (err)
			return dax_fault_return(err);
	}

	folio = dax_to_folio(*entry);
	if (dax_fault_is_synchronous(iter, vmf->vma))
		return dax_fault_synchronous_pfnp(pfnp, pfn);

	folio_ref_inc(folio);
	if (pmd)
		ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn), write);
	else
		ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), write);
	folio_put(folio);

	return ret;
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf,
		unsigned long *pfnp, int *iomap_errp,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct iomap_iter iter = {
		.inode		= mapping->host,
		.pos		= (loff_t)vmf->pgoff << PAGE_SHIFT,
		.len		= PAGE_SIZE,
		.flags		= IOMAP_DAX | IOMAP_FAULT,
	};
	vm_fault_t ret = 0;
	void *entry;
	int error;

	trace_dax_pte_fault(iter.inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (iter.pos >= i_size_read(iter.inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		iter.flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
			iter.status = -EIO;	/* fs corruption? */
			continue;
		}

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
		if (ret != VM_FAULT_SIGBUS &&
		    (iter.iomap.flags & IOMAP_F_NEW)) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			ret |= VM_FAULT_MAJOR;
		}

		if (!(ret & VM_FAULT_ERROR)) {
			u64 length = PAGE_SIZE;
			iter.status = iomap_iter_advance(&iter, &length);
		}
	}

	if (iomap_errp)
		*iomap_errp = error;
	if (!ret && error)
		ret = dax_fault_return(error);

unlock_entry:
	dax_unlock_entry(&xas, entry);
out:
	trace_dax_pte_fault_done(iter.inode, vmf, ret);
	return ret;
}

#ifdef CONFIG_FS_DAX_PMD
static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
		pgoff_t max_pgoff)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		return true;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vmf->vma->vm_flags & VM_SHARED))
		return true;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vmf->vma->vm_start)
		return true;
	if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return true;

	/* If the PMD would extend beyond the file size */
	if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
		return true;

	return false;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	struct iomap_iter iter = {
		.inode		= mapping->host,
		.len		= PMD_SIZE,
		.flags		= IOMAP_DAX | IOMAP_FAULT,
	};
	vm_fault_t ret = VM_FAULT_FALLBACK;
	pgoff_t max_pgoff;
	void *entry;

	if (vmf->flags & FAULT_FLAG_WRITE)
		iter.flags |= IOMAP_WRITE;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);

	trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);

	if (xas.xa_index >= max_pgoff) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd)) {
		ret = 0;
		goto unlock_entry;
	}

	iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	while (iomap_iter(&iter, ops) > 0) {
		if (iomap_length(&iter) < PMD_SIZE)
			continue; /* actually breaks out of the loop */

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
		if (ret != VM_FAULT_FALLBACK) {
			u64 length = PMD_SIZE;
			iter.status = iomap_iter_advance(&iter, &length);
		}
	}

unlock_entry:
	dax_unlock_entry(&xas, entry);
fallback:
	if (ret == VM_FAULT_FALLBACK) {
		split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
	return ret;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @order: Order of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
		    unsigned long *pfnp, int *iomap_errp,
		    const struct iomap_ops *ops)
{
	if (order == 0)
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	else if (order == PMD_ORDER)
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	else
		return VM_FAULT_FALLBACK;
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
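/*
 * Editor's illustrative sketch (not part of the original file): the shape of
 * a filesystem fault handler built on the helpers above, loosely following
 * what ext4 and xfs do. example_iomap_ops is a hypothetical stand-in for the
 * filesystem's iomap operations.
 */
extern const struct iomap_ops example_iomap_ops; /* hypothetical */

static vm_fault_t __maybe_unused example_dax_fault(struct vm_fault *vmf,
		unsigned int order)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long pfn;
	vm_fault_t ret;

	filemap_invalidate_lock_shared(inode->i_mapping);
	ret = dax_iomap_fault(vmf, order, &pfn, NULL, &example_iomap_ops);
	filemap_invalidate_unlock_shared(inode->i_mapping);

	/* a MAP_SYNC write fault must fsync before the PTE is inserted */
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, order, pfn);
	return ret;
}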
/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
		unsigned long pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	struct folio *folio;
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_next_unlocked_entry(&xas, order);
	/* Did we race with someone splitting entry or so? */
	if (!entry || dax_is_conflict(entry) ||
	    (order == 0 && !dax_is_pte_entry(entry))) {
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	folio = pfn_folio(pfn);
	folio_ref_inc(folio);
	if (order == 0)
		ret = vmf_insert_page_mkwrite(vmf, &folio->page, true);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_folio_pmd(vmf, folio, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	folio_put(folio);
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @order: Order of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
		unsigned long pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);

static int dax_range_compare_iter(struct iomap_iter *it_src,
		struct iomap_iter *it_dest, u64 len, bool *same)
{
	const struct iomap *smap = &it_src->iomap;
	const struct iomap *dmap = &it_dest->iomap;
	loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
	u64 dest_len;
	void *saddr, *daddr;
	int id, ret;

	len = min(len, min(smap->length, dmap->length));

	if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
		*same = true;
		goto advance;
	}

	if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
		*same = false;
		return 0;
	}

	id = dax_read_lock();
	ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
				      &saddr, NULL);
	if (ret < 0)
		goto out_unlock;

	ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
				      &daddr, NULL);
	if (ret < 0)
		goto out_unlock;

	*same = !memcmp(saddr, daddr, len);
	if (!*same)
		len = 0;
	dax_read_unlock(id);

advance:
	dest_len = len;
	ret = iomap_iter_advance(it_src, &len);
	if (!ret)
		ret = iomap_iter_advance(it_dest, &dest_len);
	return ret;

out_unlock:
	dax_read_unlock(id);
	return -EIO;
}

int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
		struct inode *dst, loff_t dstoff, loff_t len, bool *same,
		const struct iomap_ops *ops)
{
	struct iomap_iter src_iter = {
		.inode		= src,
		.pos		= srcoff,
		.len		= len,
		.flags		= IOMAP_DAX,
	};
	struct iomap_iter dst_iter = {
		.inode		= dst,
		.pos		= dstoff,
		.len		= len,
		.flags		= IOMAP_DAX,
	};
	int ret, status;

	while ((ret = iomap_iter(&src_iter, ops)) > 0 &&
	       (ret = iomap_iter(&dst_iter, ops)) > 0) {
		status = dax_range_compare_iter(&src_iter, &dst_iter,
				min(src_iter.len, dst_iter.len), same);
		if (status < 0)
			return ret;
		src_iter.status = dst_iter.status = status;
	}
	return ret;
}

int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
			      struct file *file_out, loff_t pos_out,
			      loff_t *len, unsigned int remap_flags,
			      const struct iomap_ops *ops)
{
	return __generic_remap_file_range_prep(file_in, pos_in, file_out,
					       pos_out, len, remap_flags, ops);
}
EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);
// SPDX-License-Identifier: GPL-2.0
/*
 * SP800-108 Key-derivation function
 *
 * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de>
 */

#include <linux/fips.h>
#include <linux/module.h>
#include <crypto/kdf_sp800108.h>
#include <crypto/internal/kdf_selftest.h>

/*
 * SP800-108 CTR KDF implementation
 */
int crypto_kdf108_ctr_generate(struct crypto_shash *kmd,
			       const struct kvec *info, unsigned int info_nvec,
			       u8 *dst, unsigned int dlen)
{
	SHASH_DESC_ON_STACK(desc, kmd);
	__be32 counter = cpu_to_be32(1);
	const unsigned int h = crypto_shash_digestsize(kmd), dlen_orig = dlen;
	unsigned int i;
	int err = 0;
	u8 *dst_orig = dst;

	desc->tfm = kmd;

	while (dlen) {
		err = crypto_shash_init(desc);
		if (err)
			goto out;

		err = crypto_shash_update(desc, (u8 *)&counter, sizeof(__be32));
		if (err)
			goto out;

		for (i = 0; i < info_nvec; i++) {
			err = crypto_shash_update(desc, info[i].iov_base,
						  info[i].iov_len);
			if (err)
				goto out;
		}

		if (dlen < h) {
			u8 tmpbuffer[HASH_MAX_DIGESTSIZE];

			err = crypto_shash_final(desc, tmpbuffer);
			if (err)
				goto out;

			memcpy(dst, tmpbuffer, dlen);
			memzero_explicit(tmpbuffer, h);
			goto out;
		}

		err = crypto_shash_final(desc, dst);
		if (err)
			goto out;

		dlen -= h;
		dst += h;
		counter = cpu_to_be32(be32_to_cpu(counter) + 1);
	}

out:
	if (err)
		memzero_explicit(dst_orig, dlen_orig);
	shash_desc_zero(desc);
	return err;
}
EXPORT_SYMBOL(crypto_kdf108_ctr_generate);

/*
 * The seeding of the KDF
 */
int crypto_kdf108_setkey(struct crypto_shash *kmd,
			 const u8 *key, size_t keylen,
			 const u8 *ikm, size_t ikmlen)
{
	unsigned int ds = crypto_shash_digestsize(kmd);

	/* SP800-108 does not support IKM */
	if (ikm || ikmlen)
		return -EINVAL;

	/* Check according to SP800-108 section 7.2 */
	if (ds > keylen)
		return -EINVAL;

	/* Set the key for the MAC used for the KDF.
 */
	return crypto_shash_setkey(kmd, key, keylen);
}
EXPORT_SYMBOL(crypto_kdf108_setkey);

/*
 * Test vector obtained from
 * http://csrc.nist.gov/groups/STM/cavp/documents/KBKDF800-108/CounterMode.zip
 */
static const struct kdf_testvec kdf_ctr_hmac_sha256_tv_template[] = {
	{
		.key = "\xdd\x1d\x91\xb7\xd9\x0b\x2b\xd3"
		       "\x13\x85\x33\xce\x92\xb2\x72\xfb"
		       "\xf8\xa3\x69\x31\x6a\xef\xe2\x42"
		       "\xe6\x59\xcc\x0a\xe2\x38\xaf\xe0",
		.keylen = 32,
		.ikm = NULL,
		.ikmlen = 0,
		.info = {
			.iov_base = "\x01\x32\x2b\x96\xb3\x0a\xcd\x19"
				    "\x79\x79\x44\x4e\x46\x8e\x1c\x5c"
				    "\x68\x59\xbf\x1b\x1c\xf9\x51\xb7"
				    "\xe7\x25\x30\x3e\x23\x7e\x46\xb8"
				    "\x64\xa1\x45\xfa\xb2\x5e\x51\x7b"
				    "\x08\xf8\x68\x3d\x03\x15\xbb\x29"
				    "\x11\xd8\x0a\x0e\x8a\xba\x17\xf3"
				    "\xb4\x13\xfa\xac",
			.iov_len = 60
		},
		.expected = "\x10\x62\x13\x42\xbf\xb0\xfd\x40"
			    "\x04\x6c\x0e\x29\xf2\xcf\xdb\xf0",
		.expectedlen = 16
	}
};

static int __init crypto_kdf108_init(void)
{
	int ret;

	if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
		return 0;

	ret = kdf_test(&kdf_ctr_hmac_sha256_tv_template[0], "hmac(sha256)",
		       crypto_kdf108_setkey, crypto_kdf108_ctr_generate);
	if (ret) {
		if (fips_enabled)
			panic("alg: self-tests for CTR-KDF (hmac(sha256)) failed (rc=%d)\n",
			      ret);
		WARN(1,
		     "alg: self-tests for CTR-KDF (hmac(sha256)) failed (rc=%d)\n",
		     ret);
	} else if (fips_enabled) {
		pr_info("alg: self-tests for CTR-KDF (hmac(sha256)) passed\n");
	}

	return ret;
}

static void __exit crypto_kdf108_exit(void) { }

module_init(crypto_kdf108_init);
module_exit(crypto_kdf108_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("Key Derivation Function conformant to SP800-108");
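As a usage sketch (not part of the file above; example_derive, kdk and label_ctx are invented names), the two exported helpers combine like this: allocate an hmac(sha256) shash, seed it with a key-derivation key at least as long as the digest, then generate the requested amount of output:

#include <crypto/hash.h>
#include <crypto/kdf_sp800108.h>
#include <linux/err.h>
#include <linux/uio.h>

static int example_derive(const u8 *kdk, size_t kdklen,
			  u8 *out, unsigned int outlen)
{
	static const u8 label_ctx[] = "example-label";
	struct crypto_shash *kmd;
	struct kvec info;
	int err;

	kmd = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(kmd))
		return PTR_ERR(kmd);

	/* SP800-108 takes no IKM, so the last two arguments are NULL/0. */
	err = crypto_kdf108_setkey(kmd, kdk, kdklen, NULL, 0);
	if (!err) {
		info.iov_base = (void *)label_ctx;
		info.iov_len = sizeof(label_ctx) - 1;
		err = crypto_kdf108_ctr_generate(kmd, &info, 1, out, outlen);
	}

	crypto_free_shash(kmd);
	return err;
}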
// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */
#include <linux/kthread.h>
#include <linux/sizes.h>
#include <linux/usb.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/greybus.h>
#include <linux/unaligned.h>

#include "arpc.h"
#include "greybus_trace.h"

/* Default timeout for USB vendor requests.
*/ #define ES2_USB_CTRL_TIMEOUT 500 /* Default timeout for ARPC CPort requests */ #define ES2_ARPC_CPORT_TIMEOUT 500 /* Fixed CPort numbers */ #define ES2_CPORT_CDSI0 16 #define ES2_CPORT_CDSI1 17 /* Memory sizes for the buffers sent to/from the ES2 controller */ #define ES2_GBUF_MSG_SIZE_MAX 2048 /* Memory sizes for the ARPC buffers */ #define ARPC_OUT_SIZE_MAX U16_MAX #define ARPC_IN_SIZE_MAX 128 static const struct usb_device_id id_table[] = { { USB_DEVICE(0x18d1, 0x1eaf) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); #define APB1_LOG_SIZE SZ_16K /* * Number of CPort IN urbs in flight at any point in time. * Adjust if we are having stalls in the USB buffer due to not enough urbs in * flight. */ #define NUM_CPORT_IN_URB 4 /* Number of CPort OUT urbs in flight at any point in time. * Adjust if we get messages saying we are out of urbs in the system log. */ #define NUM_CPORT_OUT_URB 8 /* * Number of ARPC in urbs in flight at any point in time. */ #define NUM_ARPC_IN_URB 2 /* * @endpoint: bulk in endpoint for CPort data * @urb: array of urbs for the CPort in messages * @buffer: array of buffers for the @cport_in_urb urbs */ struct es2_cport_in { __u8 endpoint; struct urb *urb[NUM_CPORT_IN_URB]; u8 *buffer[NUM_CPORT_IN_URB]; }; /** * struct es2_ap_dev - ES2 USB Bridge to AP structure * @usb_dev: pointer to the USB device we are. * @usb_intf: pointer to the USB interface we are bound to. * @hd: pointer to our gb_host_device structure * * @cport_in: endpoint, urbs and buffer for cport in messages * @cport_out_endpoint: endpoint for cport out messages * @cport_out_urb: array of urbs for the CPort out messages * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or * not. * @cport_out_urb_cancelled: array of flags indicating whether the * corresponding @cport_out_urb is being cancelled * @cport_out_urb_lock: locks the @cport_out_urb_busy "list" * @cdsi1_in_use: true if cport CDSI1 is in use * @apb_log_task: task pointer for logging thread * @apb_log_dentry: file system entry for the log file interface * @apb_log_enable_dentry: file system entry for enabling logging * @apb_log_fifo: kernel FIFO to carry logged data * @arpc_urb: array of urbs for the ARPC in messages * @arpc_buffer: array of buffers for the @arpc_urb urbs * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC * @arpc_id_cycle: gives an unique id to ARPC * @arpc_lock: locks ARPC list * @arpcs: list of in progress ARPCs */ struct es2_ap_dev { struct usb_device *usb_dev; struct usb_interface *usb_intf; struct gb_host_device *hd; struct es2_cport_in cport_in; __u8 cport_out_endpoint; struct urb *cport_out_urb[NUM_CPORT_OUT_URB]; bool cport_out_urb_busy[NUM_CPORT_OUT_URB]; bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB]; spinlock_t cport_out_urb_lock; bool cdsi1_in_use; struct task_struct *apb_log_task; struct dentry *apb_log_dentry; struct dentry *apb_log_enable_dentry; DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE); __u8 arpc_endpoint_in; struct urb *arpc_urb[NUM_ARPC_IN_URB]; u8 *arpc_buffer[NUM_ARPC_IN_URB]; int arpc_id_cycle; spinlock_t arpc_lock; struct list_head arpcs; }; struct arpc { struct list_head list; struct arpc_request_message *req; struct arpc_response_message *resp; struct completion response_received; bool active; }; static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd) { return (struct es2_ap_dev *)&hd->hd_priv; } static void cport_out_callback(struct urb *urb); static void usb_log_enable(struct es2_ap_dev *es2); static void usb_log_disable(struct es2_ap_dev *es2); static int 
arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload, size_t size, int *result, unsigned int timeout); static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd) { struct usb_device *udev = es2->usb_dev; u8 *data; int retval; data = kmemdup(req, size, GFP_KERNEL); if (!data) return -ENOMEM; retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0, 0, data, size, ES2_USB_CTRL_TIMEOUT); if (retval < 0) dev_err(&udev->dev, "%s: return error %d\n", __func__, retval); else retval = 0; kfree(data); return retval; } static void ap_urb_complete(struct urb *urb) { struct usb_ctrlrequest *dr = urb->context; kfree(dr); usb_free_urb(urb); } static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd) { struct usb_device *udev = es2->usb_dev; struct urb *urb; struct usb_ctrlrequest *dr; u8 *buf; int retval; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC); if (!dr) { usb_free_urb(urb); return -ENOMEM; } buf = (u8 *)dr + sizeof(*dr); memcpy(buf, req, size); dr->bRequest = cmd; dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE; dr->wValue = 0; dr->wIndex = 0; dr->wLength = cpu_to_le16(size); usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0), (unsigned char *)dr, buf, size, ap_urb_complete, dr); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) { usb_free_urb(urb); kfree(dr); } return retval; } static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, bool async) { struct es2_ap_dev *es2 = hd_to_es2(hd); if (async) return output_async(es2, req, size, cmd); return output_sync(es2, req, size, cmd); } static int es2_cport_in_enable(struct es2_ap_dev *es2, struct es2_cport_in *cport_in) { struct urb *urb; int ret; int i; for (i = 0; i < NUM_CPORT_IN_URB; ++i) { urb = cport_in->urb[i]; ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { dev_err(&es2->usb_dev->dev, "failed to submit in-urb: %d\n", ret); goto err_kill_urbs; } } return 0; err_kill_urbs: for (--i; i >= 0; --i) { urb = cport_in->urb[i]; usb_kill_urb(urb); } return ret; } static void es2_cport_in_disable(struct es2_ap_dev *es2, struct es2_cport_in *cport_in) { struct urb *urb; int i; for (i = 0; i < NUM_CPORT_IN_URB; ++i) { urb = cport_in->urb[i]; usb_kill_urb(urb); } } static int es2_arpc_in_enable(struct es2_ap_dev *es2) { struct urb *urb; int ret; int i; for (i = 0; i < NUM_ARPC_IN_URB; ++i) { urb = es2->arpc_urb[i]; ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { dev_err(&es2->usb_dev->dev, "failed to submit arpc in-urb: %d\n", ret); goto err_kill_urbs; } } return 0; err_kill_urbs: for (--i; i >= 0; --i) { urb = es2->arpc_urb[i]; usb_kill_urb(urb); } return ret; } static void es2_arpc_in_disable(struct es2_ap_dev *es2) { struct urb *urb; int i; for (i = 0; i < NUM_ARPC_IN_URB; ++i) { urb = es2->arpc_urb[i]; usb_kill_urb(urb); } } static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask) { struct urb *urb = NULL; unsigned long flags; int i; spin_lock_irqsave(&es2->cport_out_urb_lock, flags); /* Look in our pool of allocated urbs first, as that's the "fastest" */ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { if (!es2->cport_out_urb_busy[i] && !es2->cport_out_urb_cancelled[i]) { es2->cport_out_urb_busy[i] = true; urb = es2->cport_out_urb[i]; break; } } spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); if (urb) return urb; /* * Crap, pool is empty, complain to the syslog and go allocate one * dynamically as we have to succeed. 
*/ dev_dbg(&es2->usb_dev->dev, "No free CPort OUT urbs, having to dynamically allocate one!\n"); return usb_alloc_urb(0, gfp_mask); } static void free_urb(struct es2_ap_dev *es2, struct urb *urb) { unsigned long flags; int i; /* * See if this was an urb in our pool, if so mark it "free", otherwise * we need to free it ourselves. */ spin_lock_irqsave(&es2->cport_out_urb_lock, flags); for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { if (urb == es2->cport_out_urb[i]) { es2->cport_out_urb_busy[i] = false; urb = NULL; break; } } spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); /* If urb is not NULL, then we need to free this urb */ usb_free_urb(urb); } /* * We (ab)use the operation-message header pad bytes to transfer the * cport id in order to minimise overhead. */ static void gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id) { header->pad[0] = cport_id; } /* Clear the pad bytes used for the CPort id */ static void gb_message_cport_clear(struct gb_operation_msg_hdr *header) { header->pad[0] = 0; } /* Extract the CPort id packed into the header, and clear it */ static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header) { u16 cport_id = header->pad[0]; gb_message_cport_clear(header); return cport_id; } /* * Returns zero if the message was successfully queued, or a negative errno * otherwise. */ static int message_send(struct gb_host_device *hd, u16 cport_id, struct gb_message *message, gfp_t gfp_mask) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct usb_device *udev = es2->usb_dev; size_t buffer_size; int retval; struct urb *urb; unsigned long flags; /* * The data actually transferred will include an indication * of where the data should be sent. Do one last check of * the target CPort id before filling it in. */ if (!cport_id_valid(hd, cport_id)) { dev_err(&udev->dev, "invalid cport %u\n", cport_id); return -EINVAL; } /* Find a free urb */ urb = next_free_urb(es2, gfp_mask); if (!urb) return -ENOMEM; spin_lock_irqsave(&es2->cport_out_urb_lock, flags); message->hcpriv = urb; spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); /* Pack the cport id into the message header */ gb_message_cport_pack(message->header, cport_id); buffer_size = sizeof(*message->header) + message->payload_size; usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, es2->cport_out_endpoint), message->buffer, buffer_size, cport_out_callback, message); urb->transfer_flags |= URB_ZERO_PACKET; trace_gb_message_submit(message); retval = usb_submit_urb(urb, gfp_mask); if (retval) { dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval); spin_lock_irqsave(&es2->cport_out_urb_lock, flags); message->hcpriv = NULL; spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); free_urb(es2, urb); gb_message_cport_clear(message->header); return retval; } return 0; } /* * Can not be called in atomic context. */ static void message_cancel(struct gb_message *message) { struct gb_host_device *hd = message->operation->connection->hd; struct es2_ap_dev *es2 = hd_to_es2(hd); struct urb *urb; int i; might_sleep(); spin_lock_irq(&es2->cport_out_urb_lock); urb = message->hcpriv; /* Prevent dynamically allocated urb from being deallocated. */ usb_get_urb(urb); /* Prevent pre-allocated urb from being reused. 
*/ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { if (urb == es2->cport_out_urb[i]) { es2->cport_out_urb_cancelled[i] = true; break; } } spin_unlock_irq(&es2->cport_out_urb_lock); usb_kill_urb(urb); if (i < NUM_CPORT_OUT_URB) { spin_lock_irq(&es2->cport_out_urb_lock); es2->cport_out_urb_cancelled[i] = false; spin_unlock_irq(&es2->cport_out_urb_lock); } usb_free_urb(urb); } static int es2_cport_allocate(struct gb_host_device *hd, int cport_id, unsigned long flags) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct ida *id_map = &hd->cport_id_map; int ida_start, ida_end; switch (cport_id) { case ES2_CPORT_CDSI0: case ES2_CPORT_CDSI1: dev_err(&hd->dev, "cport %d not available\n", cport_id); return -EBUSY; } if (flags & GB_CONNECTION_FLAG_OFFLOADED && flags & GB_CONNECTION_FLAG_CDSI1) { if (es2->cdsi1_in_use) { dev_err(&hd->dev, "CDSI1 already in use\n"); return -EBUSY; } es2->cdsi1_in_use = true; return ES2_CPORT_CDSI1; } if (cport_id < 0) { ida_start = 0; ida_end = hd->num_cports - 1; } else if (cport_id < hd->num_cports) { ida_start = cport_id; ida_end = cport_id; } else { dev_err(&hd->dev, "cport %d not available\n", cport_id); return -EINVAL; } return ida_alloc_range(id_map, ida_start, ida_end, GFP_KERNEL); } static void es2_cport_release(struct gb_host_device *hd, u16 cport_id) { struct es2_ap_dev *es2 = hd_to_es2(hd); switch (cport_id) { case ES2_CPORT_CDSI1: es2->cdsi1_in_use = false; return; } ida_free(&hd->cport_id_map, cport_id); } static int cport_enable(struct gb_host_device *hd, u16 cport_id, unsigned long flags) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct usb_device *udev = es2->usb_dev; struct gb_apb_request_cport_flags *req; u32 connection_flags; int ret; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; connection_flags = 0; if (flags & GB_CONNECTION_FLAG_CONTROL) connection_flags |= GB_APB_CPORT_FLAG_CONTROL; if (flags & GB_CONNECTION_FLAG_HIGH_PRIO) connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO; req->flags = cpu_to_le32(connection_flags); dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__, cport_id, connection_flags); ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), GB_APB_REQUEST_CPORT_FLAGS, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, cport_id, 0, req, sizeof(*req), ES2_USB_CTRL_TIMEOUT); if (ret < 0) { dev_err(&udev->dev, "failed to set cport flags for port %d\n", cport_id); goto out; } ret = 0; out: kfree(req); return ret; } static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_connected_req req; int ret; req.cport_id = cpu_to_le16(cport_id); ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req), NULL, ES2_ARPC_CPORT_TIMEOUT); if (ret) { dev_err(dev, "failed to set connected state for cport %u: %d\n", cport_id, ret); return ret; } return 0; } static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_flush_req req; int ret; req.cport_id = cpu_to_le16(cport_id); ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req), NULL, ES2_ARPC_CPORT_TIMEOUT); if (ret) { dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret); return ret; } return 0; } static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id, u8 phase, unsigned int timeout) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_shutdown_req req; int result; int ret; if 
(timeout > U16_MAX) return -EINVAL; req.cport_id = cpu_to_le16(cport_id); req.timeout = cpu_to_le16(timeout); req.phase = phase; ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req), &result, ES2_ARPC_CPORT_TIMEOUT + timeout); if (ret) { dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n", cport_id, ret, result); return ret; } return 0; } static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id, size_t peer_space, unsigned int timeout) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_quiesce_req req; int result; int ret; if (peer_space > U16_MAX) return -EINVAL; if (timeout > U16_MAX) return -EINVAL; req.cport_id = cpu_to_le16(cport_id); req.peer_space = cpu_to_le16(peer_space); req.timeout = cpu_to_le16(timeout); ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req), &result, ES2_ARPC_CPORT_TIMEOUT + timeout); if (ret) { dev_err(dev, "failed to quiesce cport %u: %d (%d)\n", cport_id, ret, result); return ret; } return 0; } static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_clear_req req; int ret; req.cport_id = cpu_to_le16(cport_id); ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req), NULL, ES2_ARPC_CPORT_TIMEOUT); if (ret) { dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret); return ret; } return 0; } static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id) { int retval; struct es2_ap_dev *es2 = hd_to_es2(hd); struct usb_device *udev = es2->usb_dev; retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), GB_APB_REQUEST_LATENCY_TAG_EN, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, cport_id, 0, NULL, 0, ES2_USB_CTRL_TIMEOUT); if (retval < 0) dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n", cport_id); return retval; } static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id) { int retval; struct es2_ap_dev *es2 = hd_to_es2(hd); struct usb_device *udev = es2->usb_dev; retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), GB_APB_REQUEST_LATENCY_TAG_DIS, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, cport_id, 0, NULL, 0, ES2_USB_CTRL_TIMEOUT); if (retval < 0) dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n", cport_id); return retval; } static struct gb_hd_driver es2_driver = { .hd_priv_size = sizeof(struct es2_ap_dev), .message_send = message_send, .message_cancel = message_cancel, .cport_allocate = es2_cport_allocate, .cport_release = es2_cport_release, .cport_enable = cport_enable, .cport_connected = es2_cport_connected, .cport_flush = es2_cport_flush, .cport_shutdown = es2_cport_shutdown, .cport_quiesce = es2_cport_quiesce, .cport_clear = es2_cport_clear, .latency_tag_enable = latency_tag_enable, .latency_tag_disable = latency_tag_disable, .output = output, }; /* Common function to report consistent warnings based on URB status */ static int check_urb_status(struct urb *urb) { struct device *dev = &urb->dev->dev; int status = urb->status; switch (status) { case 0: return 0; case -EOVERFLOW: dev_err(dev, "%s: overflow actual length is %d\n", __func__, urb->actual_length); fallthrough; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -EILSEQ: case -EPROTO: /* device is gone, stop sending */ return status; } dev_err(dev, "%s: unknown status %d\n", __func__, status); return -EAGAIN; } static void es2_destroy(struct es2_ap_dev *es2) { struct usb_device *udev; struct urb *urb; int i; 
debugfs_remove(es2->apb_log_enable_dentry); usb_log_disable(es2); /* Tear down everything! */ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { urb = es2->cport_out_urb[i]; usb_kill_urb(urb); usb_free_urb(urb); es2->cport_out_urb[i] = NULL; es2->cport_out_urb_busy[i] = false; /* just to be anal */ } for (i = 0; i < NUM_ARPC_IN_URB; ++i) { usb_free_urb(es2->arpc_urb[i]); kfree(es2->arpc_buffer[i]); es2->arpc_buffer[i] = NULL; } for (i = 0; i < NUM_CPORT_IN_URB; ++i) { usb_free_urb(es2->cport_in.urb[i]); kfree(es2->cport_in.buffer[i]); es2->cport_in.buffer[i] = NULL; } /* release reserved CDSI0 and CDSI1 cports */ gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1); gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0); udev = es2->usb_dev; gb_hd_put(es2->hd); usb_put_dev(udev); } static void cport_in_callback(struct urb *urb) { struct gb_host_device *hd = urb->context; struct device *dev = &urb->dev->dev; struct gb_operation_msg_hdr *header; int status = check_urb_status(urb); int retval; u16 cport_id; if (status) { if ((status == -EAGAIN) || (status == -EPROTO)) goto exit; /* The urb is being unlinked */ if (status == -ENOENT || status == -ESHUTDOWN) return; dev_err(dev, "urb cport in error %d (dropped)\n", status); return; } if (urb->actual_length < sizeof(*header)) { dev_err(dev, "short message received\n"); goto exit; } /* Extract the CPort id, which is packed in the message header */ header = urb->transfer_buffer; cport_id = gb_message_cport_unpack(header); if (cport_id_valid(hd, cport_id)) { greybus_data_rcvd(hd, cport_id, urb->transfer_buffer, urb->actual_length); } else { dev_err(dev, "invalid cport id %u received\n", cport_id); } exit: /* put our urb back in the request pool */ retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(dev, "failed to resubmit in-urb: %d\n", retval); } static void cport_out_callback(struct urb *urb) { struct gb_message *message = urb->context; struct gb_host_device *hd = message->operation->connection->hd; struct es2_ap_dev *es2 = hd_to_es2(hd); int status = check_urb_status(urb); unsigned long flags; gb_message_cport_clear(message->header); spin_lock_irqsave(&es2->cport_out_urb_lock, flags); message->hcpriv = NULL; spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); /* * Tell the submitter that the message send (attempt) is * complete, and report the status. 
 */
	greybus_message_sent(hd, message, status);

	free_urb(es2, urb);
}

static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
{
	struct arpc *rpc;

	if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
		return NULL;

	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return NULL;

	INIT_LIST_HEAD(&rpc->list);
	rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
	if (!rpc->req)
		goto err_free_rpc;

	rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
	if (!rpc->resp)
		goto err_free_req;

	rpc->req->type = type;
	rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
	memcpy(rpc->req->data, payload, size);

	init_completion(&rpc->response_received);

	return rpc;

err_free_req:
	kfree(rpc->req);
err_free_rpc:
	kfree(rpc);

	return NULL;
}

static void arpc_free(struct arpc *rpc)
{
	kfree(rpc->req);
	kfree(rpc->resp);
	kfree(rpc);
}

static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
{
	struct arpc *rpc;

	list_for_each_entry(rpc, &es2->arpcs, list) {
		if (rpc->req->id == id)
			return rpc;
	}

	return NULL;
}

static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
{
	rpc->active = true;
	rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
	list_add_tail(&rpc->list, &es2->arpcs);
}

static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
{
	if (rpc->active) {
		rpc->active = false;
		list_del(&rpc->list);
	}
}

static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
{
	struct usb_device *udev = es2->usb_dev;
	int retval;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_ARPC_RUN,
				 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
				 0, 0,
				 rpc->req, le16_to_cpu(rpc->req->size),
				 ES2_USB_CTRL_TIMEOUT);
	if (retval < 0) {
		dev_err(&udev->dev,
			"failed to send ARPC request %d: %d\n",
			rpc->req->type, retval);
		return retval;
	}

	return 0;
}

static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout)
{
	struct arpc *rpc;
	unsigned long flags;
	int retval;

	if (result)
		*result = 0;

	rpc = arpc_alloc(payload, size, type);
	if (!rpc)
		return -ENOMEM;

	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_add(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

	retval = arpc_send(es2, rpc, timeout);
	if (retval)
		goto out_arpc_del;

	retval = wait_for_completion_interruptible_timeout(
						&rpc->response_received,
						msecs_to_jiffies(timeout));
	if (retval <= 0) {
		if (!retval)
			retval = -ETIMEDOUT;
		goto out_arpc_del;
	}

	if (rpc->resp->result) {
		retval = -EREMOTEIO;
		if (result)
			*result = rpc->resp->result;
	} else {
		retval = 0;
	}

out_arpc_del:
	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_del(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

	arpc_free(rpc);

	if (retval < 0 && retval != -EREMOTEIO) {
		dev_err(&es2->usb_dev->dev,
			"failed to execute ARPC: %d\n", retval);
	}

	return retval;
}

static void arpc_in_callback(struct urb *urb)
{
	struct es2_ap_dev *es2 = urb->context;
	struct device *dev = &urb->dev->dev;
	int status = check_urb_status(urb);
	struct arpc *rpc;
	struct arpc_response_message *resp;
	unsigned long flags;
	int retval;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*resp)) {
		dev_err(dev, "short arpc response received\n");
		goto exit;
	}

	resp = urb->transfer_buffer;
	spin_lock_irqsave(&es2->arpc_lock, flags);
	rpc = arpc_find(es2, resp->id);
	if (!rpc) {
		dev_err(dev, "invalid arpc response id received: %u\n",
			le16_to_cpu(resp->id));
spin_unlock_irqrestore(&es2->arpc_lock, flags); goto exit; } arpc_del(es2, rpc); memcpy(rpc->resp, resp, sizeof(*resp)); complete(&rpc->response_received); spin_unlock_irqrestore(&es2->arpc_lock, flags); exit: /* put our urb back in the request pool */ retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval); } #define APB1_LOG_MSG_SIZE 64 static void apb_log_get(struct es2_ap_dev *es2, char *buf) { int retval; do { retval = usb_control_msg(es2->usb_dev, usb_rcvctrlpipe(es2->usb_dev, 0), GB_APB_REQUEST_LOG, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0x00, 0x00, buf, APB1_LOG_MSG_SIZE, ES2_USB_CTRL_TIMEOUT); if (retval > 0) kfifo_in(&es2->apb_log_fifo, buf, retval); } while (retval > 0); } static int apb_log_poll(void *data) { struct es2_ap_dev *es2 = data; char *buf; buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; while (!kthread_should_stop()) { msleep(1000); apb_log_get(es2, buf); } kfree(buf); return 0; } static ssize_t apb_log_read(struct file *f, char __user *buf, size_t count, loff_t *ppos) { struct es2_ap_dev *es2 = file_inode(f)->i_private; ssize_t ret; size_t copied; char *tmp_buf; if (count > APB1_LOG_SIZE) count = APB1_LOG_SIZE; tmp_buf = kmalloc(count, GFP_KERNEL); if (!tmp_buf) return -ENOMEM; copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count); ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied); kfree(tmp_buf); return ret; } static const struct file_operations apb_log_fops = { .read = apb_log_read, }; static void usb_log_enable(struct es2_ap_dev *es2) { if (!IS_ERR_OR_NULL(es2->apb_log_task)) return; /* get log from APB1 */ es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log"); if (IS_ERR(es2->apb_log_task)) return; /* XXX We will need to rename this per APB */ es2->apb_log_dentry = debugfs_create_file("apb_log", 0444, gb_debugfs_get(), es2, &apb_log_fops); } static void usb_log_disable(struct es2_ap_dev *es2) { if (IS_ERR_OR_NULL(es2->apb_log_task)) return; debugfs_remove(es2->apb_log_dentry); es2->apb_log_dentry = NULL; kthread_stop(es2->apb_log_task); es2->apb_log_task = NULL; } static ssize_t apb_log_enable_read(struct file *f, char __user *buf, size_t count, loff_t *ppos) { struct es2_ap_dev *es2 = file_inode(f)->i_private; int enable = !IS_ERR_OR_NULL(es2->apb_log_task); char tmp_buf[3]; sprintf(tmp_buf, "%d\n", enable); return simple_read_from_buffer(buf, count, ppos, tmp_buf, 2); } static ssize_t apb_log_enable_write(struct file *f, const char __user *buf, size_t count, loff_t *ppos) { int enable; ssize_t retval; struct es2_ap_dev *es2 = file_inode(f)->i_private; retval = kstrtoint_from_user(buf, count, 10, &enable); if (retval) return retval; if (enable) usb_log_enable(es2); else usb_log_disable(es2); return count; } static const struct file_operations apb_log_enable_fops = { .read = apb_log_enable_read, .write = apb_log_enable_write, }; static int apb_get_cport_count(struct usb_device *udev) { int retval; __le16 *cport_count; cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL); if (!cport_count) return -ENOMEM; retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), GB_APB_REQUEST_CPORT_COUNT, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0, 0, cport_count, sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT); if (retval != sizeof(*cport_count)) { dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n", retval); if (retval >= 0) retval = -EIO; goto out; } retval = le16_to_cpu(*cport_count); /* We need to fit a CPort ID in one byte of a message header 
*/ if (retval > U8_MAX) { retval = U8_MAX; dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n"); } out: kfree(cport_count); return retval; } /* * The ES2 USB Bridge device has 15 endpoints * 1 Control - usual USB stuff + AP -> APBridgeA messages * 7 Bulk IN - CPort data in * 7 Bulk OUT - CPort data out */ static int ap_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct es2_ap_dev *es2; struct gb_host_device *hd; struct usb_device *udev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; __u8 ep_addr; int retval; int i; int num_cports; bool bulk_out_found = false; bool bulk_in_found = false; bool arpc_in_found = false; udev = usb_get_dev(interface_to_usbdev(interface)); num_cports = apb_get_cport_count(udev); if (num_cports < 0) { usb_put_dev(udev); dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n", num_cports); return num_cports; } hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX, num_cports); if (IS_ERR(hd)) { usb_put_dev(udev); return PTR_ERR(hd); } es2 = hd_to_es2(hd); es2->hd = hd; es2->usb_intf = interface; es2->usb_dev = udev; spin_lock_init(&es2->cport_out_urb_lock); INIT_KFIFO(es2->apb_log_fifo); usb_set_intfdata(interface, es2); /* * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated * dynamically. */ retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0); if (retval) goto error; retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1); if (retval) goto error; /* find all bulk endpoints */ iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; ep_addr = endpoint->bEndpointAddress; if (usb_endpoint_is_bulk_in(endpoint)) { if (!bulk_in_found) { es2->cport_in.endpoint = ep_addr; bulk_in_found = true; } else if (!arpc_in_found) { es2->arpc_endpoint_in = ep_addr; arpc_in_found = true; } else { dev_warn(&udev->dev, "Unused bulk IN endpoint found: 0x%02x\n", ep_addr); } continue; } if (usb_endpoint_is_bulk_out(endpoint)) { if (!bulk_out_found) { es2->cport_out_endpoint = ep_addr; bulk_out_found = true; } else { dev_warn(&udev->dev, "Unused bulk OUT endpoint found: 0x%02x\n", ep_addr); } continue; } dev_warn(&udev->dev, "Unknown endpoint type found, address 0x%02x\n", ep_addr); } if (!bulk_in_found || !arpc_in_found || !bulk_out_found) { dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n"); retval = -ENODEV; goto error; } /* Allocate buffers for our cport in messages */ for (i = 0; i < NUM_CPORT_IN_URB; ++i) { struct urb *urb; u8 *buffer; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { retval = -ENOMEM; goto error; } es2->cport_in.urb[i] = urb; buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL); if (!buffer) { retval = -ENOMEM; goto error; } usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, es2->cport_in.endpoint), buffer, ES2_GBUF_MSG_SIZE_MAX, cport_in_callback, hd); es2->cport_in.buffer[i] = buffer; } /* Allocate buffers for ARPC in messages */ for (i = 0; i < NUM_ARPC_IN_URB; ++i) { struct urb *urb; u8 *buffer; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { retval = -ENOMEM; goto error; } es2->arpc_urb[i] = urb; buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL); if (!buffer) { retval = -ENOMEM; goto error; } usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, es2->arpc_endpoint_in), buffer, ARPC_IN_SIZE_MAX, arpc_in_callback, es2); es2->arpc_buffer[i] = buffer; } /* Allocate urbs for our CPort OUT messages */ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { struct urb *urb; urb = usb_alloc_urb(0, 
GFP_KERNEL); if (!urb) { retval = -ENOMEM; goto error; } es2->cport_out_urb[i] = urb; es2->cport_out_urb_busy[i] = false; /* just to be anal */ } /* XXX We will need to rename this per APB */ es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable", 0644, gb_debugfs_get(), es2, &apb_log_enable_fops); INIT_LIST_HEAD(&es2->arpcs); spin_lock_init(&es2->arpc_lock); retval = es2_arpc_in_enable(es2); if (retval) goto error; retval = gb_hd_add(hd); if (retval) goto err_disable_arpc_in; retval = es2_cport_in_enable(es2, &es2->cport_in); if (retval) goto err_hd_del; return 0; err_hd_del: gb_hd_del(hd); err_disable_arpc_in: es2_arpc_in_disable(es2); error: es2_destroy(es2); return retval; } static void ap_disconnect(struct usb_interface *interface) { struct es2_ap_dev *es2 = usb_get_intfdata(interface); gb_hd_del(es2->hd); es2_cport_in_disable(es2, &es2->cport_in); es2_arpc_in_disable(es2); es2_destroy(es2); } static struct usb_driver es2_ap_driver = { .name = "es2_ap_driver", .probe = ap_probe, .disconnect = ap_disconnect, .id_table = id_table, .soft_unbind = 1, }; module_usb_driver(es2_ap_driver); MODULE_DESCRIPTION("Greybus AP USB driver for ES2 controller chips"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
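One mechanism worth isolating from the driver above is how the CPort id travels: it is packed into the single pad byte of the operation-message header (gb_message_cport_pack()/gb_message_cport_unpack()), which is also why apb_get_cport_count() clamps the reported count to U8_MAX. A self-contained toy version of that round trip, with the header reduced to the one relevant field and all names invented:

#include <stdint.h>
#include <assert.h>

/* Reduced stand-in for struct gb_operation_msg_hdr: only the pad byte. */
struct example_msg_hdr {
	uint8_t pad[1];
};

static void example_cport_pack(struct example_msg_hdr *h, uint16_t cport_id)
{
	h->pad[0] = cport_id;	/* must fit in one byte, hence the U8_MAX clamp */
}

static uint16_t example_cport_unpack(struct example_msg_hdr *h)
{
	uint16_t cport_id = h->pad[0];

	h->pad[0] = 0;		/* clear it, as gb_message_cport_clear() does */
	return cport_id;
}

int main(void)
{
	struct example_msg_hdr h;

	example_cport_pack(&h, 42);
	assert(example_cport_unpack(&h) == 42);
	assert(h.pad[0] == 0);
	return 0;
}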
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_INLINE_H
#define _LINUX_HUGETLB_INLINE_H

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mm.h>

static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & VM_HUGETLB);
}

#else

static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return false;
}

#endif

#endif
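A hedged sketch of how such a predicate is typically consumed (example_vma_page_size is an invented name; vma_kernel_pagesize() is the in-kernel helper for hugetlb VMA sizes): because the !CONFIG_HUGETLB_PAGE variant is a constant false, the hugetlb branch compiles away entirely on kernels without hugetlb support.

#include <linux/hugetlb_inline.h>
#include <linux/hugetlb.h>

static unsigned long example_vma_page_size(struct vm_area_struct *vma)
{
	/*
	 * On !CONFIG_HUGETLB_PAGE kernels is_vm_hugetlb_page() is a
	 * constant false, so this folds to "return PAGE_SIZE" at
	 * compile time and the hugetlb code is never referenced.
	 */
	if (is_vm_hugetlb_page(vma))
		return vma_kernel_pagesize(vma);

	return PAGE_SIZE;
}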
/* Copyright 2011, Siemens AG
 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Jon's code is based on 6lowpan implementation for Contiki which is:
 * Copyright (c) 2008, Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
*/ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/ieee802154.h> #include <linux/if_arp.h> #include <net/ipv6.h> #include <net/netdev_lock.h> #include "6lowpan_i.h" static int open_count; static const struct header_ops lowpan_header_ops = { .create = lowpan_header_create, }; static int lowpan_dev_init(struct net_device *ldev) { netdev_lockdep_set_classes(ldev); return 0; } static int lowpan_open(struct net_device *dev) { if (!open_count) lowpan_rx_init(); open_count++; return 0; } static int lowpan_stop(struct net_device *dev) { open_count--; if (!open_count) lowpan_rx_exit(); return 0; } static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n) { struct lowpan_802154_neigh *neigh = lowpan_802154_neigh(neighbour_priv(n)); /* default no short_addr is available for a neighbour */ neigh->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC); return 0; } static int lowpan_get_iflink(const struct net_device *dev) { return READ_ONCE(lowpan_802154_dev(dev)->wdev->ifindex); } static const struct net_device_ops lowpan_netdev_ops = { .ndo_init = lowpan_dev_init, .ndo_start_xmit = lowpan_xmit, .ndo_open = lowpan_open, .ndo_stop = lowpan_stop, .ndo_neigh_construct = lowpan_neigh_construct, .ndo_get_iflink = lowpan_get_iflink, }; static void lowpan_setup(struct net_device *ldev) { memset(ldev->broadcast, 0xff, IEEE802154_ADDR_LEN); /* We need an ipv6hdr as minimum len when calling xmit */ ldev->hard_header_len = sizeof(struct ipv6hdr); ldev->flags = IFF_BROADCAST | IFF_MULTICAST; ldev->priv_flags |= IFF_NO_QUEUE; ldev->netdev_ops = &lowpan_netdev_ops; ldev->header_ops = &lowpan_header_ops; ldev->needs_free_netdev = true; ldev->netns_immutable = true; } static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN) return -EINVAL; } return 0; } static int lowpan_newlink(struct net_device *ldev, struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct nlattr **tb = params->tb; struct net_device *wdev; int ret; ASSERT_RTNL(); pr_debug("adding new link\n"); if (!tb[IFLA_LINK]) return -EINVAL; if (params->link_net && !net_eq(params->link_net, dev_net(ldev))) return -EINVAL; /* find and hold wpan device */ wdev = dev_get_by_index(dev_net(ldev), nla_get_u32(tb[IFLA_LINK])); if (!wdev) return -ENODEV; if (wdev->type != ARPHRD_IEEE802154) { dev_put(wdev); return -EINVAL; } if (wdev->ieee802154_ptr->lowpan_dev) { dev_put(wdev); return -EBUSY; } lowpan_802154_dev(ldev)->wdev = wdev; /* Set the lowpan hardware address to the wpan hardware address. */ __dev_addr_set(ldev, wdev->dev_addr, IEEE802154_ADDR_LEN); /* We need headroom for possible wpan_dev_hard_header call and tailroom * for encryption/fcs handling. The lowpan interface will replace * the IPv6 header with 6LoWPAN header. At worst case the 6LoWPAN * header has LOWPAN_IPHC_MAX_HEADER_LEN more bytes than the IPv6 * header. 
*/ ldev->needed_headroom = LOWPAN_IPHC_MAX_HEADER_LEN + wdev->needed_headroom; ldev->needed_tailroom = wdev->needed_tailroom; ldev->neigh_priv_len = sizeof(struct lowpan_802154_neigh); ret = lowpan_register_netdevice(ldev, LOWPAN_LLTYPE_IEEE802154); if (ret < 0) { dev_put(wdev); return ret; } wdev->ieee802154_ptr->lowpan_dev = ldev; return 0; } static void lowpan_dellink(struct net_device *ldev, struct list_head *head) { struct net_device *wdev = lowpan_802154_dev(ldev)->wdev; ASSERT_RTNL(); wdev->ieee802154_ptr->lowpan_dev = NULL; lowpan_unregister_netdevice(ldev); dev_put(wdev); } static struct rtnl_link_ops lowpan_link_ops __read_mostly = { .kind = "lowpan", .priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_802154_dev)), .setup = lowpan_setup, .newlink = lowpan_newlink, .dellink = lowpan_dellink, .validate = lowpan_validate, }; static inline int __init lowpan_netlink_init(void) { return rtnl_link_register(&lowpan_link_ops); } static inline void lowpan_netlink_fini(void) { rtnl_link_unregister(&lowpan_link_ops); } static int lowpan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct wpan_dev *wpan_dev; if (ndev->type != ARPHRD_IEEE802154) return NOTIFY_DONE; wpan_dev = ndev->ieee802154_ptr; if (!wpan_dev) return NOTIFY_DONE; switch (event) { case NETDEV_UNREGISTER: /* Check if wpan interface is unregistered that we * also delete possible lowpan interfaces which belongs * to the wpan interface. */ if (wpan_dev->lowpan_dev) lowpan_dellink(wpan_dev->lowpan_dev, NULL); break; default: return NOTIFY_DONE; } return NOTIFY_OK; } static struct notifier_block lowpan_dev_notifier = { .notifier_call = lowpan_device_event, }; static int __init lowpan_init_module(void) { int err = 0; err = lowpan_net_frag_init(); if (err < 0) goto out; err = lowpan_netlink_init(); if (err < 0) goto out_frag; err = register_netdevice_notifier(&lowpan_dev_notifier); if (err < 0) goto out_pack; return 0; out_pack: lowpan_netlink_fini(); out_frag: lowpan_net_frag_exit(); out: return err; } static void __exit lowpan_cleanup_module(void) { lowpan_netlink_fini(); lowpan_net_frag_exit(); unregister_netdevice_notifier(&lowpan_dev_notifier); } module_init(lowpan_init_module); module_exit(lowpan_cleanup_module); MODULE_DESCRIPTION("IPv6 over Low power Wireless Personal Area Network IEEE 802.15.4 core"); MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("lowpan");
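The NETDEV_UNREGISTER case above is the usual idiom for tying a virtual interface's lifetime to its lower device. A generic, hedged sketch of that pattern (all names here are invented; the real handler above additionally checks ieee802154_ptr and deletes the attached lowpan device):

#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/notifier.h>

static int example_device_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

	/* Only react to lower devices of the type we stack on. */
	if (ndev->type != ARPHRD_IEEE802154)
		return NOTIFY_DONE;

	if (event == NETDEV_UNREGISTER) {
		/* tear down any upper device still attached to ndev here */
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_device_event,
};

/*
 * register_netdevice_notifier(&example_notifier) in module init and
 * unregister_netdevice_notifier() on exit, as the module above does.
 */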
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/pm_qos.h>

static inline void device_pm_init_common(struct device *dev)
{
	if (!dev->power.early_init) {
		spin_lock_init(&dev->power.lock);
		dev->power.qos = NULL;
		dev->power.early_init = true;
	}
}

#ifdef CONFIG_PM

static inline void pm_runtime_early_init(struct device *dev)
{
	dev->power.disable_depth = 1;
	device_pm_init_common(dev);
}

extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_reinit(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
extern u64 pm_runtime_active_time(struct device *dev);

#define WAKE_IRQ_DEDICATED_ALLOCATED	BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED	BIT(1)
#define WAKE_IRQ_DEDICATED_REVERSE	BIT(2)
#define WAKE_IRQ_DEDICATED_MASK		(WAKE_IRQ_DEDICATED_ALLOCATED | \
					 WAKE_IRQ_DEDICATED_MANAGED | \
					 WAKE_IRQ_DEDICATED_REVERSE)
#define WAKE_IRQ_DEDICATED_ENABLED	BIT(3)

struct wake_irq {
	struct device *dev;
	unsigned int status;
	int irq;
	const char *name;
};

extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_enable_wake_irq_check(struct device *dev,
					 bool can_change_status);
extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable);
extern void dev_pm_enable_wake_irq_complete(struct device *dev);

#ifdef CONFIG_PM_SLEEP

extern void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq);
extern void device_wakeup_detach_irq(struct device *dev);
extern void device_wakeup_arm_wake_irqs(void);
extern void device_wakeup_disarm_wake_irqs(void);

#else

static inline void device_wakeup_attach_irq(struct device *dev,
					    struct wake_irq *wakeirq) {}

static inline void device_wakeup_detach_irq(struct device *dev)
{
}

#endif /* CONFIG_PM_SLEEP */

/*
 * sysfs.c
 */

extern int dpm_sysfs_add(struct device *dev);
extern void dpm_sysfs_remove(struct device *dev);
extern void rpm_sysfs_remove(struct device *dev);
extern int wakeup_sysfs_add(struct device *dev);
extern void wakeup_sysfs_remove(struct device *dev);
extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
extern int pm_qos_sysfs_add_flags(struct device *dev);
extern void pm_qos_sysfs_remove_flags(struct device *dev);
extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
extern int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);

#else /* CONFIG_PM */

static inline void pm_runtime_early_init(struct device *dev)
{
	device_pm_init_common(dev);
}

static inline void pm_runtime_init(struct device *dev) {}
static inline void pm_runtime_reinit(struct device *dev) {}
static inline void pm_runtime_remove(struct device *dev) {}

static inline int dpm_sysfs_add(struct device *dev) { return 0; }
static inline void dpm_sysfs_remove(struct device *dev) {}
static inline int
dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) { return 0; } #endif #ifdef CONFIG_PM_SLEEP /* kernel/power/main.c */ extern int pm_async_enabled; /* drivers/base/power/main.c */ extern struct list_head dpm_list; /* The active device list */ static inline struct device *to_device(struct list_head *entry) { return container_of(entry, struct device, power.entry); } extern void device_pm_sleep_init(struct device *dev); extern void device_pm_add(struct device *); extern void device_pm_remove(struct device *); extern void device_pm_move_before(struct device *, struct device *); extern void device_pm_move_after(struct device *, struct device *); extern void device_pm_move_last(struct device *); extern void device_pm_check_callbacks(struct device *dev); static inline bool device_pm_initialized(struct device *dev) { return dev->power.in_dpm_list; } /* drivers/base/power/wakeup_stats.c */ extern int wakeup_source_sysfs_add(struct device *parent, struct wakeup_source *ws); extern void wakeup_source_sysfs_remove(struct wakeup_source *ws); extern int pm_wakeup_source_sysfs_add(struct device *parent); #else /* !CONFIG_PM_SLEEP */ static inline void device_pm_sleep_init(struct device *dev) {} static inline void device_pm_add(struct device *dev) {} static inline void device_pm_remove(struct device *dev) { pm_runtime_remove(dev); } static inline void device_pm_move_before(struct device *deva, struct device *devb) {} static inline void device_pm_move_after(struct device *deva, struct device *devb) {} static inline void device_pm_move_last(struct device *dev) {} static inline void device_pm_check_callbacks(struct device *dev) {} static inline bool device_pm_initialized(struct device *dev) { return device_is_registered(dev); } static inline int pm_wakeup_source_sysfs_add(struct device *parent) { return 0; } #endif /* !CONFIG_PM_SLEEP */ static inline void device_pm_init(struct device *dev) { device_pm_init_common(dev); device_pm_sleep_init(dev); pm_runtime_init(dev); }
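Nearly everything in this header follows the same idiom: a real declaration under the config option, and an empty static inline stub otherwise, so callers never carry their own #ifdefs. A minimal illustration with hypothetical names:

/* example_feature.h (hypothetical) */
#ifdef CONFIG_EXAMPLE_FEATURE
extern void example_feature_init(struct device *dev);
#else
static inline void example_feature_init(struct device *dev) {}
#endif

/*
 * Callers then invoke example_feature_init(dev) unconditionally; with the
 * option disabled the empty inline stub is compiled away, which is exactly
 * how device_pm_init() above composes its three helpers.
 */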
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads.  SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
                          enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
        trace_block_touch_buffer(bh);
        folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
        clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_atomic();
        wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);
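/*
 * Illustrative sketch (editor's addition): the lock/modify/unlock pattern
 * that __lock_buffer()/unlock_buffer() above support.  lock_buffer() is the
 * usual entry point (it falls back to __lock_buffer() on contention).  The
 * function name and parameters are hypothetical.
 */
static inline void example_fill_buffer(struct buffer_head *bh,
                                       const void *src, size_t len)
{
        lock_buffer(bh);                /* excludes I/O and other writers */
        memcpy(bh->b_data, src, len);   /* assumes len <= bh->b_size */
        set_buffer_uptodate(bh);
        unlock_buffer(bh);              /* wakes waiters via wake_up_bit() */
}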
/*
 * Returns whether the folio has dirty or writeback buffers.  If all the
 * buffers are unlocked and clean then the folio_test_dirty information is
 * stale.  If any of the buffers are locked, it is assumed they are locked
 * for I/O.
 */
void buffer_check_dirty_writeback(struct folio *folio,
                                  bool *dirty, bool *writeback)
{
        struct buffer_head *head, *bh;
        *dirty = false;
        *writeback = false;

        BUG_ON(!folio_test_locked(folio));

        head = folio_buffers(folio);
        if (!head)
                return;

        if (folio_test_writeback(folio))
                *writeback = true;

        bh = head;
        do {
                if (buffer_locked(bh))
                        *writeback = true;

                if (buffer_dirty(bh))
                        *dirty = true;

                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
        wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
        if (!test_bit(BH_Quiet, &bh->b_state))
                printk_ratelimited(KERN_ERR
                        "Buffer I/O error on dev %pg, logical block %llu%s\n",
                        bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens due to failed read-ahead attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                buffer_io_error(bh, ", lost sync page write");
                mark_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);
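/*
 * Illustrative sketch (editor's addition): the synchronous-write pattern
 * that end_buffer_write_sync() is built for, essentially what
 * sync_dirty_buffer() does.  The extra get_bh() balances the put_bh() in
 * the completion handler.  The function name is hypothetical.
 */
static inline int example_write_bh_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        get_bh(bh);                     /* ref consumed by the end_io handler */
        bh->b_end_io = end_buffer_write_sync;
        submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}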
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic)
{
        struct address_space *bd_mapping = bdev->bd_mapping;
        const int blkbits = bd_mapping->host->i_blkbits;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct folio *folio;
        int all_mapped = 1;
        static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

        index = ((loff_t)block << blkbits) / PAGE_SIZE;
        folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
        if (IS_ERR(folio))
                goto out;

        /*
         * Folio lock protects the buffers.  Callers that cannot block
         * will fall back to serializing vs try_to_free_buffers() via
         * the i_private_lock.
         */
        if (atomic)
                spin_lock(&bd_mapping->i_private_lock);
        else
                folio_lock(folio);

        head = folio_buffers(folio);
        if (!head)
                goto out_unlock;
        /*
         * Upon a noref migration, the folio lock serializes here;
         * otherwise bail.
         */
        if (test_bit_acquire(BH_Migrate, &head->b_state)) {
                WARN_ON(!atomic);
                goto out_unlock;
        }

        bh = head;
        do {
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file I/O on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
        if (all_mapped && __ratelimit(&last_warned)) {
                printk("__find_get_block_slow() failed. block=%llu, "
                       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
                       "device %pg blocksize: %d\n",
                       (unsigned long long)block,
                       (unsigned long long)bh->b_blocknr,
                       bh->b_state, bh->b_size, bdev,
                       1 << blkbits);
        }
out_unlock:
        if (atomic)
                spin_unlock(&bd_mapping->i_private_lock);
        else
                folio_unlock(folio);
        folio_put(folio);
out:
        return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct folio *folio;
        int folio_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        folio = bh->b_folio;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                buffer_io_error(bh, ", async page read");
        }

        /*
         * Be _very_ careful from here on.  Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = folio_buffers(folio);
        spin_lock_irqsave(&first->b_uptodate_lock, flags);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        folio_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

        folio_end_read(folio, folio_uptodate);
        return;

still_busy:
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
}

struct postprocess_bh_ctx {
        struct work_struct work;
        struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
        struct postprocess_bh_ctx *ctx =
                container_of(work, struct postprocess_bh_ctx, work);
        struct buffer_head *bh = ctx->bh;
        bool valid;

        valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
        end_buffer_async_read(bh, valid);
        kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
        struct folio *folio = bh->b_folio;
        struct inode *inode = folio->mapping->host;

        return fsverity_active(inode) &&
                /* needed by ext4 */
                folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
        struct postprocess_bh_ctx *ctx =
                container_of(work, struct postprocess_bh_ctx, work);
        struct buffer_head *bh = ctx->bh;
        int err;

        err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
                                               bh_offset(bh));
        if (err == 0 && need_fsverity(bh)) {
                /*
                 * We use different work queues for decryption and for verity
                 * because verity may require reading metadata pages that need
                 * decryption, and we shouldn't recurse to the same workqueue.
                 */
                INIT_WORK(&ctx->work, verify_bh);
                fsverity_enqueue_verify_work(&ctx->work);
                return;
        }
        end_buffer_async_read(bh, err == 0);
        kfree(ctx);
}
/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
        struct inode *inode = bh->b_folio->mapping->host;
        bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
        bool verify = need_fsverity(bh);

        /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
        if (uptodate && (decrypt || verify)) {
                struct postprocess_bh_ctx *ctx =
                        kmalloc(sizeof(*ctx), GFP_ATOMIC);

                if (ctx) {
                        ctx->bh = bh;
                        if (decrypt) {
                                INIT_WORK(&ctx->work, decrypt_bh);
                                fscrypt_enqueue_decrypt_work(&ctx->work);
                        } else {
                                INIT_WORK(&ctx->work, verify_bh);
                                fsverity_enqueue_verify_work(&ctx->work);
                        }
                        return;
                }
                uptodate = 0;
        }
        end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_folio() - folios which are unlocked
 * during I/O, and which have the writeback flag cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct folio *folio;

        BUG_ON(!buffer_async_write(bh));

        folio = bh->b_folio;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                buffer_io_error(bh, ", lost async page write");
                mark_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }

        first = folio_buffers(folio);
        spin_lock_irqsave(&first->b_uptodate_lock, flags);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
        folio_end_writeback(folio);
        return;

still_busy:
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone starting new async I/O reads any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read_io;
        set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
                                          bh_end_io_t *handler)
{
        bh->b_end_io = handler;
        set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);
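/*
 * Illustrative sketch (editor's addition): how a writeback path uses
 * mark_buffer_async_write().  This mirrors the two-pass shape of
 * __block_write_full_folio() later in this file: mark every dirty buffer
 * async-write under its lock first, then submit, so the completion handler
 * sees a stable set.  The function name is hypothetical.
 */
static inline void example_writeback_folio_buffers(struct buffer_head *head)
{
        struct buffer_head *bh = head;

        do {                                    /* pass 1: mark */
                if (trylock_buffer(bh)) {
                        if (test_clear_buffer_dirty(bh))
                                mark_buffer_async_write(bh);
                        else
                                unlock_buffer(bh);
                }
                bh = bh->b_this_page;
        } while (bh != head);

        bh = head;
        do {                                    /* pass 2: submit */
                if (buffer_async_write(bh))
                        submit_bh(REQ_OP_WRITE, bh);
                bh = bh->b_this_page;
        } while (bh != head);
}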
/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->i_private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for i_private_list is via the i_private_lock in the
 * address_space which backs the buffers.  Which is different from the
 * address_space against which the buffers are listed.  So for a particular
 * address_space, mapping->i_private_lock does *not* protect
 * mapping->i_private_list!  In fact, mapping->i_private_list will always be
 * protected by the backing blockdev's ->i_private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->i_private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->i_private_list via these
 * utility functions are free to use i_private_lock and i_private_list for
 * whatever they want.  The only requirement is that list_empty(i_private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's i_private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.i_private_list);
}

/*
 * osync is designed to support O_SYNC I/O.  It waits synchronously for
 * all already-submitted I/O to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}
/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->i_private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->i_private_data;

        if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->i_private_lock,
                                  &mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:       file to synchronize
 * @start:      start offset in bytes
 * @end:        end offset in bytes (inclusive)
 * @datasync:   only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
                                  bool datasync)
{
        struct inode *inode = file->f_mapping->host;
        int err;
        int ret;

        err = file_write_and_wait_range(file, start, end);
        if (err)
                return err;

        ret = sync_mapping_buffers(inode->i_mapping);
        if (!(inode->i_state & I_DIRTY_ALL))
                goto out;
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                goto out;

        err = sync_inode_metadata(inode, 1);
        if (ret == 0)
                ret = err;

out:
        /* check and advance again to catch errors after syncing out buffers */
        err = file_check_and_advance_wb_err(file);
        if (ret == 0)
                ret = err;
        return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:       file to synchronize
 * @start:      start offset in bytes
 * @end:        end offset in bytes (inclusive)
 * @datasync:   only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.  This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
                          bool datasync)
{
        struct inode *inode = file->f_mapping->host;
        int ret;

        ret = generic_buffers_fsync_noflush(file, start, end, datasync);
        if (!ret)
                ret = blkdev_issue_flush(inode->i_sb->s_bdev);
        return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                          sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh;

        bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        write_dirty_buffer(bh, 0);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_folio->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->i_private_data) {
                mapping->i_private_data = buffer_mapping;
        } else {
                BUG_ON(mapping->i_private_data != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->i_private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                               &mapping->i_private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->i_private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
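/*
 * Illustrative sketch (editor's addition): wiring the helpers above into a
 * hypothetical simple filesystem ("examplefs" is an assumed name).  While
 * building metadata, dependent blockdev buffers are queued with
 * mark_buffer_dirty_inode(); at fsync time generic_buffers_fsync() writes
 * and waits on them via sync_mapping_buffers() and then issues a device
 * cache flush.
 */
static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
                           int datasync)
{
        return generic_buffers_fsync(file, start, end, datasync);
}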
/**
 * block_dirty_folio - Mark a folio as dirty.
 * @mapping: The address space containing this folio.
 * @folio: The folio to mark dirty.
 *
 * Filesystems which use buffer_heads can use this function as their
 * ->dirty_folio implementation.  Some filesystems need to do a little
 * work before calling this function.  Filesystems which do not use
 * buffer_heads should call filemap_dirty_folio() instead.
 *
 * If the folio has buffers, the uptodate buffers are set dirty, to
 * preserve dirty-state coherency between the folio and the buffers.
 * Buffers added to a dirty folio are created dirty.
 *
 * The buffers are dirtied before the folio is dirtied.  There's a small
 * race window in which writeback may see the folio cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the folio
 * dirty before the buffers, writeback could clear the folio dirty flag,
 * see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * folio on the dirty folio list.
 *
 * We use i_private_lock to lock against try_to_free_buffers() while
 * using the folio's buffer list.  This also prevents clean buffers
 * being added to the folio after it was set dirty.
 *
 * Context: May only be called from process context.  Does not sleep.
 * Caller must ensure that @folio cannot be truncated during this call,
 * typically by holding the folio lock or having a page in the folio
 * mapped and holding the page table lock.
 *
 * Return: True if the folio was dirtied; false if it was already dirtied.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
        struct buffer_head *head;
        bool newly_dirty;

        spin_lock(&mapping->i_private_lock);
        head = folio_buffers(folio);
        if (head) {
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        /*
         * Lock out page's memcg migration to keep PageDirty
         * synchronized with per-memcg dirty page counters.
         */
        newly_dirty = !folio_test_set_dirty(folio);
        spin_unlock(&mapping->i_private_lock);

        if (newly_dirty)
                __folio_mark_dirty(folio, mapping, 1);

        if (newly_dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);
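/*
 * Illustrative sketch (editor's addition): block_dirty_folio() and
 * block_invalidate_folio() are designed to be plugged directly into a
 * buffer_head-based filesystem's address_space_operations.  With no
 * ->release_folio, the VFS falls back to try_to_free_buffers().
 * "examplefs" is an assumed name; a real aops also needs read/write
 * methods.
 */
static const struct address_space_operations examplefs_aops = {
        .dirty_folio            = block_dirty_folio,
        .invalidate_folio       = block_invalidate_folio,
};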
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct address_space *mapping;
        int err = 0, err2;
        struct blk_plug plug;
        LIST_HEAD(tmp);

        blk_start_plug(&plug);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * write_dirty_buffer() actually writes the
                                 * current contents - it is a noop if I/O is
                                 * still in flight on potentially older
                                 * contents.
                                 */
                                write_dirty_buffer(bh, REQ_SYNC);

                                /*
                                 * Kick off IO for the previous mapping. Note
                                 * that we will not run the very last mapping,
                                 * wait_on_buffer() will do that for us
                                 * through sync_buffer().
                                 */
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        spin_unlock(lock);
        blk_finish_plug(&plug);
        spin_lock(lock);

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->i_private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
 * assumes that all the buffers are against the blockdev.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->i_private_list;
                struct address_space *buffer_mapping = mapping->i_private_data;

                spin_lock(&buffer_mapping->i_private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->i_private_lock);
        }
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->i_private_list;
                struct address_space *buffer_mapping = mapping->i_private_data;

                spin_lock(&buffer_mapping->i_private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->i_private_lock);
        }
        return ret;
}
/*
 * Create the appropriate buffers when given a folio for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio,
                                        unsigned long size, gfp_t gfp)
{
        struct buffer_head *bh, *head;
        long offset;
        struct mem_cgroup *memcg, *old_memcg;

        /* The folio lock pins the memcg */
        memcg = folio_memcg(folio);
        old_memcg = set_active_memcg(memcg);

        head = NULL;
        offset = folio_size(folio);
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(gfp);
                if (!bh)
                        goto no_grow;

                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_size = size;

                /* Link the buffer to its folio */
                folio_set_bh(bh, folio, offset);
        }
out:
        set_active_memcg(old_memcg);
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
{
        gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;

        return folio_alloc_buffers(page_folio(page), size, gfp);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void link_dev_buffers(struct folio *folio,
                                    struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        folio_attach_private(folio, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = bdev_nr_bytes(bdev);

        if (sz) {
                unsigned int sizebits = blksize_bits(size);

                retval = (sz >> sizebits);
        }
        return retval;
}

/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
                                   struct block_device *bdev, unsigned size)
{
        struct buffer_head *head = folio_buffers(folio);
        struct buffer_head *bh = head;
        bool uptodate = folio_test_uptodate(folio);
        sector_t block = div_u64(folio_pos(folio), size);
        sector_t end_block = blkdev_max_block(bdev, size);

        do {
                if (!buffer_mapped(bh)) {
                        bh->b_end_io = NULL;
                        bh->b_private = NULL;
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        if (block < end_block)
                                set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);

        /*
         * Caller needs to validate requested block against end of device.
         */
        return end_block;
}
/*
 * Create the page-cache folio that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 *
 * Returns false if we have a failure which cannot be cured by retrying
 * without sleeping.  Returns true if we succeeded, or the caller should retry.
 */
static bool grow_dev_folio(struct block_device *bdev, sector_t block,
                           pgoff_t index, unsigned size, gfp_t gfp)
{
        struct address_space *mapping = bdev->bd_mapping;
        struct folio *folio;
        struct buffer_head *bh;
        sector_t end_block = 0;

        folio = __filemap_get_folio(mapping, index,
                        FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
        if (IS_ERR(folio))
                return false;

        bh = folio_buffers(folio);
        if (bh) {
                if (bh->b_size == size) {
                        end_block = folio_init_buffers(folio, bdev, size);
                        goto unlock;
                }

                /*
                 * Retrying may succeed; for example the folio may finish
                 * writeback, or buffers may be cleaned.  This should not
                 * happen very often; maybe we have old buffers attached to
                 * this blockdev's page cache and we're trying to change
                 * the block size?
                 */
                if (!try_to_free_buffers(folio)) {
                        end_block = ~0ULL;
                        goto unlock;
                }
        }

        bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
        if (!bh)
                goto unlock;

        /*
         * Link the folio to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the folio lock.
         */
        spin_lock(&mapping->i_private_lock);
        link_dev_buffers(folio, bh);
        end_block = folio_init_buffers(folio, bdev, size);
        spin_unlock(&mapping->i_private_lock);
unlock:
        folio_unlock(folio);
        folio_put(folio);
        return block < end_block;
}

/*
 * Create buffers for the specified block device block's folio.  If
 * that folio was dirty, the buffers are set dirty also.  Returns false
 * if we've hit a permanent error.
 */
static bool grow_buffers(struct block_device *bdev, sector_t block,
                         unsigned size, gfp_t gfp)
{
        loff_t pos;

        /*
         * Check for a block which lies outside our maximum possible
         * pagecache index.
         */
        if (check_mul_overflow(block, (sector_t)size, &pos) ||
            pos > MAX_LFS_FILESIZE) {
                printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
                        __func__, (unsigned long long)block, bdev);
                return false;
        }

        /* Create a folio with the proper size buffers */
        return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
              unsigned size, gfp_t gfp)
{
        bool blocking = gfpflags_allow_blocking(gfp);

        if (WARN_ON_ONCE(!IS_ALIGNED(size, bdev_logical_block_size(bdev)))) {
                printk(KERN_ERR "getblk(): block size %d not aligned to logical block size %d\n",
                                size, bdev_logical_block_size(bdev));
                return NULL;
        }

        for (;;) {
                struct buffer_head *bh;

                if (!grow_buffers(bdev, block, size, gfp))
                        return NULL;

                if (blocking)
                        bh = __find_get_block_nonatomic(bdev, block, size);
                else
                        bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;
        }
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
        WARN_ON_ONCE(!buffer_uptodate(bh));

        trace_block_dirty_buffer(bh);

        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
         * Don't let the final "is it dirty" escape to before we
         * perhaps modified the buffer.
         */
        if (buffer_dirty(bh)) {
                smp_mb();
                if (buffer_dirty(bh))
                        return;
        }

        if (!test_set_buffer_dirty(bh)) {
                struct folio *folio = bh->b_folio;
                struct address_space *mapping = NULL;

                if (!folio_test_set_dirty(folio)) {
                        mapping = folio->mapping;
                        if (mapping)
                                __folio_mark_dirty(folio, mapping, 0);
                }
                if (mapping)
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty);
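/*
 * Illustrative sketch (editor's addition): the read-modify-write pattern
 * mark_buffer_dirty() serves.  sb_bread() elevates the refcount, the
 * update happens under the buffer lock, and writeback of the dirtied
 * buffer is left to the VM.  Names and the on-disk layout are hypothetical.
 */
static inline int example_update_u32(struct super_block *sb, sector_t block,
                                     unsigned int offset, u32 value)
{
        struct buffer_head *bh = sb_bread(sb, block);

        if (!bh)
                return -EIO;
        lock_buffer(bh);
        *(__le32 *)(bh->b_data + offset) = cpu_to_le32(value);
        unlock_buffer(bh);
        mark_buffer_dirty(bh);          /* dirties bh, folio and inode */
        brelse(bh);
        return 0;
}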
void mark_buffer_write_io_error(struct buffer_head *bh)
{
        set_buffer_write_io_error(bh);
        /* FIXME: do we need to set this in both places? */
        if (bh->b_folio && bh->b_folio->mapping)
                mapping_set_error(bh->b_folio->mapping, -EIO);
        if (bh->b_assoc_map)
                mapping_set_error(bh->b_assoc_map, -EIO);
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/**
 * __brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
 */
void __brelse(struct buffer_head *bh)
{
        if (atomic_read(&bh->b_count)) {
                put_bh(bh);
                return;
        }
        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/**
 * __bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * This variant of bforget() can be called if @bh is guaranteed to not
 * be NULL.
 */
void __bforget(struct buffer_head *bh)
{
        clear_buffer_dirty(bh);
        if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_folio->mapping;

                spin_lock(&buffer_mapping->i_private_lock);
                list_del_init(&bh->b_assoc_buffers);
                bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->i_private_lock);
        }
        __brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(REQ_OP_READ, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
        }
        brelse(bh);
        return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE     16

struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()   local_irq_disable()
#define bh_lru_unlock() local_irq_enable()
#else
#define bh_lru_lock()   preempt_disable()
#define bh_lru_unlock() preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
        BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
        struct buffer_head *evictee = bh;
        struct bh_lru *b;
        int i;

        check_irqs_on();
        bh_lru_lock();

        /*
         * The refcount a buffer_head holds while it sits in a bh_lru pins
         * the attached page (i.e., it defeats try_to_free_buffers()), which
         * could make page migration fail.  Skip putting the upcoming bh
         * into the bh_lru until migration is done.
         */
        if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
                bh_lru_unlock();
                return;
        }

        b = this_cpu_ptr(&bh_lrus);
        for (i = 0; i < BH_LRU_SIZE; i++) {
                swap(evictee, b->bhs[i]);
                if (evictee == bh) {
                        bh_lru_unlock();
                        return;
                }
        }

        get_bh(bh);
        bh_lru_unlock();
        brelse(evictee);
}
/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *ret = NULL;
        unsigned int i;

        check_irqs_on();
        bh_lru_lock();
        if (cpu_is_isolated(smp_processor_id())) {
                bh_lru_unlock();
                return NULL;
        }
        for (i = 0; i < BH_LRU_SIZE; i++) {
                struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

                if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
                    bh->b_size == size) {
                        if (i) {
                                while (i) {
                                        __this_cpu_write(bh_lrus.bhs[i],
                                                __this_cpu_read(bh_lrus.bhs[i - 1]));
                                        i--;
                                }
                                __this_cpu_write(bh_lrus.bhs[0], bh);
                        }
                        get_bh(bh);
                        ret = bh;
                        break;
                }
        }
        bh_lru_unlock();
        return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.  Atomic context callers may also return NULL if the buffer is being
 * migrated; similarly the page is not marked accessed either.
 */
static struct buffer_head *
find_get_block_common(struct block_device *bdev, sector_t block,
                      unsigned size, bool atomic)
{
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

        if (bh == NULL) {
                /* __find_get_block_slow will mark the page accessed */
                bh = __find_get_block_slow(bdev, block, atomic);
                if (bh)
                        bh_lru_install(bh);
        } else
                touch_buffer(bh);

        return bh;
}

struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
        return find_get_block_common(bdev, block, size, true);
}
EXPORT_SYMBOL(__find_get_block);

/* same as __find_get_block() but allows sleeping contexts */
struct buffer_head *
__find_get_block_nonatomic(struct block_device *bdev, sector_t block,
                           unsigned size)
{
        return find_get_block_common(bdev, block, size, false);
}
EXPORT_SYMBOL(__find_get_block_nonatomic);

/**
 * bdev_getblk - Get a buffer_head in a block device's buffer cache.
 * @bdev: The block device.
 * @block: The block number.
 * @size: The size of buffer_heads for this @bdev.
 * @gfp: The memory allocation flags to use.
 *
 * The returned buffer head has its reference count incremented, but is
 * not locked.  The caller should call brelse() when it has finished
 * with the buffer.  The buffer may not be uptodate.  If needed, the
 * caller can bring it uptodate either by reading it or overwriting it.
 *
 * Return: The buffer head, or NULL if memory could not be allocated.
 */
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
                                unsigned size, gfp_t gfp)
{
        struct buffer_head *bh;

        if (gfpflags_allow_blocking(gfp))
                bh = __find_get_block_nonatomic(bdev, block, size);
        else
                bh = __find_get_block(bdev, block, size);

        might_alloc(gfp);
        if (bh)
                return bh;

        return __getblk_slow(bdev, block, size, gfp);
}
EXPORT_SYMBOL(bdev_getblk);

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = bdev_getblk(bdev, block, size,
                        GFP_NOWAIT | __GFP_MOVABLE);

        if (likely(bh)) {
                bh_readahead(bh, REQ_RAHEAD);
                brelse(bh);
        }
}
EXPORT_SYMBOL(__breadahead);
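/*
 * Illustrative sketch (editor's addition): bdev_getblk() for a block that
 * will be completely overwritten, so no read is needed - the caller fills
 * the buffer and marks it uptodate itself.  The GFP choice and function
 * name are assumptions.
 */
static inline struct buffer_head *
example_getblk_for_overwrite(struct block_device *bdev, sector_t block,
                             unsigned size)
{
        struct buffer_head *bh = bdev_getblk(bdev, block, size,
                                             GFP_NOFS | __GFP_MOVABLE);

        if (!bh)
                return NULL;
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);      /* new contents */
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        mark_buffer_dirty(bh);
        return bh;                              /* caller must brelse() */
}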
/**
 * __bread_gfp() - Read a block.
 * @bdev: The block device to read from.
 * @block: Block number in units of block size.
 * @size: The block size of this device in bytes.
 * @gfp: Not page allocation flags; see below.
 *
 * You are not expected to call this function.  You should use one of
 * sb_bread(), sb_bread_unmovable() or __bread().
 *
 * Read a specified block, and return the buffer head that refers to it.
 * If @gfp is 0, the memory will be allocated using the block device's
 * default GFP flags.  If @gfp is __GFP_MOVABLE, the memory may be
 * allocated from a movable area.  Do not pass in a complete set of
 * GFP flags.
 *
 * The returned buffer head has its refcount increased.  The caller should
 * call brelse() when it has finished with the buffer.
 *
 * Context: May sleep waiting for I/O.
 * Return: NULL if the block was unreadable.
 */
struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
                                unsigned size, gfp_t gfp)
{
        struct buffer_head *bh;

        gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);

        /*
         * Prefer looping in the allocator rather than here, at least that
         * code knows what it's doing.
         */
        gfp |= __GFP_NOFAIL;

        bh = bdev_getblk(bdev, block, size, gfp);

        if (likely(bh) && !buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
}
EXPORT_SYMBOL(__bread_gfp);
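/*
 * Illustrative sketch (editor's addition): the common way a filesystem
 * reads one block through this API.  sb_bread() is a thin wrapper that
 * ends up in __bread_gfp() with @gfp == __GFP_MOVABLE.  The field being
 * read is hypothetical.
 */
static inline int example_read_u32(struct super_block *sb, sector_t block,
                                   u32 *out)
{
        struct buffer_head *bh = sb_bread(sb, block);

        if (!bh)
                return -EIO;                    /* block was unreadable */
        *out = le32_to_cpu(*(__le32 *)bh->b_data);
        brelse(bh);                             /* drop the elevated refcount */
        return 0;
}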
Because the caller is about to free (and possibly reuse) those * blocks on-disk. */ void block_invalidate_folio(struct folio *folio, size_t offset, size_t length) { struct buffer_head *head, *bh, *next; size_t curr_off = 0; size_t stop = length + offset; BUG_ON(!folio_test_locked(folio)); /* * Check for overflow */ BUG_ON(stop > folio_size(folio) || stop < length); head = folio_buffers(folio); if (!head) return; bh = head; do { size_t next_off = curr_off + bh->b_size; next = bh->b_this_page; /* * Are we still fully in range? */ if (next_off > stop) goto out; /* * Is this block fully invalidated? */ if (offset <= curr_off) discard_buffer(bh); curr_off = next_off; bh = next; } while (bh != head); /* * We release buffers only if the entire folio is being invalidated. * The get_block cached value has been unconditionally invalidated, * so real IO is not possible anymore. */ if (length == folio_size(folio)) filemap_release_folio(folio, 0); out: folio_clear_mappedtodisk(folio); } EXPORT_SYMBOL(block_invalidate_folio); /* * We attach and possibly dirty the buffers atomically wrt * block_dirty_folio() via i_private_lock. try_to_free_buffers * is already excluded via the folio lock. */ struct buffer_head *create_empty_buffers(struct folio *folio, unsigned long blocksize, unsigned long b_state) { struct buffer_head *bh, *head, *tail; gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL; head = folio_alloc_buffers(folio, blocksize, gfp); bh = head; do { bh->b_state |= b_state; tail = bh; bh = bh->b_this_page; } while (bh); tail->b_this_page = head; spin_lock(&folio->mapping->i_private_lock); if (folio_test_uptodate(folio) || folio_test_dirty(folio)) { bh = head; do { if (folio_test_dirty(folio)) set_buffer_dirty(bh); if (folio_test_uptodate(folio)) set_buffer_uptodate(bh); bh = bh->b_this_page; } while (bh != head); } folio_attach_private(folio, head); spin_unlock(&folio->mapping->i_private_lock); return head; } EXPORT_SYMBOL(create_empty_buffers); /** * clean_bdev_aliases: clean a range of buffers in block device * @bdev: Block device to clean buffers in * @block: Start of a range of blocks to clean * @len: Number of blocks to clean * * We are taking a range of blocks for data and we don't want writeback of any * buffer-cache aliases from the moment this function returns until the moment * when something explicitly marks the buffer dirty (hopefully that will not * happen until we free that block ;-) We don't even need to mark it * not-uptodate - nobody can expect anything from a newly allocated buffer * anyway. We used to use unmap_buffer() for such invalidation, but that was * wrong. We definitely don't want to mark the alias unmapped, for example - it * would confuse anyone who might pick it up with bread() afterwards... * * Also note that bforget() doesn't lock the buffer. So there can be * writeout I/O going on against recently-freed buffers. We don't wait on that * I/O in bforget() - it's more efficient to wait on the I/O only if we really * need to. That happens here.
*/ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len) { struct address_space *bd_mapping = bdev->bd_mapping; const int blkbits = bd_mapping->host->i_blkbits; struct folio_batch fbatch; pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE; pgoff_t end; int i, count; struct buffer_head *bh; struct buffer_head *head; end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE; folio_batch_init(&fbatch); while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) { count = folio_batch_count(&fbatch); for (i = 0; i < count; i++) { struct folio *folio = fbatch.folios[i]; if (!folio_buffers(folio)) continue; /* * We use folio lock instead of bd_mapping->i_private_lock * to pin buffers here since we can afford to sleep and * it scales better than a global spinlock lock. */ folio_lock(folio); /* Recheck when the folio is locked which pins bhs */ head = folio_buffers(folio); if (!head) goto unlock_page; bh = head; do { if (!buffer_mapped(bh) || (bh->b_blocknr < block)) goto next; if (bh->b_blocknr >= block + len) break; clear_buffer_dirty(bh); wait_on_buffer(bh); clear_buffer_req(bh); next: bh = bh->b_this_page; } while (bh != head); unlock_page: folio_unlock(folio); } folio_batch_release(&fbatch); cond_resched(); /* End of range already reached? */ if (index > end || !index) break; } } EXPORT_SYMBOL(clean_bdev_aliases); static struct buffer_head *folio_create_buffers(struct folio *folio, struct inode *inode, unsigned int b_state) { struct buffer_head *bh; BUG_ON(!folio_test_locked(folio)); bh = folio_buffers(folio); if (!bh) bh = create_empty_buffers(folio, 1 << READ_ONCE(inode->i_blkbits), b_state); return bh; } /* * NOTE! All mapped/uptodate combinations are valid: * * Mapped Uptodate Meaning * * No No "unknown" - must do get_block() * No Yes "hole" - zero-filled * Yes No "allocated" - allocated on disk, not read in * Yes Yes "valid" - allocated and up-to-date in memory. * * "Dirty" is valid only with the last case (mapped+uptodate). */ /* * While block_write_full_folio is writing back the dirty buffers under * the page lock, whoever dirtied the buffers may decide to clean them * again at any time. We handle that by only looking at the buffer * state inside lock_buffer(). * * If block_write_full_folio() is called for regular writeback * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a * locked buffer. This only can happen if someone has written the buffer * directly, with submit_bh(). At the address_space level PageWriteback * prevents this contention from occurring. * * If block_write_full_folio() is called with wbc->sync_mode == * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this * causes the writes to be flagged as synchronous writes. */ int __block_write_full_folio(struct inode *inode, struct folio *folio, get_block_t *get_block, struct writeback_control *wbc) { int err; sector_t block; sector_t last_block; struct buffer_head *bh, *head; size_t blocksize; int nr_underway = 0; blk_opf_t write_flags = wbc_to_write_flags(wbc); head = folio_create_buffers(folio, inode, (1 << BH_Dirty) | (1 << BH_Uptodate)); /* * Be very careful. We have no exclusion from block_dirty_folio * here, and the (potentially unmapped) buffers may become dirty at * any time. If a buffer becomes dirty here after we've inspected it * then we just miss that fact, and the folio stays dirty. * * Buffers outside i_size may be dirtied by block_dirty_folio; * handle that here by just cleaning them. 
*/ bh = head; blocksize = bh->b_size; block = div_u64(folio_pos(folio), blocksize); last_block = div_u64(i_size_read(inode) - 1, blocksize); /* * Get all the dirty buffers mapped to disk addresses and * handle any aliases from the underlying blockdev's mapping. */ do { if (block > last_block) { /* * mapped buffers outside i_size will occur, because * this folio can be outside i_size when there is a * truncate in progress. */ /* * The buffer was zeroed by block_write_full_folio() */ clear_buffer_dirty(bh); set_buffer_uptodate(bh); } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh)) { WARN_ON(bh->b_size != blocksize); err = get_block(inode, block, bh, 1); if (err) goto recover; clear_buffer_delay(bh); if (buffer_new(bh)) { /* blockdev mappings never come here */ clear_buffer_new(bh); clean_bdev_bh_alias(bh); } } bh = bh->b_this_page; block++; } while (bh != head); do { if (!buffer_mapped(bh)) continue; /* * If it's a fully non-blocking write attempt and we cannot * lock the buffer then redirty the folio. Note that this can * potentially cause a busy-wait loop from writeback threads * and kswapd activity, but those code paths have their own * higher-level throttling. */ if (wbc->sync_mode != WB_SYNC_NONE) { lock_buffer(bh); } else if (!trylock_buffer(bh)) { folio_redirty_for_writepage(wbc, folio); continue; } if (test_clear_buffer_dirty(bh)) { mark_buffer_async_write_endio(bh, end_buffer_async_write); } else { unlock_buffer(bh); } } while ((bh = bh->b_this_page) != head); /* * The folio and its buffers are protected by the writeback flag, * so we can drop the bh refcounts early. */ BUG_ON(folio_test_writeback(folio)); folio_start_writeback(folio); do { struct buffer_head *next = bh->b_this_page; if (buffer_async_write(bh)) { submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, inode->i_write_hint, wbc); nr_underway++; } bh = next; } while (bh != head); folio_unlock(folio); err = 0; done: if (nr_underway == 0) { /* * The folio was marked dirty, but the buffers were * clean. Someone wrote them back by hand with * write_dirty_buffer/submit_bh. A rare case. */ folio_end_writeback(folio); /* * The folio and buffer_heads can be released at any time from * here on. */ } return err; recover: /* * ENOSPC, or some other error. We may already have added some * blocks to the file, so we need to write these out to avoid * exposing stale data. * The folio is currently locked and not marked for writeback */ bh = head; /* Recovery: lock and submit the mapped buffers */ do { if (buffer_mapped(bh) && buffer_dirty(bh) && !buffer_delay(bh)) { lock_buffer(bh); mark_buffer_async_write_endio(bh, end_buffer_async_write); } else { /* * The buffer may have been set dirty during * attachment to a dirty folio. */ clear_buffer_dirty(bh); } } while ((bh = bh->b_this_page) != head); BUG_ON(folio_test_writeback(folio)); mapping_set_error(folio->mapping, err); folio_start_writeback(folio); do { struct buffer_head *next = bh->b_this_page; if (buffer_async_write(bh)) { clear_buffer_dirty(bh); submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, inode->i_write_hint, wbc); nr_underway++; } bh = next; } while (bh != head); folio_unlock(folio); goto done; } EXPORT_SYMBOL(__block_write_full_folio); /* * If a folio has any new buffers, zero them out here, and mark them uptodate * and dirty so they'll be written out (in order to prevent uninitialised * block data from leaking). And clear the new bit. 
*/ void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to) { size_t block_start, block_end; struct buffer_head *head, *bh; BUG_ON(!folio_test_locked(folio)); head = folio_buffers(folio); if (!head) return; bh = head; block_start = 0; do { block_end = block_start + bh->b_size; if (buffer_new(bh)) { if (block_end > from && block_start < to) { if (!folio_test_uptodate(folio)) { size_t start, xend; start = max(from, block_start); xend = min(to, block_end); folio_zero_segment(folio, start, xend); set_buffer_uptodate(bh); } clear_buffer_new(bh); mark_buffer_dirty(bh); } } block_start = block_end; bh = bh->b_this_page; } while (bh != head); } EXPORT_SYMBOL(folio_zero_new_buffers); static int iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, const struct iomap *iomap) { loff_t offset = (loff_t)block << inode->i_blkbits; bh->b_bdev = iomap->bdev; /* * Block points to offset in file we need to map, iomap contains * the offset at which the map starts. If the map ends before the * current block, then do not map the buffer and let the caller * handle it. */ if (offset >= iomap->offset + iomap->length) return -EIO; switch (iomap->type) { case IOMAP_HOLE: /* * If the buffer is not up to date or beyond the current EOF, * we need to mark it as new to ensure sub-block zeroing is * executed if necessary. */ if (!buffer_uptodate(bh) || (offset >= i_size_read(inode))) set_buffer_new(bh); return 0; case IOMAP_DELALLOC: if (!buffer_uptodate(bh) || (offset >= i_size_read(inode))) set_buffer_new(bh); set_buffer_uptodate(bh); set_buffer_mapped(bh); set_buffer_delay(bh); return 0; case IOMAP_UNWRITTEN: /* * For unwritten regions, we always need to ensure that regions * in the block we are not writing to are zeroed. Mark the * buffer as new to ensure this. */ set_buffer_new(bh); set_buffer_unwritten(bh); fallthrough; case IOMAP_MAPPED: if ((iomap->flags & IOMAP_F_NEW) || offset >= i_size_read(inode)) { /* * This can happen if truncating the block device races * with the check in the caller as i_size updates on * block devices aren't synchronized by i_rwsem for * block devices. 
*/ if (S_ISBLK(inode->i_mode)) return -EIO; set_buffer_new(bh); } bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> inode->i_blkbits; set_buffer_mapped(bh); return 0; default: WARN_ON_ONCE(1); return -EIO; } } int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len, get_block_t *get_block, const struct iomap *iomap) { size_t from = offset_in_folio(folio, pos); size_t to = from + len; struct inode *inode = folio->mapping->host; size_t block_start, block_end; sector_t block; int err = 0; size_t blocksize; struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; BUG_ON(!folio_test_locked(folio)); BUG_ON(to > folio_size(folio)); BUG_ON(from > to); head = folio_create_buffers(folio, inode, 0); blocksize = head->b_size; block = div_u64(folio_pos(folio), blocksize); for (bh = head, block_start = 0; bh != head || !block_start; block++, block_start=block_end, bh = bh->b_this_page) { block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (folio_test_uptodate(folio)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); } continue; } if (buffer_new(bh)) clear_buffer_new(bh); if (!buffer_mapped(bh)) { WARN_ON(bh->b_size != blocksize); if (get_block) err = get_block(inode, block, bh, 1); else err = iomap_to_bh(inode, block, bh, iomap); if (err) break; if (buffer_new(bh)) { clean_bdev_bh_alias(bh); if (folio_test_uptodate(folio)) { clear_buffer_new(bh); set_buffer_uptodate(bh); mark_buffer_dirty(bh); continue; } if (block_end > to || block_start < from) folio_zero_segments(folio, to, block_end, block_start, from); continue; } } if (folio_test_uptodate(folio)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); continue; } if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh) && (block_start < from || block_end > to)) { bh_read_nowait(bh, 0); *wait_bh++=bh; } } /* * If we issued read requests - let them complete. */ while(wait_bh > wait) { wait_on_buffer(*--wait_bh); if (!buffer_uptodate(*wait_bh)) err = -EIO; } if (unlikely(err)) folio_zero_new_buffers(folio, from, to); return err; } int __block_write_begin(struct folio *folio, loff_t pos, unsigned len, get_block_t *get_block) { return __block_write_begin_int(folio, pos, len, get_block, NULL); } EXPORT_SYMBOL(__block_write_begin); void block_commit_write(struct folio *folio, size_t from, size_t to) { size_t block_start, block_end; bool partial = false; unsigned blocksize; struct buffer_head *bh, *head; bh = head = folio_buffers(folio); if (!bh) return; blocksize = bh->b_size; block_start = 0; do { block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (!buffer_uptodate(bh)) partial = true; } else { set_buffer_uptodate(bh); mark_buffer_dirty(bh); } if (buffer_new(bh)) clear_buffer_new(bh); block_start = block_end; bh = bh->b_this_page; } while (bh != head); /* * If this is a partial write which happened to make all buffers * uptodate then we can optimize away a bogus read_folio() for * the next read(). Here we 'discover' whether the folio went * uptodate as a result of this (potentially partial) write. */ if (!partial) folio_mark_uptodate(folio); } EXPORT_SYMBOL(block_commit_write); /* * block_write_begin takes care of the basic task of block allocation and * bringing partial write blocks uptodate first. * * The filesystem needs to handle block truncation upon failure. 
*/ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, get_block_t *get_block) { pgoff_t index = pos >> PAGE_SHIFT; struct folio *folio; int status; folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, mapping_gfp_mask(mapping)); if (IS_ERR(folio)) return PTR_ERR(folio); status = __block_write_begin_int(folio, pos, len, get_block, NULL); if (unlikely(status)) { folio_unlock(folio); folio_put(folio); folio = NULL; } *foliop = folio; return status; } EXPORT_SYMBOL(block_write_begin); int block_write_end(loff_t pos, unsigned len, unsigned copied, struct folio *folio) { size_t start = pos - folio_pos(folio); if (unlikely(copied < len)) { /* * The buffers that were written will now be uptodate, so * we don't have to worry about a read_folio reading them * and overwriting a partial write. However if we have * encountered a short write and only partially written * into a buffer, it will not be marked uptodate, so a * read_folio might come in and destroy our partial write. * * Do the simplest thing, and just treat any short write to a * non uptodate folio as a zero-length write, and force the * caller to redo the whole thing. */ if (!folio_test_uptodate(folio)) copied = 0; folio_zero_new_buffers(folio, start+copied, start+len); } flush_dcache_folio(folio); /* This could be a short (even 0-length) commit */ block_commit_write(folio, start, start + copied); return copied; } EXPORT_SYMBOL(block_write_end); int generic_write_end(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { struct inode *inode = mapping->host; loff_t old_size = inode->i_size; bool i_size_changed = false; copied = block_write_end(pos, len, copied, folio); /* * No need to use i_size_read() here, the i_size cannot change under us * because we hold i_rwsem. * * But it's important to update i_size while still holding folio lock: * page writeout could otherwise come in and zero beyond i_size. */ if (pos + copied > inode->i_size) { i_size_write(inode, pos + copied); i_size_changed = true; } folio_unlock(folio); folio_put(folio); if (old_size < pos) pagecache_isize_extended(inode, old_size, pos); /* * Don't mark the inode dirty under page lock. First, it unnecessarily * makes the holding time of page lock longer. Second, it forces lock * ordering of page lock and transaction start for journaling * filesystems. */ if (i_size_changed) mark_inode_dirty(inode); return copied; } EXPORT_SYMBOL(generic_write_end); /* * block_is_partially_uptodate checks whether buffers within a folio are * uptodate or not. * * Returns true if all buffers which correspond to the specified part * of the folio are uptodate. 
*/ bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count) { unsigned block_start, block_end, blocksize; unsigned to; struct buffer_head *bh, *head; bool ret = true; head = folio_buffers(folio); if (!head) return false; blocksize = head->b_size; to = min_t(unsigned, folio_size(folio) - from, count); to = from + to; if (from < blocksize && to > folio_size(folio) - blocksize) return false; bh = head; block_start = 0; do { block_end = block_start + blocksize; if (block_end > from && block_start < to) { if (!buffer_uptodate(bh)) { ret = false; break; } if (block_end >= to) break; } block_start = block_end; bh = bh->b_this_page; } while (bh != head); return ret; } EXPORT_SYMBOL(block_is_partially_uptodate); /* * Generic "read_folio" function for block devices that have the normal * get_block functionality. This is most of the block device filesystems. * Reads the folio asynchronously --- the unlock_buffer() and * set/clear_buffer_uptodate() functions propagate buffer state into the * folio once IO has completed. */ int block_read_full_folio(struct folio *folio, get_block_t *get_block) { struct inode *inode = folio->mapping->host; sector_t iblock, lblock; struct buffer_head *bh, *head, *prev = NULL; size_t blocksize; int fully_mapped = 1; bool page_error = false; loff_t limit = i_size_read(inode); /* This is needed for ext4. */ if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode)) limit = inode->i_sb->s_maxbytes; head = folio_create_buffers(folio, inode, 0); blocksize = head->b_size; iblock = div_u64(folio_pos(folio), blocksize); lblock = div_u64(limit + blocksize - 1, blocksize); bh = head; do { if (buffer_uptodate(bh)) continue; if (!buffer_mapped(bh)) { int err = 0; fully_mapped = 0; if (iblock < lblock) { WARN_ON(bh->b_size != blocksize); err = get_block(inode, iblock, bh, 0); if (err) page_error = true; } if (!buffer_mapped(bh)) { folio_zero_range(folio, bh_offset(bh), blocksize); if (!err) set_buffer_uptodate(bh); continue; } /* * get_block() might have updated the buffer * synchronously */ if (buffer_uptodate(bh)) continue; } lock_buffer(bh); if (buffer_uptodate(bh)) { unlock_buffer(bh); continue; } mark_buffer_async_read(bh); if (prev) submit_bh(REQ_OP_READ, prev); prev = bh; } while (iblock++, (bh = bh->b_this_page) != head); if (fully_mapped) folio_set_mappedtodisk(folio); /* * All buffers are uptodate or get_block() returned an error * when trying to map them - we must finish the read because * end_buffer_async_read() will never be called on any buffer * in this folio. */ if (prev) submit_bh(REQ_OP_READ, prev); else folio_end_read(folio, !page_error); return 0; } EXPORT_SYMBOL(block_read_full_folio); /* utility function for filesystems that need to do work on expanding * truncates. Uses filesystem pagecache writes to allow the filesystem to * deal with the hole. 
*/ int generic_cont_expand_simple(struct inode *inode, loff_t size) { struct address_space *mapping = inode->i_mapping; const struct address_space_operations *aops = mapping->a_ops; struct folio *folio; void *fsdata = NULL; int err; err = inode_newsize_ok(inode, size); if (err) goto out; err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata); if (err) goto out; err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata); BUG_ON(err > 0); out: return err; } EXPORT_SYMBOL(generic_cont_expand_simple); static int cont_expand_zero(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, loff_t *bytes) { struct inode *inode = mapping->host; const struct address_space_operations *aops = mapping->a_ops; unsigned int blocksize = i_blocksize(inode); struct folio *folio; void *fsdata = NULL; pgoff_t index, curidx; loff_t curpos; unsigned zerofrom, offset, len; int err = 0; index = pos >> PAGE_SHIFT; offset = pos & ~PAGE_MASK; while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) { zerofrom = curpos & ~PAGE_MASK; if (zerofrom & (blocksize-1)) { *bytes |= (blocksize-1); (*bytes)++; } len = PAGE_SIZE - zerofrom; err = aops->write_begin(iocb, mapping, curpos, len, &folio, &fsdata); if (err) goto out; folio_zero_range(folio, offset_in_folio(folio, curpos), len); err = aops->write_end(iocb, mapping, curpos, len, len, folio, fsdata); if (err < 0) goto out; BUG_ON(err != len); err = 0; balance_dirty_pages_ratelimited(mapping); if (fatal_signal_pending(current)) { err = -EINTR; goto out; } } /* page covers the boundary, find the boundary offset */ if (index == curidx) { zerofrom = curpos & ~PAGE_MASK; /* if we will expand the file, the last block will be filled */ if (offset <= zerofrom) { goto out; } if (zerofrom & (blocksize-1)) { *bytes |= (blocksize-1); (*bytes)++; } len = offset - zerofrom; err = aops->write_begin(iocb, mapping, curpos, len, &folio, &fsdata); if (err) goto out; folio_zero_range(folio, offset_in_folio(folio, curpos), len); err = aops->write_end(iocb, mapping, curpos, len, len, folio, fsdata); if (err < 0) goto out; BUG_ON(err != len); err = 0; } out: return err; } /* * For moronic filesystems that do not allow holes in files. * We may have to extend the file. */ int cont_write_begin(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata, get_block_t *get_block, loff_t *bytes) { struct inode *inode = mapping->host; unsigned int blocksize = i_blocksize(inode); unsigned int zerofrom; int err; err = cont_expand_zero(iocb, mapping, pos, bytes); if (err) return err; zerofrom = *bytes & ~PAGE_MASK; if (pos+len > *bytes && zerofrom & (blocksize-1)) { *bytes |= (blocksize-1); (*bytes)++; } return block_write_begin(mapping, pos, len, foliop, get_block); } EXPORT_SYMBOL(cont_write_begin); /* * block_page_mkwrite() is not allowed to change the file size as it gets * called from a page fault handler when a page is first dirtied. Hence we must * be careful to check for EOF conditions here. We set the page up correctly * for a written page which means we get ENOSPC checking when writing into * holes and correct delalloc and unwritten extent mapping on filesystems that * support these features. * * We are not allowed to take the i_rwsem here so we have to play games to * protect against truncate races as the page could now be beyond EOF. Because * truncate writes the inode size before removing pages, once we have the * page lock we can determine safely if the page is beyond EOF.
If it is not * beyond EOF, then the page is guaranteed safe against truncation until we * unlock the page. * * Direct callers of this function should protect against filesystem freezing * using sb_start_pagefault() - sb_end_pagefault() functions. */ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block) { struct folio *folio = page_folio(vmf->page); struct inode *inode = file_inode(vma->vm_file); unsigned long end; loff_t size; int ret; folio_lock(folio); size = i_size_read(inode); if ((folio->mapping != inode->i_mapping) || (folio_pos(folio) >= size)) { /* We overload EFAULT to mean page got truncated */ ret = -EFAULT; goto out_unlock; } end = folio_size(folio); /* folio is wholly or partially inside EOF */ if (folio_pos(folio) + end > size) end = size - folio_pos(folio); ret = __block_write_begin_int(folio, 0, end, get_block, NULL); if (unlikely(ret)) goto out_unlock; block_commit_write(folio, 0, end); folio_mark_dirty(folio); folio_wait_stable(folio); return 0; out_unlock: folio_unlock(folio); return ret; } EXPORT_SYMBOL(block_page_mkwrite); int block_truncate_page(struct address_space *mapping, loff_t from, get_block_t *get_block) { pgoff_t index = from >> PAGE_SHIFT; unsigned blocksize; sector_t iblock; size_t offset, length, pos; struct inode *inode = mapping->host; struct folio *folio; struct buffer_head *bh; int err = 0; blocksize = i_blocksize(inode); length = from & (blocksize - 1); /* Block boundary? Nothing to do */ if (!length) return 0; length = blocksize - length; iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits; folio = filemap_grab_folio(mapping, index); if (IS_ERR(folio)) return PTR_ERR(folio); bh = folio_buffers(folio); if (!bh) bh = create_empty_buffers(folio, blocksize, 0); /* Find the buffer that contains "offset" */ offset = offset_in_folio(folio, from); pos = blocksize; while (offset >= pos) { bh = bh->b_this_page; iblock++; pos += blocksize; } if (!buffer_mapped(bh)) { WARN_ON(bh->b_size != blocksize); err = get_block(inode, iblock, bh, 0); if (err) goto unlock; /* unmapped? It's a hole - nothing to do */ if (!buffer_mapped(bh)) goto unlock; } /* Ok, it's mapped. Make sure it's up-to-date */ if (folio_test_uptodate(folio)) set_buffer_uptodate(bh); if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { err = bh_read(bh, 0); /* Uhhuh. Read error. Complain and punt. */ if (err < 0) goto unlock; } folio_zero_range(folio, offset, length); mark_buffer_dirty(bh); unlock: folio_unlock(folio); folio_put(folio); return err; } EXPORT_SYMBOL(block_truncate_page); /* * The generic write folio function for buffer-backed address_spaces */ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc, void *get_block) { struct inode * const inode = folio->mapping->host; loff_t i_size = i_size_read(inode); /* Is the folio fully inside i_size? */ if (folio_pos(folio) + folio_size(folio) <= i_size) return __block_write_full_folio(inode, folio, get_block, wbc); /* Is the folio fully outside i_size? (truncate in progress) */ if (folio_pos(folio) >= i_size) { folio_unlock(folio); return 0; /* don't care */ } /* * The folio straddles i_size. It must be zeroed out on each and every * writeback invocation because it may be mmapped. "A file is mapped * in multiples of the page size. For a file that is not a multiple of * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." 
*/ folio_zero_segment(folio, offset_in_folio(folio, i_size), folio_size(folio)); return __block_write_full_folio(inode, folio, get_block, wbc); } sector_t generic_block_bmap(struct address_space *mapping, sector_t block, get_block_t *get_block) { struct inode *inode = mapping->host; struct buffer_head tmp = { .b_size = i_blocksize(inode), }; get_block(inode, block, &tmp, 0); return tmp.b_blocknr; } EXPORT_SYMBOL(generic_block_bmap); static void end_bio_bh_io_sync(struct bio *bio) { struct buffer_head *bh = bio->bi_private; if (unlikely(bio_flagged(bio, BIO_QUIET))) set_bit(BH_Quiet, &bh->b_state); bh->b_end_io(bh, !bio->bi_status); bio_put(bio); } static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh, enum rw_hint write_hint, struct writeback_control *wbc) { const enum req_op op = opf & REQ_OP_MASK; struct bio *bio; BUG_ON(!buffer_locked(bh)); BUG_ON(!buffer_mapped(bh)); BUG_ON(!bh->b_end_io); BUG_ON(buffer_delay(bh)); BUG_ON(buffer_unwritten(bh)); /* * Only clear out a write error when rewriting */ if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) clear_buffer_write_io_error(bh); if (buffer_meta(bh)) opf |= REQ_META; if (buffer_prio(bh)) opf |= REQ_PRIO; bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO); fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_write_hint = write_hint; bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh)); bio->bi_end_io = end_bio_bh_io_sync; bio->bi_private = bh; /* Take care of bh's that straddle the end of the device */ guard_bio_eod(bio); if (wbc) { wbc_init_bio(wbc, bio); wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size); } submit_bio(bio); } void submit_bh(blk_opf_t opf, struct buffer_head *bh) { submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL); } EXPORT_SYMBOL(submit_bh); void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) { lock_buffer(bh); if (!test_clear_buffer_dirty(bh)) { unlock_buffer(bh); return; } bh->b_end_io = end_buffer_write_sync; get_bh(bh); submit_bh(REQ_OP_WRITE | op_flags, bh); } EXPORT_SYMBOL(write_dirty_buffer); /* * For a data-integrity writeout, we need to wait upon any in-progress I/O * and then start new I/O and then wait upon it. The caller must have a ref on * the buffer_head. */ int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) { WARN_ON(atomic_read(&bh->b_count) < 1); lock_buffer(bh); if (test_clear_buffer_dirty(bh)) { /* * The bh should be mapped, but it might not be if the * device was hot-removed. Not much we can do but fail the I/O. 
*/ if (!buffer_mapped(bh)) { unlock_buffer(bh); return -EIO; } get_bh(bh); bh->b_end_io = end_buffer_write_sync; submit_bh(REQ_OP_WRITE | op_flags, bh); wait_on_buffer(bh); if (!buffer_uptodate(bh)) return -EIO; } else { unlock_buffer(bh); } return 0; } EXPORT_SYMBOL(__sync_dirty_buffer); int sync_dirty_buffer(struct buffer_head *bh) { return __sync_dirty_buffer(bh, REQ_SYNC); } EXPORT_SYMBOL(sync_dirty_buffer); static inline int buffer_busy(struct buffer_head *bh) { return atomic_read(&bh->b_count) | (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); } static bool drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free) { struct buffer_head *head = folio_buffers(folio); struct buffer_head *bh; bh = head; do { if (buffer_busy(bh)) goto failed; bh = bh->b_this_page; } while (bh != head); do { struct buffer_head *next = bh->b_this_page; if (bh->b_assoc_map) __remove_assoc_queue(bh); bh = next; } while (bh != head); *buffers_to_free = head; folio_detach_private(folio); return true; failed: return false; } /** * try_to_free_buffers - Release buffers attached to this folio. * @folio: The folio. * * If any buffers are in use (dirty, under writeback, elevated refcount), * no buffers will be freed. * * If the folio is dirty but all the buffers are clean then we need to * be sure to mark the folio clean as well. This is because the folio * may be against a block device, and a later reattachment of buffers * to a dirty folio will set *all* buffers dirty. Which would corrupt * filesystem data on the same device. * * The same applies to regular filesystem folios: if all the buffers are * clean then we set the folio clean and proceed. To do that, we require * total exclusion from block_dirty_folio(). That is obtained with * i_private_lock. * * Exclusion against try_to_free_buffers may be obtained by either * locking the folio or by holding its mapping's i_private_lock. * * Context: Process context. @folio must be locked. Will not sleep. * Return: true if all buffers attached to this folio were freed. */ bool try_to_free_buffers(struct folio *folio) { struct address_space * const mapping = folio->mapping; struct buffer_head *buffers_to_free = NULL; bool ret = 0; BUG_ON(!folio_test_locked(folio)); if (folio_test_writeback(folio)) return false; if (mapping == NULL) { /* can this still happen? */ ret = drop_buffers(folio, &buffers_to_free); goto out; } spin_lock(&mapping->i_private_lock); ret = drop_buffers(folio, &buffers_to_free); /* * If the filesystem writes its buffers by hand (eg ext3) * then we can have clean buffers against a dirty folio. We * clean the folio here; otherwise the VM will never notice * that the filesystem did any IO at all. * * Also, during truncate, discard_buffer will have marked all * the folio's buffers clean. We discover that here and clean * the folio also. * * i_private_lock must be held over this entire operation in order * to synchronise against block_dirty_folio and prevent the * dirty bit from being lost. */ if (ret) folio_cancel_dirty(folio); spin_unlock(&mapping->i_private_lock); out: if (buffers_to_free) { struct buffer_head *bh = buffers_to_free; do { struct buffer_head *next = bh->b_this_page; free_buffer_head(bh); bh = next; } while (bh != buffers_to_free); } return ret; } EXPORT_SYMBOL(try_to_free_buffers); /* * Buffer-head allocation */ static struct kmem_cache *bh_cachep __ro_after_init; /* * Once the number of bh's in the machine exceeds this level, we start * stripping them in writeback. 
*/ static unsigned long max_buffer_heads __ro_after_init; int buffer_heads_over_limit; struct bh_accounting { int nr; /* Number of live bh's */ int ratelimit; /* Limit cacheline bouncing */ }; static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; static void recalc_bh_state(void) { int i; int tot = 0; if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) return; __this_cpu_write(bh_accounting.ratelimit, 0); for_each_online_cpu(i) tot += per_cpu(bh_accounting, i).nr; buffer_heads_over_limit = (tot > max_buffer_heads); } struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) { struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); if (ret) { INIT_LIST_HEAD(&ret->b_assoc_buffers); spin_lock_init(&ret->b_uptodate_lock); preempt_disable(); __this_cpu_inc(bh_accounting.nr); recalc_bh_state(); preempt_enable(); } return ret; } EXPORT_SYMBOL(alloc_buffer_head); void free_buffer_head(struct buffer_head *bh) { BUG_ON(!list_empty(&bh->b_assoc_buffers)); kmem_cache_free(bh_cachep, bh); preempt_disable(); __this_cpu_dec(bh_accounting.nr); recalc_bh_state(); preempt_enable(); } EXPORT_SYMBOL(free_buffer_head); static int buffer_exit_cpu_dead(unsigned int cpu) { int i; struct bh_lru *b = &per_cpu(bh_lrus, cpu); for (i = 0; i < BH_LRU_SIZE; i++) { brelse(b->bhs[i]); b->bhs[i] = NULL; } this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr); per_cpu(bh_accounting, cpu).nr = 0; return 0; } /** * bh_uptodate_or_lock - Test whether the buffer is uptodate * @bh: struct buffer_head * * Return true if the buffer is up-to-date, or false with the buffer * locked if not. */ int bh_uptodate_or_lock(struct buffer_head *bh) { if (!buffer_uptodate(bh)) { lock_buffer(bh); if (!buffer_uptodate(bh)) return 0; unlock_buffer(bh); } return 1; } EXPORT_SYMBOL(bh_uptodate_or_lock); /** * __bh_read - Submit read for a locked buffer * @bh: struct buffer_head * @op_flags: additional REQ_* flags to append to REQ_OP_READ * @wait: wait until the read finishes * * Returns zero on success (always when @wait is false), and -EIO on error. */ int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait) { int ret = 0; BUG_ON(!buffer_locked(bh)); get_bh(bh); bh->b_end_io = end_buffer_read_sync; submit_bh(REQ_OP_READ | op_flags, bh); if (wait) { wait_on_buffer(bh); if (!buffer_uptodate(bh)) ret = -EIO; } return ret; } EXPORT_SYMBOL(__bh_read); /** * __bh_read_batch - Submit read for a batch of unlocked buffers * @nr: number of entries in the buffer batch * @bhs: a batch of struct buffer_head * @op_flags: additional REQ_* flags to append to REQ_OP_READ * @force_lock: if set, wait for each buffer's lock; otherwise skip any * buffer that cannot be locked immediately. * * Returns nothing; completion is signalled through each buffer's * end_io handler (end_buffer_read_sync()).
*/ void __bh_read_batch(int nr, struct buffer_head *bhs[], blk_opf_t op_flags, bool force_lock) { int i; for (i = 0; i < nr; i++) { struct buffer_head *bh = bhs[i]; if (buffer_uptodate(bh)) continue; if (force_lock) lock_buffer(bh); else if (!trylock_buffer(bh)) continue; if (buffer_uptodate(bh)) { unlock_buffer(bh); continue; } bh->b_end_io = end_buffer_read_sync; get_bh(bh); submit_bh(REQ_OP_READ | op_flags, bh); } } EXPORT_SYMBOL(__bh_read_batch); void __init buffer_init(void) { unsigned long nrpages; int ret; bh_cachep = KMEM_CACHE(buffer_head, SLAB_RECLAIM_ACCOUNT|SLAB_PANIC); /* * Limit the bh occupancy to 10% of ZONE_NORMAL */ nrpages = (nr_free_buffer_pages() * 10) / 100; max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead", NULL, buffer_exit_cpu_dead); WARN_ON(ret < 0); }
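/*
 * Illustrative sketch, not part of fs/buffer.c: one plausible way a
 * filesystem could drive the buffer-cache API above. sb_bread() (from
 * <linux/buffer_head.h>) wraps __bread_gfp() with the superblock's block
 * size; sync_dirty_buffer() is defined earlier in this file. The function
 * itself and its zero-fill "update" are hypothetical, shown only to make
 * the locking and reference-counting protocol concrete.
 */
static int example_update_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, blocknr);		/* may sleep for read I/O */
	if (!bh)
		return -EIO;			/* block was unreadable */

	lock_buffer(bh);			/* exclude concurrent I/O */
	memset(bh->b_data, 0, bh->b_size);	/* ... modify the block ... */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mark_buffer_dirty(bh);			/* queue for writeback */
	err = sync_dirty_buffer(bh);		/* wait only if durability matters */
	brelse(bh);				/* drop sb_bread()'s reference */
	return err;
}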
/* FUSE: Filesystem in Userspace Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> This program can be distributed under the terms of the GNU GPL.
See the file COPYING. */ #ifndef _FS_FUSE_I_H #define _FS_FUSE_I_H #ifndef pr_fmt # define pr_fmt(fmt) "fuse: " fmt #endif #include <linux/fuse.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/wait.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/backing-dev.h> #include <linux/mutex.h> #include <linux/rwsem.h> #include <linux/rbtree.h> #include <linux/poll.h> #include <linux/workqueue.h> #include <linux/kref.h> #include <linux/xattr.h> #include <linux/pid_namespace.h> #include <linux/refcount.h> #include <linux/user_namespace.h> /** Default max number of pages that can be used in a single read request */ #define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32 /** Bias for fi->writectr, meaning new writepages must not be sent */ #define FUSE_NOWRITE INT_MIN /** Maximum length of a filename, not including terminating null */ /* maximum, small enough for FUSE_MIN_READ_BUFFER*/ #define FUSE_NAME_LOW_MAX 1024 /* maximum, but needs a request buffer > FUSE_MIN_READ_BUFFER */ #define FUSE_NAME_MAX (PATH_MAX - 1) /** Number of dentries for each connection in the control filesystem */ #define FUSE_CTL_NUM_DENTRIES 5 /* Frequency (in seconds) of request timeout checks, if opted into */ #define FUSE_TIMEOUT_TIMER_FREQ 15 /** Frequency (in jiffies) of request timeout checks, if opted into */ extern const unsigned long fuse_timeout_timer_freq; /** Maximum of max_pages received in init_out */ extern unsigned int fuse_max_pages_limit; /* * Default timeout (in seconds) for the server to reply to a request * before the connection is aborted, if no timeout was specified on mount. */ extern unsigned int fuse_default_req_timeout; /* * Max timeout (in seconds) for the server to reply to a request before * the connection is aborted. */ extern unsigned int fuse_max_req_timeout; /** List of active connections */ extern struct list_head fuse_conn_list; /** Global mutex protecting fuse_conn_list and the control filesystem */ extern struct mutex fuse_mutex; /** Module parameters */ extern unsigned int max_user_bgreq; extern unsigned int max_user_congthresh; /* One forget request */ struct fuse_forget_link { struct fuse_forget_one forget_one; struct fuse_forget_link *next; }; /* Submount lookup tracking */ struct fuse_submount_lookup { /** Refcount */ refcount_t count; /** Unique ID, which identifies the inode between userspace * and kernel */ u64 nodeid; /** The request used for sending the FORGET message */ struct fuse_forget_link *forget; }; /** Container for data related to mapping to backing file */ struct fuse_backing { struct file *file; struct cred *cred; /** refcount */ refcount_t count; struct rcu_head rcu; }; /** FUSE inode */ struct fuse_inode { /** Inode data */ struct inode inode; /** Unique ID, which identifies the inode between userspace * and kernel */ u64 nodeid; /** Number of lookups on this inode */ u64 nlookup; /** The request used for sending the FORGET message */ struct fuse_forget_link *forget; /** Time in jiffies until the file attributes are valid */ u64 i_time; /* Which attributes are invalid */ u32 inval_mask; /** The sticky bit in inode->i_mode may have been removed, so preserve the original mode */ umode_t orig_i_mode; /* Cache birthtime */ struct timespec64 i_btime; /** 64 bit inode number */ u64 orig_ino; /** Version of last attribute change */ u64 attr_version; union { /* read/write io cache (regular file only) */ struct { /* Files usable in writepage. 
Protected by fi->lock */ struct list_head write_files; /* Writepages pending on truncate or fsync */ struct list_head queued_writes; /* Number of sent writes, a negative bias * (FUSE_NOWRITE) means more writes are blocked */ int writectr; /** Number of files/maps using page cache */ int iocachectr; /* Waitq for writepage completion */ wait_queue_head_t page_waitq; /* waitq for direct-io completion */ wait_queue_head_t direct_io_waitq; }; /* readdir cache (directory only) */ struct { /* true if fully cached */ bool cached; /* size of cache */ loff_t size; /* position at end of cache (position of next entry) */ loff_t pos; /* version of the cache */ u64 version; /* modification time of directory when cache was * started */ struct timespec64 mtime; /* iversion of directory when cache was started */ u64 iversion; /* protects above fields */ spinlock_t lock; } rdc; }; /** Miscellaneous bits describing inode state */ unsigned long state; /** Lock for serializing lookup and readdir for back compatibility*/ struct mutex mutex; /** Lock to protect write related fields */ spinlock_t lock; #ifdef CONFIG_FUSE_DAX /* * Dax specific inode data */ struct fuse_inode_dax *dax; #endif /** Submount specific lookup tracking */ struct fuse_submount_lookup *submount_lookup; #ifdef CONFIG_FUSE_PASSTHROUGH /** Reference to backing file in passthrough mode */ struct fuse_backing *fb; #endif /* * The underlying inode->i_blkbits value will not be modified, * so preserve the blocksize specified by the server. */ u8 cached_i_blkbits; }; /** FUSE inode state bits */ enum { /** Advise readdirplus */ FUSE_I_ADVISE_RDPLUS, /** Initialized with readdirplus */ FUSE_I_INIT_RDPLUS, /** An operation changing file size is in progress */ FUSE_I_SIZE_UNSTABLE, /* Bad inode */ FUSE_I_BAD, /* Has btime */ FUSE_I_BTIME, /* Wants or already has page cache IO */ FUSE_I_CACHE_IO_MODE, }; struct fuse_conn; struct fuse_mount; union fuse_file_args; /** FUSE specific file data */ struct fuse_file { /** Fuse connection for this file */ struct fuse_mount *fm; /* Argument space reserved for open/release */ union fuse_file_args *args; /** Kernel file handle guaranteed to be unique */ u64 kh; /** File handle used by userspace */ u64 fh; /** Node id of this file */ u64 nodeid; /** Refcount */ refcount_t count; /** FOPEN_* flags returned by open */ u32 open_flags; /** Entry on inode's write_files list */ struct list_head write_entry; /* Readdir related */ struct { /* Dir stream position */ loff_t pos; /* Offset in cache */ loff_t cache_off; /* Version of cache we are reading */ u64 version; } readdir; /** RB node to be linked on fuse_conn->polled_files */ struct rb_node polled_node; /** Wait queue head for poll */ wait_queue_head_t poll_wait; /** Does file hold a fi->iocachectr refcount? */ enum { IOM_NONE, IOM_CACHED, IOM_UNCACHED } iomode; #ifdef CONFIG_FUSE_PASSTHROUGH /** Reference to backing file in passthrough mode */ struct file *passthrough; const struct cred *cred; #endif /** Has flock been performed on this file? 
*/ bool flock:1; }; /** One input argument of a request */ struct fuse_in_arg { unsigned size; const void *value; }; /** One output argument of a request */ struct fuse_arg { unsigned size; void *value; }; /** FUSE folio descriptor */ struct fuse_folio_desc { unsigned int length; unsigned int offset; }; struct fuse_args { uint64_t nodeid; uint32_t opcode; uint8_t in_numargs; uint8_t out_numargs; uint8_t ext_idx; bool force:1; bool noreply:1; bool nocreds:1; bool in_pages:1; bool out_pages:1; bool user_pages:1; bool out_argvar:1; bool page_zeroing:1; bool page_replace:1; bool may_block:1; bool is_ext:1; bool is_pinned:1; bool invalidate_vmap:1; struct fuse_in_arg in_args[4]; struct fuse_arg out_args[2]; void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error); /* Used for kvec iter backed by vmalloc address */ void *vmap_base; }; struct fuse_args_pages { struct fuse_args args; struct folio **folios; struct fuse_folio_desc *descs; unsigned int num_folios; }; struct fuse_release_args { struct fuse_args args; struct fuse_release_in inarg; struct inode *inode; }; union fuse_file_args { /* Used during open() */ struct fuse_open_out open_outarg; /* Used during release() */ struct fuse_release_args release_args; }; #define FUSE_ARGS(args) struct fuse_args args = {} /** The request IO state (for asynchronous processing) */ struct fuse_io_priv { struct kref refcnt; int async; spinlock_t lock; unsigned reqs; ssize_t bytes; size_t size; __u64 offset; bool write; bool should_dirty; int err; struct kiocb *iocb; struct completion *done; bool blocking; }; #define FUSE_IO_PRIV_SYNC(i) \ { \ .refcnt = KREF_INIT(1), \ .async = 0, \ .iocb = i, \ } /** * Request flags * * FR_ISREPLY: set if the request has reply * FR_FORCE: force sending of the request even if interrupted * FR_BACKGROUND: request is sent in the background * FR_WAITING: request is counted as "waiting" * FR_ABORTED: the request was aborted * FR_INTERRUPTED: the request has been interrupted * FR_LOCKED: data is being copied to/from the request * FR_PENDING: request is not yet in userspace * FR_SENT: request is in userspace, waiting for an answer * FR_FINISHED: request is finished * FR_PRIVATE: request is on private list * FR_ASYNC: request is asynchronous * FR_URING: request is handled through fuse-io-uring */ enum fuse_req_flag { FR_ISREPLY, FR_FORCE, FR_BACKGROUND, FR_WAITING, FR_ABORTED, FR_INTERRUPTED, FR_LOCKED, FR_PENDING, FR_SENT, FR_FINISHED, FR_PRIVATE, FR_ASYNC, FR_URING, }; /** * A request to the client * * .waitq.lock protects the following fields: * - FR_ABORTED * - FR_LOCKED (may also be modified under fc->lock, tested under both) */ struct fuse_req { /** This can be on either pending processing or io lists in fuse_conn */ struct list_head list; /** Entry on the interrupts list */ struct list_head intr_entry; /* Input/output arguments */ struct fuse_args *args; /** refcount */ refcount_t count; /* Request flags, updated with test/set/clear_bit() */ unsigned long flags; /* The request input header */ struct { struct fuse_in_header h; } in; /* The request output header */ struct { struct fuse_out_header h; } out; /** Used to wake up the task waiting for completion of request*/ wait_queue_head_t waitq; #if IS_ENABLED(CONFIG_VIRTIO_FS) /** virtio-fs's physically contiguous buffer for in and out args */ void *argbuf; #endif /** fuse_mount this request belongs to */ struct fuse_mount *fm; #ifdef CONFIG_FUSE_IO_URING void *ring_entry; void *ring_queue; #endif /** When (in jiffies) the request was created */ unsigned long 
create_time; }; struct fuse_iqueue; /** * Input queue callbacks * * Input queue signalling is device-specific. For example, the /dev/fuse file * uses fiq->waitq and fasync to wake processes that are waiting on queue * readiness. These callbacks allow other device types to respond to input * queue activity. */ struct fuse_iqueue_ops { /** * Send one forget */ void (*send_forget)(struct fuse_iqueue *fiq, struct fuse_forget_link *link); /** * Send interrupt for request */ void (*send_interrupt)(struct fuse_iqueue *fiq, struct fuse_req *req); /** * Send one request */ void (*send_req)(struct fuse_iqueue *fiq, struct fuse_req *req); /** * Clean up when fuse_iqueue is destroyed */ void (*release)(struct fuse_iqueue *fiq); }; /** /dev/fuse input queue operations */ extern const struct fuse_iqueue_ops fuse_dev_fiq_ops; struct fuse_iqueue { /** Connection established */ unsigned connected; /** Lock protecting accesses to members of this structure */ spinlock_t lock; /** Readers of the connection are waiting on this */ wait_queue_head_t waitq; /** The next unique request id */ u64 reqctr; /** The list of pending requests */ struct list_head pending; /** Pending interrupts */ struct list_head interrupts; /** Queue of pending forgets */ struct fuse_forget_link forget_list_head; struct fuse_forget_link *forget_list_tail; /** Batching of FORGET requests (positive indicates FORGET batch) */ int forget_batch; /** O_ASYNC requests */ struct fasync_struct *fasync; /** Device-specific callbacks */ const struct fuse_iqueue_ops *ops; /** Device-specific state */ void *priv; }; #define FUSE_PQ_HASH_BITS 8 #define FUSE_PQ_HASH_SIZE (1 << FUSE_PQ_HASH_BITS) struct fuse_pqueue { /** Connection established */ unsigned connected; /** Lock protecting accesses to members of this structure */ spinlock_t lock; /** Hash table of requests being processed */ struct list_head *processing; /** The list of requests under I/O */ struct list_head io; }; /** * Fuse device instance */ struct fuse_dev { /** Fuse connection for this device */ struct fuse_conn *fc; /** Processing queue */ struct fuse_pqueue pq; /** list entry on fc->devices */ struct list_head entry; }; enum fuse_dax_mode { FUSE_DAX_INODE_DEFAULT, /* default */ FUSE_DAX_ALWAYS, /* "-o dax=always" */ FUSE_DAX_NEVER, /* "-o dax=never" */ FUSE_DAX_INODE_USER, /* "-o dax=inode" */ }; static inline bool fuse_is_inode_dax_mode(enum fuse_dax_mode mode) { return mode == FUSE_DAX_INODE_DEFAULT || mode == FUSE_DAX_INODE_USER; } struct fuse_fs_context { int fd; struct file *file; unsigned int rootmode; kuid_t user_id; kgid_t group_id; bool is_bdev:1; bool fd_present:1; bool rootmode_present:1; bool user_id_present:1; bool group_id_present:1; bool default_permissions:1; bool allow_other:1; bool destroy:1; bool no_control:1; bool no_force_umount:1; bool legacy_opts_show:1; enum fuse_dax_mode dax_mode; unsigned int max_read; unsigned int blksize; const char *subtype; /* DAX device, may be NULL */ struct dax_device *dax_dev; /* fuse_dev pointer to fill in, should contain NULL on entry */ void **fudptr; }; struct fuse_sync_bucket { /* count is a possible scalability bottleneck */ atomic_t count; wait_queue_head_t waitq; struct rcu_head rcu; }; /** * A Fuse connection. * * This structure is created when the root filesystem is mounted, and * is destroyed when the client device is closed and the last * fuse_mount is destroyed.
/**
 * A Fuse connection.
 *
 * This structure is created when the root filesystem is mounted, and
 * is destroyed when the client device is closed and the last
 * fuse_mount is destroyed.
 */
struct fuse_conn {
	/** Lock protecting accesses to members of this structure */
	spinlock_t lock;

	/** Refcount */
	refcount_t count;

	/** Number of fuse_dev's */
	atomic_t dev_count;

	/** Current epoch for up-to-date dentries */
	atomic_t epoch;

	struct rcu_head rcu;

	/** The user id for this mount */
	kuid_t user_id;

	/** The group id for this mount */
	kgid_t group_id;

	/** The pid namespace for this mount */
	struct pid_namespace *pid_ns;

	/** The user namespace for this mount */
	struct user_namespace *user_ns;

	/** Maximum read size */
	unsigned max_read;

	/** Maximum write size */
	unsigned max_write;

	/** Maximum number of pages that can be used in a single request */
	unsigned int max_pages;

	/** Constrain ->max_pages to this value during feature negotiation */
	unsigned int max_pages_limit;

	/** Input queue */
	struct fuse_iqueue iq;

	/** The next unique kernel file handle */
	atomic64_t khctr;

	/** rbtree of fuse_files waiting for poll events indexed by ph */
	struct rb_root polled_files;

	/** Maximum number of outstanding background requests */
	unsigned max_background;

	/** Number of background requests at which congestion starts */
	unsigned congestion_threshold;

	/** Number of requests currently in the background */
	unsigned num_background;

	/** Number of background requests currently queued for userspace */
	unsigned active_background;

	/** The list of background requests set aside for later queuing */
	struct list_head bg_queue;

	/** Protects: max_background, congestion_threshold, num_background,
	 * active_background, bg_queue, blocked */
	spinlock_t bg_lock;

	/** Flag indicating that INIT reply has been received. Allocating
	 * any fuse request will be suspended until the flag is set */
	int initialized;

	/** Flag indicating if connection is blocked.  This will be
	    the case before the INIT reply is received, and if there
	    are too many outstanding background requests */
	int blocked;

	/** waitq for blocked connection */
	wait_queue_head_t blocked_waitq;

	/** Connection established, cleared on umount, connection
	    abort and device release */
	unsigned connected;

	/** Connection aborted via sysfs */
	bool aborted;

	/** Connection failed (version mismatch).  Cannot race with
	    setting other bitfields since it is only set once in INIT
	    reply, before any other request, and never cleared */
	unsigned conn_error:1;

	/** Connection successful.  Only set in INIT */
	unsigned conn_init:1;

	/** Do readahead asynchronously?  Only set in INIT */
	unsigned async_read:1;

	/** Return a unique read error after abort.  Only set in INIT */
	unsigned abort_err:1;

	/** Do not send separate SETATTR request before open(O_TRUNC) */
	unsigned atomic_o_trunc:1;

	/** Filesystem supports NFS exporting.  Only set in INIT */
	unsigned export_support:1;

	/** write-back cache policy (default is write-through) */
	unsigned writeback_cache:1;

	/** allow parallel lookups and readdir (default is serialized) */
	unsigned parallel_dirops:1;

	/** fs handles killing suid/sgid/cap on write/chown/trunc */
	unsigned handle_killpriv:1;

	/** cache READLINK responses in page cache */
	unsigned cache_symlinks:1;

	/* show legacy mount options */
	unsigned int legacy_opts_show:1;

	/*
	 * fs kills suid/sgid/cap on write/chown/trunc.  suid is killed on
	 * write/trunc only if caller did not have CAP_FSETID.  sgid is killed
	 * on write/truncate only if the caller did not have CAP_FSETID and
	 * the file has group execute permission.
	 */
	unsigned handle_killpriv_v2:1;
	/*
	 * The following bitfields are only for optimization purposes
	 * and hence races in setting them will not cause malfunction
	 */

	/** Is open/release not implemented by fs? */
	unsigned no_open:1;

	/** Is opendir/releasedir not implemented by fs? */
	unsigned no_opendir:1;

	/** Is fsync not implemented by fs? */
	unsigned no_fsync:1;

	/** Is fsyncdir not implemented by fs? */
	unsigned no_fsyncdir:1;

	/** Is flush not implemented by fs? */
	unsigned no_flush:1;

	/** Is setxattr not implemented by fs? */
	unsigned no_setxattr:1;

	/** Does file server support extended setxattr */
	unsigned setxattr_ext:1;

	/** Is getxattr not implemented by fs? */
	unsigned no_getxattr:1;

	/** Is listxattr not implemented by fs? */
	unsigned no_listxattr:1;

	/** Is removexattr not implemented by fs? */
	unsigned no_removexattr:1;

	/** Are posix file locking primitives not implemented by fs? */
	unsigned no_lock:1;

	/** Is access not implemented by fs? */
	unsigned no_access:1;

	/** Is create not implemented by fs? */
	unsigned no_create:1;

	/** Is interrupt not implemented by fs? */
	unsigned no_interrupt:1;

	/** Is bmap not implemented by fs? */
	unsigned no_bmap:1;

	/** Is poll not implemented by fs? */
	unsigned no_poll:1;

	/** Do multi-page cached writes */
	unsigned big_writes:1;

	/** Don't apply umask to creation modes */
	unsigned dont_mask:1;

	/** Are BSD file locking primitives not implemented by fs? */
	unsigned no_flock:1;

	/** Is fallocate not implemented by fs? */
	unsigned no_fallocate:1;

	/** Is rename with flags not implemented by fs? */
	unsigned no_rename2:1;

	/** Use enhanced/automatic page cache invalidation. */
	unsigned auto_inval_data:1;

	/** Filesystem is fully responsible for page cache invalidation. */
	unsigned explicit_inval_data:1;

	/** Does the filesystem support readdirplus? */
	unsigned do_readdirplus:1;

	/** Does the filesystem want adaptive readdirplus? */
	unsigned readdirplus_auto:1;

	/** Does the filesystem support asynchronous direct-IO submission? */
	unsigned async_dio:1;

	/** Is lseek not implemented by fs? */
	unsigned no_lseek:1;

	/** Does the filesystem support posix acls? */
	unsigned posix_acl:1;

	/** Check permissions based on the file mode or not? */
	unsigned default_permissions:1;

	/** Allow other than the mounter user to access the filesystem? */
	unsigned allow_other:1;

	/** Is copy_file_range not implemented by fs? */
	unsigned no_copy_file_range:1;

	/** Is copy_file_range_64 not implemented by fs? */
	unsigned no_copy_file_range_64:1;

	/* Send DESTROY request */
	unsigned int destroy:1;

	/* Delete dentries that have gone stale */
	unsigned int delete_stale:1;

	/** Do not create entry in fusectl fs */
	unsigned int no_control:1;

	/** Do not allow MNT_FORCE umount */
	unsigned int no_force_umount:1;

	/* Auto-mount submounts announced by the server */
	unsigned int auto_submounts:1;

	/* Propagate syncfs() to server */
	unsigned int sync_fs:1;

	/* Initialize security xattrs when creating a new inode */
	unsigned int init_security:1;

	/* Add supplementary group info when creating a new inode */
	unsigned int create_supp_group:1;

	/* Does the filesystem support per inode DAX? */
	unsigned int inode_dax:1;

	/* Is tmpfile not implemented by fs? */
	unsigned int no_tmpfile:1;

	/* Relax restrictions to allow shared mmap in FOPEN_DIRECT_IO mode */
	unsigned int direct_io_allow_mmap:1;

	/* Is statx not implemented by fs? */
	unsigned int no_statx:1;
	/** Passthrough support for read/write IO */
	unsigned int passthrough:1;

	/* Use pages instead of pointer for kernel I/O */
	unsigned int use_pages_for_kvec_io:1;

	/* Is link not implemented by fs? */
	unsigned int no_link:1;

	/* Is synchronous FUSE_INIT allowed? */
	unsigned int sync_init:1;

	/* Use io_uring for communication */
	unsigned int io_uring;

	/** Maximum stack depth for passthrough backing files */
	int max_stack_depth;

	/** The number of requests waiting for completion */
	atomic_t num_waiting;

	/** Negotiated minor version */
	unsigned minor;

	/** Entry on the fuse_conn_list */
	struct list_head entry;

	/** Device ID from the root super block */
	dev_t dev;

	/** Key for lock owner ID scrambling */
	u32 scramble_key[4];

	/** Version counter for attribute changes */
	atomic64_t attr_version;

	/** Version counter for evict inode */
	atomic64_t evict_ctr;

	/* maximum file name length */
	u32 name_max;

	/** Called on final put */
	void (*release)(struct fuse_conn *);

	/**
	 * Read/write semaphore to hold when accessing the sb of any
	 * fuse_mount belonging to this connection
	 */
	struct rw_semaphore killsb;

	/** List of device instances belonging to this connection */
	struct list_head devices;

#ifdef CONFIG_FUSE_DAX
	/* Dax mode */
	enum fuse_dax_mode dax_mode;

	/* Dax specific conn data, non-NULL if DAX is enabled */
	struct fuse_conn_dax *dax;
#endif

	/** List of filesystems using this connection */
	struct list_head mounts;

	/* New writepages go into this bucket */
	struct fuse_sync_bucket __rcu *curr_bucket;

#ifdef CONFIG_FUSE_PASSTHROUGH
	/** IDR for backing files ids */
	struct idr backing_files_map;
#endif

#ifdef CONFIG_FUSE_IO_URING
	/** uring connection information */
	struct fuse_ring *ring;
#endif

	/** Only used if the connection opts into request timeouts */
	struct {
		/* Worker for checking if any requests have timed out */
		struct delayed_work work;

		/* Request timeout (in jiffies). 0 = no timeout */
		unsigned int req_timeout;
	} timeout;

	/*
	 * This is a workaround until fuse uses iomap for reads.
	 * For fuseblk servers, this represents the blocksize passed in at
	 * mount time and for regular fuse servers, this is equivalent to
	 * inode->i_blkbits.
	 */
	u8 blkbits;
};

/*
 * Represents a mounted filesystem, potentially a submount.
 *
 * This object allows sharing a fuse_conn between separate mounts to
 * allow submounts with dedicated superblocks and thus separate device
 * IDs.
 */
struct fuse_mount {
	/* Underlying (potentially shared) connection to the FUSE server */
	struct fuse_conn *fc;

	/*
	 * Super block for this connection (fc->killsb must be held when
	 * accessing this).
	 */
	struct super_block *sb;

	/* Entry on fc->mounts */
	struct list_head fc_entry;

	struct rcu_head rcu;
};
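/*
 * Illustrative sketch (an assumption, not part of this header): walking the
 * mounts that share one connection follows the locking rule documented above,
 * i.e. fc->killsb is held while fm->sb is dereferenced:
 *
 *	struct fuse_mount *fm;
 *
 *	down_read(&fc->killsb);
 *	list_for_each_entry(fm, &fc->mounts, fc_entry) {
 *		if (fm->sb)
 *			...;	// e.g. ilookup5(), as fuse_ilookup() does
 *	}
 *	up_read(&fc->killsb);
 */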
/*
 * Empty header for FUSE opcodes without specific header needs.
 * Used as a placeholder in args->in_args[0] for consistency
 * across all FUSE operations, simplifying request handling.
 */
struct fuse_zero_header {};

static inline void fuse_set_zero_arg0(struct fuse_args *args)
{
	args->in_args[0].size = sizeof(struct fuse_zero_header);
	args->in_args[0].value = NULL;
}

static inline struct fuse_mount *get_fuse_mount_super(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
{
	return get_fuse_mount_super(sb)->fc;
}

static inline struct fuse_mount *get_fuse_mount(struct inode *inode)
{
	return get_fuse_mount_super(inode->i_sb);
}

static inline struct fuse_conn *get_fuse_conn(struct inode *inode)
{
	return get_fuse_mount_super(inode->i_sb)->fc;
}

static inline struct fuse_inode *get_fuse_inode(struct inode *inode)
{
	return container_of(inode, struct fuse_inode, inode);
}

static inline u64 get_node_id(struct inode *inode)
{
	return get_fuse_inode(inode)->nodeid;
}

static inline int invalid_nodeid(u64 nodeid)
{
	return !nodeid || nodeid == FUSE_ROOT_ID;
}

static inline u64 fuse_get_attr_version(struct fuse_conn *fc)
{
	return atomic64_read(&fc->attr_version);
}

static inline u64 fuse_get_evict_ctr(struct fuse_conn *fc)
{
	return atomic64_read(&fc->evict_ctr);
}

static inline bool fuse_stale_inode(const struct inode *inode, int generation,
				    struct fuse_attr *attr)
{
	return inode->i_generation != generation ||
		inode_wrong_type(inode, attr->mode);
}

static inline void fuse_make_bad(struct inode *inode)
{
	set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
}

static inline bool fuse_is_bad(struct inode *inode)
{
	return unlikely(test_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state));
}

static inline struct folio **fuse_folios_alloc(unsigned int nfolios,
					       gfp_t flags,
					       struct fuse_folio_desc **desc)
{
	struct folio **folios;

	folios = kzalloc(nfolios * (sizeof(struct folio *) +
				    sizeof(struct fuse_folio_desc)), flags);
	*desc = (void *) (folios + nfolios);

	return folios;
}

static inline void fuse_folio_descs_length_init(struct fuse_folio_desc *descs,
						unsigned int index,
						unsigned int nr_folios)
{
	int i;

	for (i = index; i < index + nr_folios; i++)
		descs[i].length = PAGE_SIZE - descs[i].offset;
}

static inline void fuse_sync_bucket_dec(struct fuse_sync_bucket *bucket)
{
	/* Need RCU protection to prevent use after free after the decrement */
	rcu_read_lock();
	if (atomic_dec_and_test(&bucket->count))
		wake_up(&bucket->waitq);
	rcu_read_unlock();
}

/** Device operations */
extern const struct file_operations fuse_dev_operations;

extern const struct dentry_operations fuse_dentry_operations;

/**
 * Get a filled in inode
 */
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
			int generation, struct fuse_attr *attr,
			u64 attr_valid, u64 attr_version,
			u64 evict_ctr);

int fuse_lookup_name(struct super_block *sb, u64 nodeid,
		     const struct qstr *name,
		     struct fuse_entry_out *outarg, struct inode **inode);

/**
 * Send FORGET command
 */
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup);

struct fuse_forget_link *fuse_alloc_forget(void);

/*
 * Initialize READ or READDIR request
 */
struct fuse_io_args {
	union {
		struct {
			struct fuse_read_in in;
			u64 attr_ver;
		} read;
		struct {
			struct fuse_write_in in;
			struct fuse_write_out out;
			bool folio_locked;
		} write;
	};
	struct fuse_args_pages ap;
	struct fuse_io_priv *io;
	struct fuse_file *ff;
};

void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file,
			 loff_t pos, size_t count, int opcode);

struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release);
void fuse_file_free(struct fuse_file *ff);
int fuse_finish_open(struct inode *inode, struct file *file);
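/*
 * Illustrative sketch (not part of this header): callers allocate the folio
 * pointer array and the descriptor array in one kzalloc() via
 * fuse_folios_alloc(), then give every descriptor a full-page length:
 *
 *	struct fuse_folio_desc *descs;
 *	struct folio **folios = fuse_folios_alloc(nr, GFP_KERNEL, &descs);
 *
 *	if (folios)
 *		fuse_folio_descs_length_init(descs, 0, nr);
 */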
void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
		       unsigned int flags);

/**
 * Send RELEASE or RELEASEDIR request
 */
void fuse_release_common(struct file *file, bool isdir);

/**
 * Send FSYNC or FSYNCDIR request
 */
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int opcode);

/**
 * Notify poll wakeup
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg);

/**
 * Initialize file operations on a regular file
 */
void fuse_init_file_inode(struct inode *inode, unsigned int flags);

/**
 * Initialize inode operations on regular files and special files
 */
void fuse_init_common(struct inode *inode);

/**
 * Initialize inode and file operations on a directory
 */
void fuse_init_dir(struct inode *inode);

/**
 * Initialize inode operations on a symlink
 */
void fuse_init_symlink(struct inode *inode);

/**
 * Change attributes of an inode
 */
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
			    struct fuse_statx *sx,
			    u64 attr_valid, u64 attr_version);

void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
				   struct fuse_statx *sx,
				   u64 attr_valid, u32 cache_mask,
				   u64 evict_ctr);

u32 fuse_get_cache_mask(struct inode *inode);

/**
 * Initialize the client device
 */
int fuse_dev_init(void);

/**
 * Cleanup the client device
 */
void fuse_dev_cleanup(void);

int fuse_ctl_init(void);
void __exit fuse_ctl_cleanup(void);

/**
 * Simple request sending that does request allocation and freeing
 */
ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
			      struct fuse_mount *fm,
			      struct fuse_args *args);

static inline ssize_t fuse_simple_request(struct fuse_mount *fm,
					  struct fuse_args *args)
{
	return __fuse_simple_request(&invalid_mnt_idmap, fm, args);
}

static inline ssize_t fuse_simple_idmap_request(struct mnt_idmap *idmap,
						struct fuse_mount *fm,
						struct fuse_args *args)
{
	return __fuse_simple_request(idmap, fm, args);
}

int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
			   gfp_t gfp_flags);

/**
 * Assign a unique id to a fuse request
 */
void fuse_request_assign_unique(struct fuse_iqueue *fiq, struct fuse_req *req);

/**
 * End a finished request
 */
void fuse_request_end(struct fuse_req *req);

/* Abort all requests */
void fuse_abort_conn(struct fuse_conn *fc);
void fuse_wait_aborted(struct fuse_conn *fc);

/* Check if any requests timed out */
void fuse_check_timeout(struct work_struct *work);

/**
 * Invalidate inode attributes
 */

/* Attributes possibly changed on data modification */
#define FUSE_STATX_MODIFY	(STATX_MTIME | STATX_CTIME | STATX_BLOCKS)

/* Attributes possibly changed on data and/or size modification */
#define FUSE_STATX_MODSIZE	(FUSE_STATX_MODIFY | STATX_SIZE)

void fuse_invalidate_attr(struct inode *inode);
void fuse_invalidate_attr_mask(struct inode *inode, u32 mask);

void fuse_invalidate_entry_cache(struct dentry *entry);

void fuse_invalidate_atime(struct inode *inode);

u64 fuse_time_to_jiffies(u64 sec, u32 nsec);
#define ATTR_TIMEOUT(o) \
	fuse_time_to_jiffies((o)->attr_valid, (o)->attr_valid_nsec)

void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o);

/**
 * Acquire reference to fuse_conn
 */
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);

/**
 * Initialize the fuse processing queue
 */
void fuse_pqueue_init(struct fuse_pqueue *fpq);

/**
 * Initialize fuse_conn
 */
void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
		    struct user_namespace *user_ns,
		    const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv);
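/*
 * Illustrative sketch (an assumption, not part of this header): validity
 * returned by the server as a (seconds, nanoseconds) pair is converted to a
 * jiffies deadline with ATTR_TIMEOUT(), e.g. for a struct fuse_entry_out:
 *
 *	deadline = jiffies + ATTR_TIMEOUT(&outarg);
 */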
/**
 * Release reference to fuse_conn
 */
void fuse_conn_put(struct fuse_conn *fc);

struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc);
struct fuse_dev *fuse_dev_alloc(void);
void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc);
void fuse_dev_free(struct fuse_dev *fud);
int fuse_send_init(struct fuse_mount *fm);

/**
 * Fill in superblock and initialize fuse connection
 * @sb: partially-initialized superblock to fill in
 * @ctx: mount context
 */
int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx);

/*
 * Remove the mount from the connection
 *
 * Returns whether this was the last mount
 */
bool fuse_mount_remove(struct fuse_mount *fm);

/*
 * Setup context ops for submounts
 */
int fuse_init_fs_context_submount(struct fs_context *fsc);

/*
 * Shut down the connection (possibly sending DESTROY request).
 */
void fuse_conn_destroy(struct fuse_mount *fm);

/* Drop the connection and free the fuse mount */
void fuse_mount_destroy(struct fuse_mount *fm);

/**
 * Add connection to control filesystem
 */
int fuse_ctl_add_conn(struct fuse_conn *fc);

/**
 * Remove connection from control filesystem
 */
void fuse_ctl_remove_conn(struct fuse_conn *fc);

/**
 * Is file type valid?
 */
int fuse_valid_type(int m);

bool fuse_invalid_attr(struct fuse_attr *attr);

/**
 * Is current process allowed to perform filesystem operation?
 */
bool fuse_allow_current_process(struct fuse_conn *fc);

u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id);

void fuse_flush_time_update(struct inode *inode);
void fuse_update_ctime(struct inode *inode);

int fuse_update_attributes(struct inode *inode, struct file *file, u32 mask);

void fuse_flush_writepages(struct inode *inode);

void fuse_set_nowrite(struct inode *inode);
void fuse_release_nowrite(struct inode *inode);

/**
 * Scan all fuse_mounts belonging to fc to find the first where
 * ilookup5() returns a result.  Return that result and the
 * respective fuse_mount in *fm (unless fm is NULL).
 *
 * The caller must hold fc->killsb.
 */
struct inode *fuse_ilookup(struct fuse_conn *fc, u64 nodeid,
			   struct fuse_mount **fm);

/**
 * File-system tells the kernel to invalidate cache for the given node id.
 */
int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
			     loff_t offset, loff_t len);

/**
 * File-system tells the kernel to invalidate parent attributes and
 * the dentry matching parent/name.
 *
 * If the child_nodeid is non-zero and:
 *    - matches the inode number for the dentry matching parent/name,
 *    - is not a mount point
 *    - is a file or an empty directory
 * then the dentry is unhashed (d_delete()).
 */
int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
			     u64 child_nodeid, struct qstr *name, u32 flags);
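/*
 * Illustrative sketch (an assumption, not part of this header): a server that
 * unlinks or renames "parent/name" behind the kernel's back sends a
 * FUSE_NOTIFY_INVAL_ENTRY notification, which ends up here roughly as
 *
 *	fuse_reverse_inval_entry(fc, parent_nodeid, 0, &name, 0);
 *
 * with child_nodeid == 0 when the server does not supply the child's node id.
 */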
/*
 * Try to prune this inode.  If neither the inode itself nor dentries
 * associated with this inode have any external reference, then the inode
 * can be freed.
 */
void fuse_try_prune_one_inode(struct fuse_conn *fc, u64 nodeid);

int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
		 bool isdir);

/**
 * fuse_direct_io() flags
 */

/** If set, it is WRITE; otherwise - READ */
#define FUSE_DIO_WRITE (1 << 0)

/** CUSE passes fuse_direct_io() a file whose f_mapping->host is not from FUSE */
#define FUSE_DIO_CUSE  (1 << 1)

ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags);
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags);
long fuse_ioctl_common(struct file *file, unsigned int cmd,
		       unsigned long arg, unsigned int flags);
__poll_t fuse_file_poll(struct file *file, poll_table *wait);
int fuse_dev_release(struct inode *inode, struct file *file);

bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written);

int fuse_flush_times(struct inode *inode, struct fuse_file *ff);
int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);

int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		    struct iattr *attr, struct file *file);

void fuse_set_initialized(struct fuse_conn *fc);

void fuse_unlock_inode(struct inode *inode, bool locked);
bool fuse_lock_inode(struct inode *inode);

int fuse_setxattr(struct inode *inode, const char *name, const void *value,
		  size_t size, int flags, unsigned int extra_flags);
ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
		      size_t size);
ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size);
int fuse_removexattr(struct inode *inode, const char *name);
extern const struct xattr_handler * const fuse_xattr_handlers[];

struct posix_acl;
struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu);
struct posix_acl *fuse_get_acl(struct mnt_idmap *idmap, struct dentry *dentry,
			       int type);
int fuse_set_acl(struct mnt_idmap *, struct dentry *dentry,
		 struct posix_acl *acl, int type);

/* readdir.c */
int fuse_readdir(struct file *file, struct dir_context *ctx);

/**
 * Return the number of bytes in an arguments list
 */
unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args);

/**
 * Get the next unique ID for a request
 */
u64 fuse_get_unique(struct fuse_iqueue *fiq);
void fuse_free_conn(struct fuse_conn *fc);

/* dax.c */
#define FUSE_IS_DAX(inode) (IS_ENABLED(CONFIG_FUSE_DAX) && IS_DAX(inode))
ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to);
ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from);
int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma);
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, u64 dmap_end);
int fuse_dax_conn_alloc(struct fuse_conn *fc, enum fuse_dax_mode mode,
			struct dax_device *dax_dev);
void fuse_dax_conn_free(struct fuse_conn *fc);
bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi);
void fuse_dax_inode_init(struct inode *inode, unsigned int flags);
void fuse_dax_inode_cleanup(struct inode *inode);
void fuse_dax_dontcache(struct inode *inode, unsigned int flags);
bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment);
void fuse_dax_cancel_work(struct fuse_conn *fc);

/* ioctl.c */
long fuse_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg);
int fuse_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
int fuse_fileattr_set(struct mnt_idmap *idmap,
		      struct dentry *dentry, struct file_kattr *fa);
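/*
 * Illustrative sketch (an assumption, not part of this header): a direct
 * write through fuse_direct_io() passes FUSE_DIO_WRITE in flags, e.g.
 *
 *	res = fuse_direct_io(&io, iter, &pos, FUSE_DIO_WRITE);
 *
 * while a read passes 0; CUSE additionally ORs in FUSE_DIO_CUSE.
 */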
/* iomode.c */
int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff);
int fuse_inode_uncached_io_start(struct fuse_inode *fi,
				 struct fuse_backing *fb);
void fuse_inode_uncached_io_end(struct fuse_inode *fi);

int fuse_file_io_open(struct file *file, struct inode *inode);
void fuse_file_io_release(struct fuse_file *ff, struct inode *inode);

/* file.c */

struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
				 unsigned int open_flags, bool isdir);
void fuse_file_release(struct inode *inode, struct fuse_file *ff,
		       unsigned int open_flags, fl_owner_t id, bool isdir);

/* backing.c */

#ifdef CONFIG_FUSE_PASSTHROUGH
struct fuse_backing *fuse_backing_get(struct fuse_backing *fb);
void fuse_backing_put(struct fuse_backing *fb);
struct fuse_backing *fuse_backing_lookup(struct fuse_conn *fc,
					 int backing_id);
#else

static inline struct fuse_backing *fuse_backing_get(struct fuse_backing *fb)
{
	return NULL;
}

static inline void fuse_backing_put(struct fuse_backing *fb)
{
}

static inline struct fuse_backing *fuse_backing_lookup(struct fuse_conn *fc,
							int backing_id)
{
	return NULL;
}
#endif

void fuse_backing_files_init(struct fuse_conn *fc);
void fuse_backing_files_free(struct fuse_conn *fc);
int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map);
int fuse_backing_close(struct fuse_conn *fc, int backing_id);

/* passthrough.c */
static inline struct fuse_backing *fuse_inode_backing(struct fuse_inode *fi)
{
#ifdef CONFIG_FUSE_PASSTHROUGH
	return READ_ONCE(fi->fb);
#else
	return NULL;
#endif
}

static inline struct fuse_backing *fuse_inode_backing_set(struct fuse_inode *fi,
							  struct fuse_backing *fb)
{
#ifdef CONFIG_FUSE_PASSTHROUGH
	return xchg(&fi->fb, fb);
#else
	return NULL;
#endif
}

struct fuse_backing *fuse_passthrough_open(struct file *file, int backing_id);
void fuse_passthrough_release(struct fuse_file *ff, struct fuse_backing *fb);

static inline struct file *fuse_file_passthrough(struct fuse_file *ff)
{
#ifdef CONFIG_FUSE_PASSTHROUGH
	return ff->passthrough;
#else
	return NULL;
#endif
}

ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *iter);
ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *iter);
ssize_t fuse_passthrough_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe,
				     size_t len, unsigned int flags);
ssize_t fuse_passthrough_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags);
ssize_t fuse_passthrough_mmap(struct file *file, struct vm_area_struct *vma);

#ifdef CONFIG_SYSCTL
extern int fuse_sysctl_register(void);
extern void fuse_sysctl_unregister(void);
#else
#define fuse_sysctl_register()		(0)
#define fuse_sysctl_unregister()	do { } while (0)
#endif /* CONFIG_SYSCTL */

#endif /* _FS_FUSE_I_H */
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

#include <net/inet_sock.h>
#include <net/raw.h>
#include <net/rawv6.h>

#ifdef pr_fmt
# undef pr_fmt
#endif

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

static struct raw_hashinfo *
raw_get_hashinfo(const struct inet_diag_req_v2 *r)
{
	if (r->sdiag_family == AF_INET) {
		return &raw_v4_hashinfo;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (r->sdiag_family == AF_INET6) {
		return &raw_v6_hashinfo;
#endif
	} else {
		return ERR_PTR(-EINVAL);
	}
}

/*
 * Due to the requirement of not breaking the user API, we can't simply
 * rename the @pad field in the inet_diag_req_v2 structure; instead, use
 * a helper to figure it out.
 */

static bool raw_lookup(struct net *net, const struct sock *sk,
		       const struct inet_diag_req_v2 *req)
{
	struct inet_diag_req_raw *r = (void *)req;

	if (r->sdiag_family == AF_INET)
		return raw_v4_match(net, sk, r->sdiag_raw_protocol,
				    r->id.idiag_dst[0],
				    r->id.idiag_src[0],
				    r->id.idiag_if, 0);
#if IS_ENABLED(CONFIG_IPV6)
	else
		return raw_v6_match(net, sk, r->sdiag_raw_protocol,
				    (const struct in6_addr *)r->id.idiag_src,
				    (const struct in6_addr *)r->id.idiag_dst,
				    r->id.idiag_if, 0);
#endif
	return false;
}
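/*
 * Illustrative sketch (userspace side; an assumption for clarity, not part
 * of this file): a dump request targeting, say, ICMP raw sockets reuses
 * @pad as sdiag_raw_protocol:
 *
 *	struct inet_diag_req_v2 req = {
 *		.sdiag_family	= AF_INET,
 *		.sdiag_protocol	= IPPROTO_RAW,
 *		.pad		= IPPROTO_ICMP,	// aliases sdiag_raw_protocol
 *		.idiag_states	= -1,
 *	};
 */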
static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2 *r)
{
	struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
	struct hlist_head *hlist;
	struct sock *sk;
	int slot;

	if (IS_ERR(hashinfo))
		return ERR_CAST(hashinfo);

	rcu_read_lock();
	for (slot = 0; slot < RAW_HTABLE_SIZE; slot++) {
		hlist = &hashinfo->ht[slot];
		sk_for_each_rcu(sk, hlist) {
			if (raw_lookup(net, sk, r)) {
				/*
				 * Grab it and keep until we fill
				 * diag message to be reported, so
				 * caller should call sock_put then.
				 */
				if (refcount_inc_not_zero(&sk->sk_refcnt))
					goto out_unlock;
			}
		}
	}
	sk = ERR_PTR(-ENOENT);
out_unlock:
	rcu_read_unlock();

	return sk;
}

static int raw_diag_dump_one(struct netlink_callback *cb,
			     const struct inet_diag_req_v2 *r)
{
	struct sk_buff *in_skb = cb->skb;
	struct sk_buff *rep;
	struct sock *sk;
	struct net *net;
	int err;

	net = sock_net(in_skb->sk);
	sk = raw_sock_get(net, r);
	if (IS_ERR(sk))
		return PTR_ERR(sk);

	rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
			inet_diag_msg_attrs_size() +
			nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
			GFP_KERNEL);
	if (!rep) {
		sock_put(sk);
		return -ENOMEM;
	}

	err = inet_sk_diag_fill(sk, NULL, rep, cb, r, 0,
				netlink_net_capable(in_skb, CAP_NET_ADMIN));
	sock_put(sk);
	if (err < 0) {
		kfree_skb(rep);
		return err;
	}

	err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);

	return err;
}

static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
			struct netlink_callback *cb,
			const struct inet_diag_req_v2 *r,
			bool net_admin)
{
	if (!inet_diag_bc_sk(cb->data, sk))
		return 0;

	return inet_sk_diag_fill(sk, NULL, skb, cb, r, NLM_F_MULTI, net_admin);
}

static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  const struct inet_diag_req_v2 *r)
{
	bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
	struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
	struct net *net = sock_net(skb->sk);
	int num, s_num, slot, s_slot;
	struct hlist_head *hlist;
	struct sock *sk = NULL;

	if (IS_ERR(hashinfo))
		return;

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	rcu_read_lock();
	for (slot = s_slot; slot < RAW_HTABLE_SIZE; s_num = 0, slot++) {
		num = 0;

		hlist = &hashinfo->ht[slot];
		sk_for_each_rcu(sk, hlist) {
			struct inet_sock *inet = inet_sk(sk);

			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next;
			if (sk->sk_family != r->sdiag_family)
				goto next;
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next;
			if (sk_diag_dump(sk, skb, cb, r, net_admin) < 0)
				goto out_unlock;
next:
			num++;
		}
	}

out_unlock:
	rcu_read_unlock();

	cb->args[0] = slot;
	cb->args[1] = num;
}

static void raw_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			      void *info)
{
	r->idiag_rqueue = sk_rmem_alloc_get(sk);
	r->idiag_wqueue = sk_wmem_alloc_get(sk);
}

#ifdef CONFIG_INET_DIAG_DESTROY
static int raw_diag_destroy(struct sk_buff *in_skb,
			    const struct inet_diag_req_v2 *r)
{
	struct net *net = sock_net(in_skb->sk);
	struct sock *sk;
	int err;

	sk = raw_sock_get(net, r);
	if (IS_ERR(sk))
		return PTR_ERR(sk);
	err = sock_diag_destroy(sk, ECONNABORTED);
	sock_put(sk);
	return err;
}
#endif

static const struct inet_diag_handler raw_diag_handler = {
	.owner			= THIS_MODULE,
	.dump			= raw_diag_dump,
	.dump_one		= raw_diag_dump_one,
	.idiag_get_info		= raw_diag_get_info,
	.idiag_type		= IPPROTO_RAW,
	.idiag_info_size	= 0,
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy		= raw_diag_destroy,
#endif
};
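/*
 * Note on dump continuation: a dump may span several netlink messages.
 * raw_diag_dump() records its position in cb->args[0] (hash slot) and
 * cb->args[1] (index within the slot), so the next invocation resumes
 * where the previous skb filled up.
 */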
static void __always_unused __check_inet_diag_req_raw(void)
{
	/*
	 * Make sure the two structures are identical,
	 * except the @pad field.
	 */
#define __offset_mismatch(m1, m2)			\
	(offsetof(struct inet_diag_req_v2, m1) !=	\
	 offsetof(struct inet_diag_req_raw, m2))

	BUILD_BUG_ON(sizeof(struct inet_diag_req_v2) !=
		     sizeof(struct inet_diag_req_raw));

	BUILD_BUG_ON(__offset_mismatch(sdiag_family, sdiag_family));
	BUILD_BUG_ON(__offset_mismatch(sdiag_protocol, sdiag_protocol));
	BUILD_BUG_ON(__offset_mismatch(idiag_ext, idiag_ext));
	BUILD_BUG_ON(__offset_mismatch(pad, sdiag_raw_protocol));
	BUILD_BUG_ON(__offset_mismatch(idiag_states, idiag_states));
	BUILD_BUG_ON(__offset_mismatch(id, id));

#undef __offset_mismatch
}

static int __init raw_diag_init(void)
{
	return inet_diag_register(&raw_diag_handler);
}

static void __exit raw_diag_exit(void)
{
	inet_diag_unregister(&raw_diag_handler);
}

module_init(raw_diag_init);
module_exit(raw_diag_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAW socket monitoring via SOCK_DIAG");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-255 /* AF_INET - IPPROTO_RAW */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10-255 /* AF_INET6 - IPPROTO_RAW */);
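/*
 * Usage note (an example, not implied by this file): this handler answers
 * inet_diag requests with idiag_type IPPROTO_RAW; iproute2's "ss --raw" is
 * a typical client, with per-protocol filtering via the reused @pad field
 * shown above.
 */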
5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 
6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023-2024 NXP

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE
   IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <linux/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_debugfs.h"
#include "hci_codec.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return data;
}

static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return data;
}

static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return data;
}
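/* Editorial note: the hci_cc_* handlers below all share one shape. By the
 * time a handler runs, the reply payload has already been length-checked
 * and pulled (via the hci_cc_skb_pull() helper above) by the Command
 * Complete dispatch code (not shown in this excerpt), so "data" may be
 * cast to the reply struct directly. A minimal sketch of the pattern,
 * where hci_cc_example is hypothetical and not part of this file:
 *
 *	static u8 hci_cc_example(struct hci_dev *hdev, void *data,
 *				 struct sk_buff *skb)
 *	{
 *		struct hci_ev_status *rp = data;  // already length-checked
 *
 *		bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 *		if (rp->status)
 *			return rp->status;	  // propagate controller error
 *
 *		// ... update hdev/conn state, under hci_dev_lock() ...
 *
 *		return rp->status;
 *	}
 */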
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed. This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == HCI_ERROR_COMMAND_DISALLOWED &&
	    !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_remote_name_req_cancel *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->link_policy = __le16_to_cpu(rp->policy);

	return rp->status;
}

static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return rp->status;

	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}
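/* Editorial note: Command Complete events for HCI "write" commands carry
 * only a status byte, so handlers such as hci_cc_write_link_policy() and
 * hci_cc_write_def_link_policy() above recover the value that was written
 * by reading back the parameters of the command the host itself sent, via
 * hci_sent_cmd_data(). For HCI_OP_WRITE_LINK_POLICY the command parameters
 * are a 2-byte connection handle followed by the 2-byte policy, which is
 * why the policy is read with get_unaligned_le16(sent + 2).
 */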
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;
	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}

static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = data;
	u16 num_keys;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	num_keys = le16_to_cpu(rp->num_keys);

	if (num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= num_keys;
	else
		hdev->stored_num_keys = 0;

	return rp->status;
}

static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

	return rp->status;
}

static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);

	return rp->status;
}
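/* Editorial note: HCI_OP_WRITE_SCAN_ENABLE (handled next) takes a bitmask.
 * SCAN_INQUIRY makes the controller answer inquiries (discoverable) and
 * SCAN_PAGE makes it answer paging (connectable). The handler mirrors those
 * bits into the HCI_ISCAN and HCI_PSCAN flags so the host's view of the
 * controller stays in sync: writing SCAN_INQUIRY | SCAN_PAGE sets both
 * flags, writing 0 clears both.
 */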
static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_set_event_filter *cp;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return rp->status;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);

	return rp->status;
}

static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	if (WARN_ON(!hdev))
		return HCI_ERROR_UNSPECIFIED;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}

static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = data;
	__u16 setting;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u16 setting;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return rp->status;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->num_iac = rp->num_iac;

	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);

	return rp->status;
}

static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_ssp_mode *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (!rp->status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}

	return rp->status;
}

static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		u8 *key_enc_size = hci_conn_key_enc_size(conn);

		conn->enc_key_size = rp->key_size;
		status = 0;

		/* Attempt to check if the key size is too small or if it has
		 * been downgraded from the last time it was stored as part of
		 * the link_key.
		 */
		if (conn->enc_key_size < hdev->min_enc_key_size ||
		    (key_enc_size && conn->enc_key_size < *key_enc_size)) {
			/* As slave role, the conn->state has been set to
			 * BT_CONNECTED and l2cap conn req might not be received
			 * yet, at this moment the l2cap layer almost does
			 * nothing with the non-zero status.
			 * So we also clear encrypt related bits, and then the
			 * handler of l2cap conn req will get the right secure
			 * state at a later time.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}

		/* Update the key encryption size with the connection one */
		if (key_enc_size && *key_enc_size != conn->enc_key_size)
			*key_enc_size = conn->enc_key_size;
	}

	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}
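/* Editorial note: the check above guards against encryption key size
 * downgrade. Worked example: if the host has configured min_enc_key_size
 * to 16 and the controller reports key_size = 7 for this handle, the
 * handler substitutes HCI_ERROR_AUTH_FAILURE and clears HCI_CONN_ENCRYPT
 * and HCI_CONN_AES_CCM, so hci_encrypt_cfm() tells upper layers (e.g.
 * L2CAP) the link is not secure instead of silently accepting the weaker
 * key. The same path fires if the key is smaller than the size remembered
 * with the stored link key.
 */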
static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	return rp->status;
}

static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		rp->status = 0xff;
		goto unlock;
	}

	if (!rp->status)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device.
	 */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}

static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page) {
		if (hci_test_quirk(hdev,
				   HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2))
			bt_dev_warn(hdev, "broken local ext features page 2");
		else
			hdev->max_page = rp->max_page;
	}

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}

static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (hci_test_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	if (!read_voice_setting_capable(hdev))
		hdev->sco_pkts = 0;

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	if (!hdev->acl_mtu || !hdev->acl_pkts)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}
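/* Editorial note: the packet counts read above seed the host-side flow
 * control credits. hdev->acl_cnt and hdev->sco_cnt start at acl_pkts and
 * sco_pkts, are decremented for each packet queued to the controller, and
 * are replenished when the controller reports Number Of Completed Packets
 * events. E.g. acl_max_pkt = 8 means at most 8 ACL packets may be in
 * flight before the TX path has to wait for completions.
 */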
static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);

	return rp->status;
}

static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}

static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}

static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}

static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}

static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}

static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev,
						 &rp->bdaddr, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 8);

	return rp->status;
}

static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
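/* Editorial note: the next few handlers track LE address privacy. When the
 * random address just programmed into the controller equals hdev->rpa, the
 * device is running on a Resolvable Private Address, so the handler clears
 * HCI_RPA_EXPIRED and re-arms the rpa_expired delayed work; after
 * hdev->rpa_timeout seconds the RPA is considered stale so that a fresh
 * one can be generated and written.
 */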
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only the adv instance, since handle 0x00 shall be using
	 * HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended and
	 * non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}

static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv && !adv->periodic)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled check if there are
			 * any other instance enabled before clearing HCI_LE_ADV
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
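/* Editorial note: legacy advertising (hci_cc_le_set_adv_enable() above) is
 * a single on/off switch, while the extended variant enables or disables
 * individual advertising sets. That is why hci_cc_le_set_ext_adv_enable()
 * only clears HCI_LE_ADV once no adv_info instance remains enabled: e.g.
 * disabling set 1 while set 2 is still advertising must leave the flag set.
 */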
static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > max_adv_len(hdev))
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
			clear_pending_adv_report(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		}
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}

static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_states, rp->le_states, 8);

	return rp->status;
}

static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

	return rp->status;
}

static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}

static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_resolv_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
					       void *data,
					       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

	return rp->status;
}

static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
			   struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *mode;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;

	return rp->status;
}
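/* Editorial note: the handlers switch here from Command Complete (hci_cc_*)
 * to Command Status (hci_cs_*). A Command Status event carries no return
 * parameters, only a status for a command that is still in progress, so
 * the hci_cs_* handlers below mostly clean up host state when the command
 * was rejected (status != 0), e.g. tearing down the connection object that
 * was optimistically created for HCI_OP_CREATE_CONN.
 */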
HCI_OP_INQUIRY)) set_bit(HCI_INQUIRY, &hdev->flags); } static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) { struct hci_cp_create_conn *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn); if (status) { if (conn && conn->state == BT_CONNECT) { conn->state = BT_CLOSED; hci_connect_cfm(conn, status); hci_conn_del(conn); } } else { if (!conn) { conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr, HCI_ROLE_MASTER); if (IS_ERR(conn)) bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); } } hci_dev_unlock(hdev); } static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) { struct hci_cp_add_sco *cp; struct hci_conn *acl; struct hci_link *link; __u16 handle; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); if (!cp) return; handle = __le16_to_cpu(cp->handle); bt_dev_dbg(hdev, "handle 0x%4.4x", handle); hci_dev_lock(hdev); acl = hci_conn_hash_lookup_handle(hdev, handle); if (acl) { link = list_first_entry_or_null(&acl->link_list, struct hci_link, list); if (link && link->conn) { link->conn->state = BT_CLOSED; hci_connect_cfm(link->conn, status); hci_conn_del(link->conn); } } hci_dev_unlock(hdev); } static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) { struct hci_cp_auth_requested *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { hci_connect_cfm(conn, status); hci_conn_drop(conn); } } hci_dev_unlock(hdev); } static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) { struct hci_cp_set_conn_encrypt *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { hci_connect_cfm(conn, status); hci_conn_drop(conn); } } hci_dev_unlock(hdev); } static int hci_outgoing_auth_needed(struct hci_dev *hdev, struct hci_conn *conn) { if (conn->state != BT_CONFIG || !conn->out) return 0; if (conn->pending_sec_level == BT_SECURITY_SDP) return 0; /* Only request authentication for SSP connections or non-SSP * devices with sec_level MEDIUM or HIGH or if MITM protection * is requested. 
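* (Bit 0 of auth_type encodes the MITM requirement, hence the auth_type & 0x01 test below.)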
*/ if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && conn->pending_sec_level != BT_SECURITY_FIPS && conn->pending_sec_level != BT_SECURITY_HIGH && conn->pending_sec_level != BT_SECURITY_MEDIUM) return 0; return 1; } static int hci_resolve_name(struct hci_dev *hdev, struct inquiry_entry *e) { struct hci_cp_remote_name_req cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &e->data.bdaddr); cp.pscan_rep_mode = e->data.pscan_rep_mode; cp.pscan_mode = e->data.pscan_mode; cp.clock_offset = e->data.clock_offset; return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); } static bool hci_resolve_next_name(struct hci_dev *hdev) { struct discovery_state *discov = &hdev->discovery; struct inquiry_entry *e; if (list_empty(&discov->resolve)) return false; /* We should stop if we already spent too much time resolving names. */ if (time_after(jiffies, discov->name_resolve_timeout)) { bt_dev_warn_ratelimited(hdev, "Name resolve takes too long."); return false; } e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); if (!e) return false; if (hci_resolve_name(hdev, e) == 0) { e->name_state = NAME_PENDING; return true; } return false; } static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, bdaddr_t *bdaddr, u8 *name, u8 name_len) { struct discovery_state *discov = &hdev->discovery; struct inquiry_entry *e; /* Update the mgmt connected state if necessary. Be careful, however, * with conn objects that exist but are not (yet) connected. * Only those in BT_CONFIG or BT_CONNECTED states can be * considered connected. */ if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) mgmt_device_connected(hdev, conn, name, name_len); if (discov->state == DISCOVERY_STOPPED) return; if (discov->state == DISCOVERY_STOPPING) goto discov_complete; if (discov->state != DISCOVERY_RESOLVING) return; e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); /* If the device was not found in the list of devices whose names are * pending, there is no need to continue resolving the next name, as it * will be done upon receiving another Remote Name Request Complete * event. */ if (!e) return; list_del(&e->list); e->name_state = name ?
NAME_KNOWN : NAME_NOT_KNOWN; mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi, name, name_len); if (hci_resolve_next_name(hdev)) return; discov_complete: hci_discovery_set_state(hdev, DISCOVERY_STOPPED); } static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) { struct hci_cp_remote_name_req *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); /* If successful wait for the name req complete event before * checking for the need to do authentication */ if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); if (hci_dev_test_flag(hdev, HCI_MGMT)) hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); if (!conn) goto unlock; if (!hci_outgoing_auth_needed(hdev, conn)) goto unlock; if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { struct hci_cp_auth_requested auth_cp; set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); auth_cp.handle = __cpu_to_le16(conn->handle); hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(auth_cp), &auth_cp); } unlock: hci_dev_unlock(hdev); } static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) { struct hci_cp_read_remote_features *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { hci_connect_cfm(conn, status); hci_conn_drop(conn); } } hci_dev_unlock(hdev); } static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) { struct hci_cp_read_remote_ext_features *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { hci_connect_cfm(conn, status); hci_conn_drop(conn); } } hci_dev_unlock(hdev); } static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle, __u8 status) { struct hci_conn *acl; struct hci_link *link; bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status); hci_dev_lock(hdev); acl = hci_conn_hash_lookup_handle(hdev, handle); if (acl) { link = list_first_entry_or_null(&acl->link_list, struct hci_link, list); if (link && link->conn) { link->conn->state = BT_CLOSED; hci_connect_cfm(link->conn, status); hci_conn_del(link->conn); } } hci_dev_unlock(hdev); } static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) { struct hci_cp_setup_sync_conn *cp; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); if (!cp) return; hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); } static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) { struct hci_cp_enhanced_setup_sync_conn *cp; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); if (!cp) return; hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); } static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) { struct hci_cp_sniff_mode *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); if (!cp) return; 
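/* Both sniff-mode command-status handlers below share the same recovery
 * logic: on failure, clear the pending mode-change flag and, if a SCO
 * setup was parked behind the mode change, resume it with the error so
 * the SCO user gets notified. A minimal sketch of that shared shape
 * (hypothetical helper, not part of this file):
 *
 *	static void sniff_cmd_failed(struct hci_dev *hdev, u16 handle,
 *				     u8 status)
 *	{
 *		struct hci_conn *conn;
 *
 *		conn = hci_conn_hash_lookup_handle(hdev, handle);
 *		if (!conn)
 *			return;
 *		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
 *		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
 *			hci_sco_setup(conn, status);
 *	}
 */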
hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) hci_sco_setup(conn, status); } hci_dev_unlock(hdev); } static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) { struct hci_cp_exit_sniff_mode *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) hci_sco_setup(conn, status); } hci_dev_unlock(hdev); } static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) { struct hci_cp_disconnect *cp; struct hci_conn_params *params; struct hci_conn *conn; bool mgmt_conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended * otherwise cleanup the connection immediately. */ if (!status && !hdev->suspended) return; cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (!conn) goto unlock; if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) { mgmt_disconnect_failed(hdev, &conn->dst, conn->type, conn->dst_type, status); if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { hdev->cur_adv_instance = conn->adv_instance; hci_enable_advertising(hdev); } /* Inform sockets conn is gone before we delete it */ hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED); goto done; } /* During suspend, mark connection as closed immediately * since we might not receive HCI_EV_DISCONN_COMPLETE */ if (hdev->suspended) conn->state = BT_CLOSED; mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); if (conn->type == ACL_LINK) { if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) hci_remove_link_key(hdev, &conn->dst); } params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); if (params) { switch (params->auto_connect) { case HCI_AUTO_CONN_LINK_LOSS: if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) break; fallthrough; case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: hci_pend_le_list_del_init(params); hci_pend_le_list_add(params, &hdev->pend_le_conns); break; default: break; } } mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, cp->reason, mgmt_conn); hci_disconn_cfm(conn, cp->reason); done: /* If the disconnection failed for any reason, the upper layer * does not retry to disconnect in current implementation. * Hence, we need to do some basic cleanup here and re-enable * advertising if necessary. */ hci_conn_del(conn); unlock: hci_dev_unlock(hdev); } static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) { /* When using controller based address resolution, then the new * address types 0x02 and 0x03 are used. 
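* (0x02 is a Public Identity Address resolved by the controller, 0x03 a Random (static) Identity Address resolved by the controller.)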
These types need to be * converted back into either public address or random address type */ switch (type) { case ADDR_LE_DEV_PUBLIC_RESOLVED: if (resolved) *resolved = true; return ADDR_LE_DEV_PUBLIC; case ADDR_LE_DEV_RANDOM_RESOLVED: if (resolved) *resolved = true; return ADDR_LE_DEV_RANDOM; } if (resolved) *resolved = false; return type; } static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, u8 peer_addr_type, u8 own_address_type, u8 filter_policy) { struct hci_conn *conn; conn = hci_conn_hash_lookup_le(hdev, peer_addr, peer_addr_type); if (!conn) return; own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); /* Store the initiator and responder address information which * is needed for SMP. These values will not change during the * lifetime of the connection. */ conn->init_addr_type = own_address_type; if (own_address_type == ADDR_LE_DEV_RANDOM) bacpy(&conn->init_addr, &hdev->random_addr); else bacpy(&conn->init_addr, &hdev->bdaddr); conn->resp_addr_type = peer_addr_type; bacpy(&conn->resp_addr, peer_addr); } static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) { struct hci_cp_le_create_conn *cp; bt_dev_dbg(hdev, "status 0x%2.2x", status); /* All connection failure handling is taken care of by the * hci_conn_failed function which is triggered by the HCI * request completion callbacks used for connecting. */ if (status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); if (!cp) return; hci_dev_lock(hdev); cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, cp->own_address_type, cp->filter_policy); hci_dev_unlock(hdev); } static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) { struct hci_cp_le_ext_create_conn *cp; bt_dev_dbg(hdev, "status 0x%2.2x", status); /* All connection failure handling is taken care of by the * hci_conn_failed function which is triggered by the HCI * request completion callbacks used for connecting. 
*/ if (status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); if (!cp) return; hci_dev_lock(hdev); cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, cp->own_addr_type, cp->filter_policy); hci_dev_unlock(hdev); } static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) { struct hci_cp_le_read_remote_features *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { hci_connect_cfm(conn, status); hci_conn_drop(conn); } } hci_dev_unlock(hdev); } static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) { struct hci_cp_le_start_enc *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; hci_dev_lock(hdev); cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); if (!cp) goto unlock; conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (!conn) goto unlock; if (conn->state != BT_CONNECTED) goto unlock; hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); unlock: hci_dev_unlock(hdev); } static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) { struct hci_cp_switch_role *cp; struct hci_conn *conn; BT_DBG("%s status 0x%2.2x", hdev->name, status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); if (conn) clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); hci_dev_unlock(hdev); } static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *ev = data; struct discovery_state *discov = &hdev->discovery; struct inquiry_entry *e; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) return; smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ wake_up_bit(&hdev->flags, HCI_INQUIRY); if (!hci_dev_test_flag(hdev, HCI_MGMT)) return; hci_dev_lock(hdev); if (discov->state != DISCOVERY_FINDING) goto unlock; if (list_empty(&discov->resolve)) { /* When BR/EDR inquiry is active and no LE scanning is in * progress, then change discovery state to indicate completion. * * When running LE scanning and BR/EDR inquiry simultaneously * and the LE scan already finished, then change the discovery * state to indicate completion. */ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); goto unlock; } e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); if (e && hci_resolve_name(hdev, e) == 0) { e->name_state = NAME_PENDING; hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION; } else { /* When BR/EDR inquiry is active and no LE scanning is in * progress, then change discovery state to indicate completion. * * When running LE scanning and BR/EDR inquiry simultaneously * and the LE scan already finished, then change the discovery * state to indicate completion. 
*/ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); } unlock: hci_dev_unlock(hdev); } static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata, struct sk_buff *skb) { struct hci_ev_inquiry_result *ev = edata; struct inquiry_data data; int i; if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT, flex_array_size(ev, info, ev->num))) return; bt_dev_dbg(hdev, "num %d", ev->num); if (!ev->num) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) return; hci_dev_lock(hdev); for (i = 0; i < ev->num; i++) { struct inquiry_info *info = &ev->info[i]; u32 flags; bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; data.pscan_period_mode = info->pscan_period_mode; data.pscan_mode = info->pscan_mode; memcpy(data.dev_class, info->dev_class, 3); data.clock_offset = info->clock_offset; data.rssi = HCI_RSSI_INVALID; data.ssp_mode = 0x00; flags = hci_inquiry_cache_update(hdev, &data, false); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, HCI_RSSI_INVALID, flags, NULL, 0, NULL, 0, 0); } hci_dev_unlock(hdev); } static int hci_read_enc_key_size(struct hci_dev *hdev, struct hci_conn *conn) { struct hci_cp_read_enc_key_size cp; u8 *key_enc_size = hci_conn_key_enc_size(conn); if (!read_key_size_capable(hdev)) { conn->enc_key_size = HCI_LINK_KEY_SIZE; return -EOPNOTSUPP; } bt_dev_dbg(hdev, "hcon %p", conn); memset(&cp, 0, sizeof(cp)); cp.handle = cpu_to_le16(conn->handle); /* If the key enc_size is already known, use it as conn->enc_key_size, * otherwise use hdev->min_enc_key_size so the likes of * l2cap_check_enc_key_size don't fail while waiting for * the HCI_OP_READ_ENC_KEY_SIZE response. */ if (key_enc_size && *key_enc_size) conn->enc_key_size = *key_enc_size; else conn->enc_key_size = hdev->min_enc_key_size; return hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp); } static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_conn_complete *ev = data; struct hci_conn *conn; u8 status = ev->status; bt_dev_dbg(hdev, "status 0x%2.2x", status); hci_dev_lock(hdev); /* Check for existing connection: * * 1. If it doesn't exist then it must be the receiver/slave role. * 2. If it does exist, confirm that it is connecting/BT_CONNECT in case * of the initiator/master role, since there could be a collision where * either side is attempting to connect, or something like fuzz * testing is trying to play tricks and destroy the hcon object before * it even attempts to connect (e.g. hcon->state == BT_OPEN). */ conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if (!conn || (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) { /* In case of an error status with no connection pending, * just unlock as there is nothing to clean up. */ if (ev->status) goto unlock; /* Connection may not exist if auto-connected. Check the bredr * allowlist to see if this device is allowed to auto connect. * If the link is an ACL type, create a connection object * automatically. * * Auto-connect will only occur if the event filter is * programmed with a given address. Right now, the event filter is * only used during suspend.
*/ if (ev->link_type == ACL_LINK && hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, BDADDR_BREDR)) { conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr, HCI_ROLE_SLAVE); if (IS_ERR(conn)) { bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); goto unlock; } } else { if (ev->link_type != SCO_LINK) goto unlock; conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); if (!conn) goto unlock; conn->type = SCO_LINK; } } /* The HCI_Connection_Complete event is only sent once per connection. * Processing it more than once per connection can corrupt kernel memory. * * As the connection handle is set here for the first time, it indicates * whether the connection is already set up. */ if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); goto unlock; } if (!status) { status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle)); if (status) goto done; if (conn->type == ACL_LINK) { conn->state = BT_CONFIG; hci_conn_hold(conn); if (!conn->out && !hci_conn_ssp_enabled(conn) && !hci_find_link_key(hdev, &ev->bdaddr)) conn->disc_timeout = HCI_PAIRING_TIMEOUT; else conn->disc_timeout = HCI_DISCONN_TIMEOUT; } else conn->state = BT_CONNECTED; hci_debugfs_create_conn(conn); hci_conn_add_sysfs(conn); if (test_bit(HCI_AUTH, &hdev->flags)) set_bit(HCI_CONN_AUTH, &conn->flags); if (test_bit(HCI_ENCRYPT, &hdev->flags)) set_bit(HCI_CONN_ENCRYPT, &conn->flags); /* "Link key request" completed ahead of "connect request" completion */ if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) && ev->link_type == ACL_LINK) { struct link_key *key; key = hci_find_link_key(hdev, &ev->bdaddr); if (key) { set_bit(HCI_CONN_ENCRYPT, &conn->flags); hci_read_enc_key_size(hdev, conn); hci_encrypt_cfm(conn, ev->status); } } /* Get remote features */ if (conn->type == ACL_LINK) { struct hci_cp_read_remote_features cp; cp.handle = ev->handle; hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, sizeof(cp), &cp); hci_update_scan(hdev); } /* Set packet type for incoming connection */ if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { struct hci_cp_change_conn_ptype cp; cp.handle = ev->handle; cp.pkt_type = cpu_to_le16(conn->pkt_type); hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), &cp); } } if (conn->type == ACL_LINK) hci_sco_setup(conn, ev->status); done: if (status) { hci_conn_failed(conn, status); } else if (ev->link_type == SCO_LINK) { switch (conn->setting & SCO_AIRMODE_MASK) { case SCO_AIRMODE_CVSD: if (hdev->notify) hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); break; } hci_connect_cfm(conn, status); } unlock: hci_dev_unlock(hdev); } static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct hci_cp_reject_conn_req cp; bacpy(&cp.bdaddr, bdaddr); cp.reason = HCI_ERROR_REJ_BAD_ADDR; hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); } static void hci_conn_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_conn_request *ev = data; int mask = hdev->link_mode; struct inquiry_entry *ie; struct hci_conn *conn; __u8 flags = 0; bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type); /* Reject an incoming connection from a device with the same BD_ADDR, * guarding against CVE-2020-26555 */ if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) { bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n", &ev->bdaddr); hci_reject_conn(hdev, &ev->bdaddr); return; } mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, &flags); if (!(mask & HCI_LM_ACCEPT)) {
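/* Neither the device's default link mode nor any protocol (L2CAP/SCO,
 * via hci_proto_connect_ind()) flagged this incoming connection as
 * acceptable, so reject it. */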
hci_reject_conn(hdev, &ev->bdaddr); return; } hci_dev_lock(hdev); if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr, BDADDR_BREDR)) { hci_reject_conn(hdev, &ev->bdaddr); goto unlock; } /* Require HCI_CONNECTABLE or an accept list entry to accept the * connection. These features are only touched through mgmt so * only do the checks if HCI_MGMT is set. */ if (hci_dev_test_flag(hdev, HCI_MGMT) && !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, BDADDR_BREDR)) { hci_reject_conn(hdev, &ev->bdaddr); goto unlock; } /* Connection accepted */ ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); if (ie) memcpy(ie->data.dev_class, ev->dev_class, 3); conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if (!conn) { conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr, HCI_ROLE_SLAVE); if (IS_ERR(conn)) { bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); goto unlock; } } memcpy(conn->dev_class, ev->dev_class, 3); hci_dev_unlock(hdev); if (ev->link_type == ACL_LINK || (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { struct hci_cp_accept_conn_req cp; conn->state = BT_CONNECT; bacpy(&cp.bdaddr, &ev->bdaddr); if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) cp.role = 0x00; /* Become central */ else cp.role = 0x01; /* Remain peripheral */ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); } else if (!(flags & HCI_PROTO_DEFER)) { struct hci_cp_accept_sync_conn_req cp; conn->state = BT_CONNECT; bacpy(&cp.bdaddr, &ev->bdaddr); cp.pkt_type = cpu_to_le16(conn->pkt_type); cp.tx_bandwidth = cpu_to_le32(0x00001f40); cp.rx_bandwidth = cpu_to_le32(0x00001f40); cp.max_latency = cpu_to_le16(0xffff); cp.content_format = cpu_to_le16(hdev->voice_setting); cp.retrans_effort = 0xff; hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), &cp); } else { conn->state = BT_CONNECT2; hci_connect_cfm(conn, 0); } return; unlock: hci_dev_unlock(hdev); } static u8 hci_to_mgmt_reason(u8 err) { switch (err) { case HCI_ERROR_CONNECTION_TIMEOUT: return MGMT_DEV_DISCONN_TIMEOUT; case HCI_ERROR_REMOTE_USER_TERM: case HCI_ERROR_REMOTE_LOW_RESOURCES: case HCI_ERROR_REMOTE_POWER_OFF: return MGMT_DEV_DISCONN_REMOTE; case HCI_ERROR_LOCAL_HOST_TERM: return MGMT_DEV_DISCONN_LOCAL_HOST; default: return MGMT_DEV_DISCONN_UNKNOWN; } } static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_disconn_complete *ev = data; u8 reason; struct hci_conn_params *params; struct hci_conn *conn; bool mgmt_connected; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; if (ev->status) { mgmt_disconnect_failed(hdev, &conn->dst, conn->type, conn->dst_type, ev->status); goto unlock; } conn->state = BT_CLOSED; mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags)) reason = MGMT_DEV_DISCONN_AUTH_FAILURE; else reason = hci_to_mgmt_reason(ev->reason); mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, reason, mgmt_connected); if (conn->type == ACL_LINK) { if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) hci_remove_link_key(hdev, &conn->dst); hci_update_scan(hdev); } /* Re-enable passive scanning if disconnected device is marked * as auto-connectable. 
*/ if (conn->type == LE_LINK) { params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); if (params) { switch (params->auto_connect) { case HCI_AUTO_CONN_LINK_LOSS: if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) break; fallthrough; case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: hci_pend_le_list_del_init(params); hci_pend_le_list_add(params, &hdev->pend_le_conns); hci_update_passive_scan(hdev); break; default: break; } } } hci_disconn_cfm(conn, ev->reason); /* Re-enable advertising if necessary, since it might * have been disabled by the connection. From the * HCI_LE_Set_Advertise_Enable command description in * the core specification (v4.0): * "The Controller shall continue advertising until the Host * issues an LE_Set_Advertise_Enable command with * Advertising_Enable set to 0x00 (Advertising is disabled) * or until a connection is created or until the Advertising * is timed out due to Directed Advertising." */ if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { hdev->cur_adv_instance = conn->adv_instance; hci_enable_advertising(hdev); } hci_conn_del(conn); unlock: hci_dev_unlock(hdev); } static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_auth_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; if (!ev->status) { clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); set_bit(HCI_CONN_AUTH, &conn->flags); conn->sec_level = conn->pending_sec_level; } else { if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); mgmt_auth_failed(conn, ev->status); } clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); if (conn->state == BT_CONFIG) { if (!ev->status && hci_conn_ssp_enabled(conn)) { struct hci_cp_set_conn_encrypt cp; cp.handle = ev->handle; cp.encrypt = 0x01; hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp); } else { conn->state = BT_CONNECTED; hci_connect_cfm(conn, ev->status); hci_conn_drop(conn); } } else { hci_auth_cfm(conn, ev->status); hci_conn_hold(conn); conn->disc_timeout = HCI_DISCONN_TIMEOUT; hci_conn_drop(conn); } if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { if (!ev->status) { struct hci_cp_set_conn_encrypt cp; cp.handle = ev->handle; cp.encrypt = 0x01; hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp); } else { clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); hci_encrypt_cfm(conn, ev->status); } } unlock: hci_dev_unlock(hdev); } static void hci_remote_name_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_remote_name *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto check_auth; if (ev->status == 0) hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, strnlen(ev->name, HCI_MAX_NAME_LENGTH)); else hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); check_auth: if (!conn) goto unlock; if (!hci_outgoing_auth_needed(hdev, conn)) goto unlock; if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { struct hci_cp_auth_requested cp; set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); cp.handle = __cpu_to_le16(conn->handle); hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); } unlock: hci_dev_unlock(hdev); } static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct 
hci_ev_encrypt_change *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; if (!ev->status) { if (ev->encrypt) { /* Encryption implies authentication */ set_bit(HCI_CONN_AUTH, &conn->flags); set_bit(HCI_CONN_ENCRYPT, &conn->flags); conn->sec_level = conn->pending_sec_level; /* P-256 authentication key implies FIPS */ if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) set_bit(HCI_CONN_FIPS, &conn->flags); if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || conn->type == LE_LINK) set_bit(HCI_CONN_AES_CCM, &conn->flags); } else { clear_bit(HCI_CONN_ENCRYPT, &conn->flags); clear_bit(HCI_CONN_AES_CCM, &conn->flags); } } /* We should disregard the current RPA and generate a new one * whenever the encryption procedure fails. */ if (ev->status && conn->type == LE_LINK) { hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); hci_adv_instances_set_rpa_expired(hdev, true); } clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); /* Check that the link security requirements are met */ if (!hci_conn_check_link_mode(conn)) ev->status = HCI_ERROR_AUTH_FAILURE; if (ev->status && conn->state == BT_CONNECTED) { if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); /* Notify upper layers so they can clean up before * disconnecting. */ hci_encrypt_cfm(conn, ev->status); hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); goto unlock; } /* Try reading the encryption key size for encrypted ACL links */ if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { if (hci_read_enc_key_size(hdev, conn)) goto notify; goto unlock; } /* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851 based controllers * to avoid unexpected SMP command errors when pairing. */ if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT)) goto notify; /* Set the default Authenticated Payload Timeout after * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B, * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be * sent when the link is active and Encryption is enabled; the conn * type can be either LE or ACL, and the controller must support LMP * Ping. AES-CCM encryption is required as well.
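* Concretely, the check below requires HCI_CONN_ENCRYPT and
 * HCI_CONN_AES_CCM to be set, plus either an ACL link with
 * lmp_ping_capable() or an LE link with the HCI_LE_PING feature bit.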
*/ if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) && test_bit(HCI_CONN_AES_CCM, &conn->flags) && ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) || (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) { struct hci_cp_write_auth_payload_to cp; cp.handle = cpu_to_le16(conn->handle); cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, sizeof(cp), &cp)) bt_dev_err(hdev, "write auth payload timeout failed"); } notify: hci_encrypt_cfm(conn, ev->status); unlock: hci_dev_unlock(hdev); } static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_change_link_key_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn) { if (!ev->status) set_bit(HCI_CONN_SECURE, &conn->flags); clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); hci_key_change_cfm(conn, ev->status); } hci_dev_unlock(hdev); } static void hci_remote_features_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_remote_features *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; if (!ev->status) memcpy(conn->features[0], ev->features, 8); if (conn->state != BT_CONFIG) goto unlock; if (!ev->status && lmp_ext_feat_capable(hdev) && lmp_ext_feat_capable(conn)) { struct hci_cp_read_remote_ext_features cp; cp.handle = ev->handle; cp.page = 0x01; hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, sizeof(cp), &cp); goto unlock; } if (!ev->status) { struct hci_cp_remote_name_req cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); cp.pscan_rep_mode = 0x02; hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); } else { mgmt_device_connected(hdev, conn, NULL, 0); } if (!hci_outgoing_auth_needed(hdev, conn)) { conn->state = BT_CONNECTED; hci_connect_cfm(conn, ev->status); hci_conn_drop(conn); } unlock: hci_dev_unlock(hdev); } static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) { cancel_delayed_work(&hdev->cmd_timer); rcu_read_lock(); if (!test_bit(HCI_RESET, &hdev->flags)) { if (ncmd) { cancel_delayed_work(&hdev->ncmd_timer); atomic_set(&hdev->cmd_cnt, 1); } else { if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer, HCI_NCMD_TIMEOUT); } } rcu_read_unlock(); } static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_read_buffer_size_v2 *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->le_mtu = __le16_to_cpu(rp->acl_mtu); hdev->le_pkts = rp->acl_max_pkt; hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu); hdev->iso_pkts = rp->iso_max_pkt; hdev->le_cnt = hdev->le_pkts; hdev->iso_cnt = hdev->iso_pkts; BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu, hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts); if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU) return HCI_ERROR_INVALID_PARAMETERS; return rp->status; } static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status) { struct hci_conn *conn, *tmp; lockdep_assert_held(&hdev->lock); list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) { if (conn->type != CIS_LINK || conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig) continue; if 
(HCI_CONN_HANDLE_UNSET(conn->handle)) hci_conn_failed(conn, status); } } static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_set_cig_params *rp = data; struct hci_cp_le_set_cig_params *cp; struct hci_conn *conn; u8 status = rp->status; bool pending = false; int i; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS); if (!rp->status && (!cp || rp->num_handles != cp->num_cis || rp->cig_id != cp->cig_id)) { bt_dev_err(hdev, "unexpected Set CIG Parameters response data"); status = HCI_ERROR_UNSPECIFIED; } hci_dev_lock(hdev); /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554 * * If the Status return parameter is non-zero, then the state of the CIG * and its CIS configurations shall not be changed by the command. If * the CIG did not already exist, it shall not be created. */ if (status) { /* Keep current configuration, fail only the unbound CIS */ hci_unbound_cis_failed(hdev, rp->cig_id, status); goto unlock; } /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553 * * If the Status return parameter is zero, then the Controller shall * set the Connection_Handle arrayed return parameter to the connection * handle(s) corresponding to the CIS configurations specified in * the CIS_IDs command parameter, in the same order. */ for (i = 0; i < rp->num_handles; ++i) { conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id, cp->cis[i].cis_id); if (!conn || !bacmp(&conn->dst, BDADDR_ANY)) continue; if (conn->state != BT_BOUND && conn->state != BT_CONNECT) continue; if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i]))) continue; if (conn->state == BT_CONNECT) pending = true; } unlock: if (pending) hci_le_create_cis_pending(hdev); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_setup_iso_path *rp = data; struct hci_cp_le_setup_iso_path *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH); if (!cp) return rp->status; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (!conn) goto unlock; if (rp->status) { hci_connect_cfm(conn, rp->status); hci_conn_del(conn); goto unlock; } switch (cp->direction) { /* Input (Host to Controller) */ case 0x00: /* Only confirm connection if output only */ if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu) hci_connect_cfm(conn, rp->status); break; /* Output (Controller to Host) */ case 0x01: /* Confirm connection since conn->iso_qos is always configured * last. 
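* (For a bidirectional CIS both data paths get set up; the
 * host-to-controller direction above deliberately skips the
 * confirmation, so it happens exactly once, here.)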
*/ hci_connect_cfm(conn, rp->status); /* Notify device connected in case it is a BIG Sync */ if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags)) mgmt_device_connected(hdev, conn, NULL, 0); break; } unlock: hci_dev_unlock(hdev); return rp->status; } static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status) { bt_dev_dbg(hdev, "status 0x%2.2x", status); } static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; struct hci_cp_le_set_per_adv_params *cp; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS); if (!cp) return rp->status; /* TODO: set the conn state */ return rp->status; } static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; struct hci_cp_le_set_per_adv_enable *cp; struct adv_info *adv = NULL, *n; u8 per_adv_cnt = 0; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE); if (!cp) return rp->status; hci_dev_lock(hdev); adv = hci_find_adv_instance(hdev, cp->handle); if (cp->enable) { hci_dev_set_flag(hdev, HCI_LE_PER_ADV); if (adv) adv->enabled = true; } else { /* If just one instance was disabled check if there are * any other instance enabled before clearing HCI_LE_PER_ADV. * The current periodic adv instance will be marked as * disabled once extended advertising is also disabled. */ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { if (adv->periodic && adv->enabled) per_adv_cnt++; } if (per_adv_cnt > 1) goto unlock; hci_dev_clear_flag(hdev, HCI_LE_PER_ADV); } unlock: hci_dev_unlock(hdev); return rp->status; } #define HCI_CC_VL(_op, _func, _min, _max) \ { \ .op = _op, \ .func = _func, \ .min_len = _min, \ .max_len = _max, \ } #define HCI_CC(_op, _func, _len) \ HCI_CC_VL(_op, _func, _len, _len) #define HCI_CC_STATUS(_op, _func) \ HCI_CC(_op, _func, sizeof(struct hci_ev_status)) static const struct hci_cc { u16 op; u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); u16 min_len; u16 max_len; } hci_cc_table[] = { HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), HCI_CC(HCI_OP_REMOTE_NAME_REQ_CANCEL, hci_cc_remote_name_req_cancel, sizeof(struct hci_rp_remote_name_req_cancel)), HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, sizeof(struct hci_rp_role_discovery)), HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, sizeof(struct hci_rp_read_link_policy)), HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy, sizeof(struct hci_rp_write_link_policy)), HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy, sizeof(struct hci_rp_read_def_link_policy)), HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY, hci_cc_write_def_link_policy), HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset), HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key, sizeof(struct hci_rp_read_stored_link_key)), HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key, sizeof(struct hci_rp_delete_stored_link_key)), HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name), HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name, sizeof(struct hci_rp_read_local_name)), HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable), HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, 
hci_cc_write_encrypt_mode), HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable), HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter), HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev, sizeof(struct hci_rp_read_class_of_dev)), HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev), HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting, sizeof(struct hci_rp_read_voice_setting)), HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting), HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac, sizeof(struct hci_rp_read_num_supported_iac)), HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode), HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support), HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout, sizeof(struct hci_rp_read_auth_payload_to)), HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout, sizeof(struct hci_rp_write_auth_payload_to)), HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version, sizeof(struct hci_rp_read_local_version)), HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands, sizeof(struct hci_rp_read_local_commands)), HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features, sizeof(struct hci_rp_read_local_features)), HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features, sizeof(struct hci_rp_read_local_ext_features)), HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size, sizeof(struct hci_rp_read_buffer_size)), HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr, sizeof(struct hci_rp_read_bd_addr)), HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts, sizeof(struct hci_rp_read_local_pairing_opts)), HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity, sizeof(struct hci_rp_read_page_scan_activity)), HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, hci_cc_write_page_scan_activity), HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, sizeof(struct hci_rp_read_page_scan_type)), HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, sizeof(struct hci_rp_read_clock)), HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size, sizeof(struct hci_rp_read_enc_key_size)), HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power, sizeof(struct hci_rp_read_inq_rsp_tx_power)), HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, hci_cc_read_def_err_data_reporting, sizeof(struct hci_rp_read_def_err_data_reporting)), HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, hci_cc_write_def_err_data_reporting), HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply, sizeof(struct hci_rp_pin_code_reply)), HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply, sizeof(struct hci_rp_pin_code_neg_reply)), HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data, sizeof(struct hci_rp_read_local_oob_data)), HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data, sizeof(struct hci_rp_read_local_oob_ext_data)), HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size, sizeof(struct hci_rp_le_read_buffer_size)), HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features, sizeof(struct hci_rp_le_read_local_features)), HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power, sizeof(struct hci_rp_le_read_adv_tx_power)), HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply, sizeof(struct hci_rp_user_confirm_reply)), HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply, sizeof(struct 
hci_rp_user_confirm_reply)), HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply, sizeof(struct hci_rp_user_confirm_reply)), HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply, sizeof(struct hci_rp_user_confirm_reply)), HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr), HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable), HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param), HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable), HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE, hci_cc_le_read_accept_list_size, sizeof(struct hci_rp_le_read_accept_list_size)), HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list), HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST, hci_cc_le_add_to_accept_list), HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST, hci_cc_le_del_from_accept_list), HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states, sizeof(struct hci_rp_le_read_supported_states)), HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len, sizeof(struct hci_rp_le_read_def_data_len)), HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN, hci_cc_le_write_def_data_len), HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST, hci_cc_le_add_to_resolv_list), HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST, hci_cc_le_del_from_resolv_list), HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST, hci_cc_le_clear_resolv_list), HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size, sizeof(struct hci_rp_le_read_resolv_list_size)), HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, hci_cc_le_set_addr_resolution_enable), HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len, sizeof(struct hci_rp_le_read_max_data_len)), HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED, hci_cc_write_le_host_supported), HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param), HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi, sizeof(struct hci_rp_read_rssi)), HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power, sizeof(struct hci_rp_read_tx_power)), HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode), HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS, hci_cc_le_set_ext_scan_param), HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE, hci_cc_le_set_ext_scan_enable), HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy), HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, hci_cc_le_read_num_adv_sets, sizeof(struct hci_rp_le_read_num_supported_adv_sets)), HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, hci_cc_le_set_ext_adv_enable), HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, hci_cc_le_set_adv_set_random_addr), HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param), HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE, hci_cc_le_set_per_adv_enable), HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power, sizeof(struct hci_rp_le_read_transmit_power)), HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode), HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2, sizeof(struct hci_rp_le_read_buffer_size_v2)), HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params, sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE), HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path, sizeof(struct hci_rp_le_setup_iso_path)), }; static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc, struct sk_buff *skb) { void *data; if (skb->len < cc->min_len) { 
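/* A reply shorter than min_len cannot be parsed at all. Variable-size
 * completions (e.g. LE Set CIG Parameters, registered via HCI_CC_VL
 * with max_len = HCI_MAX_EVENT_SIZE) may validly exceed min_len. */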
bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u", cc->op, skb->len, cc->min_len); return HCI_ERROR_UNSPECIFIED; } /* Just warn if the length is over max_len size it still be possible to * partially parse the cc so leave to callback to decide if that is * acceptable. */ if (skb->len > cc->max_len) bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u", cc->op, skb->len, cc->max_len); data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len); if (!data) return HCI_ERROR_UNSPECIFIED; return cc->func(hdev, data, skb); } static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb, u16 *opcode, u8 *status, hci_req_complete_t *req_complete, hci_req_complete_skb_t *req_complete_skb) { struct hci_ev_cmd_complete *ev = data; int i; *opcode = __le16_to_cpu(ev->opcode); bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) { if (hci_cc_table[i].op == *opcode) { *status = hci_cc_func(hdev, &hci_cc_table[i], skb); break; } } if (i == ARRAY_SIZE(hci_cc_table)) { /* Unknown opcode, assume byte 0 contains the status, so * that e.g. __hci_cmd_sync() properly returns errors * for vendor specific commands send by HCI drivers. * If a vendor doesn't actually follow this convention we may * need to introduce a vendor CC table in order to properly set * the status. */ *status = skb->data[0]; } handle_cmd_cnt_and_timer(hdev, ev->ncmd); hci_req_cmd_complete(hdev, *opcode, *status, req_complete, req_complete_skb); if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x", *opcode); return; } if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) queue_work(hdev->workqueue, &hdev->cmd_work); } static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status) { struct hci_cp_le_create_cis *cp; bool pending = false; int i; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS); if (!cp) return; hci_dev_lock(hdev); /* Remove connection if command failed */ for (i = 0; i < cp->num_cis; i++) { struct hci_conn *conn; u16 handle; handle = __le16_to_cpu(cp->cis[i].cis_handle); conn = hci_conn_hash_lookup_handle(hdev, handle); if (conn) { if (test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags)) pending = true; conn->state = BT_CLOSED; hci_connect_cfm(conn, status); hci_conn_del(conn); } } cp->num_cis = 0; if (pending) hci_le_create_cis_pending(hdev); hci_dev_unlock(hdev); } #define HCI_CS(_op, _func) \ { \ .op = _op, \ .func = _func, \ } static const struct hci_cs { u16 op; void (*func)(struct hci_dev *hdev, __u8 status); } hci_cs_table[] = { HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry), HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn), HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect), HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco), HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested), HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt), HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req), HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features), HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES, hci_cs_read_remote_ext_features), HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn), HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN, hci_cs_enhanced_setup_sync_conn), HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode), HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode), HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role), HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features), 
HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn), HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis), HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big), }; static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb, u16 *opcode, u8 *status, hci_req_complete_t *req_complete, hci_req_complete_skb_t *req_complete_skb) { struct hci_ev_cmd_status *ev = data; int i; *opcode = __le16_to_cpu(ev->opcode); *status = ev->status; bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) { if (hci_cs_table[i].op == *opcode) { hci_cs_table[i].func(hdev, ev->status); break; } } handle_cmd_cnt_and_timer(hdev, ev->ncmd); /* Indicate request completion if the command failed. Also, if * we're not waiting for a special event and we get a success * command status, we should try to flag the request as completed * (since for this kind of command there will not be a command * complete event). */ if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) { hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, req_complete_skb); if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x", *opcode); return; } } if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) queue_work(hdev->workqueue, &hdev->cmd_work); } static void hci_hardware_error_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_hardware_error *ev = data; bt_dev_dbg(hdev, "code 0x%2.2x", ev->code); hdev->hw_error_code = ev->code; queue_work(hdev->req_workqueue, &hdev->error_reset); } static void hci_role_change_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_role_change *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (conn) { if (!ev->status) conn->role = ev->role; clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); hci_role_switch_cfm(conn, ev->status, ev->role); } hci_dev_unlock(hdev); } static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_num_comp_pkts *ev = data; int i; if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS, flex_array_size(ev, handles, ev->num))) return; bt_dev_dbg(hdev, "num %d", ev->num); hci_dev_lock(hdev); for (i = 0; i < ev->num; i++) { struct hci_comp_pkts_info *info = &ev->handles[i]; struct hci_conn *conn; __u16 handle, count; unsigned int i; handle = __le16_to_cpu(info->handle); count = __le16_to_cpu(info->count); conn = hci_conn_hash_lookup_handle(hdev, handle); if (!conn) continue; /* Check that there really are enough packets outstanding before * attempting to decrease the sent counter, otherwise it could * underflow.
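* E.g. a controller reporting count == 5 while conn->sent == 3 would
 * otherwise wrap the unsigned counter to a huge value; it is clamped
 * to 0 and a warning is logged instead.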
*/ if (conn->sent >= count) { conn->sent -= count; } else { bt_dev_warn(hdev, "hcon %p sent %u < count %u", conn, conn->sent, count); conn->sent = 0; } for (i = 0; i < count; ++i) hci_conn_tx_dequeue(conn); switch (conn->type) { case ACL_LINK: hdev->acl_cnt += count; if (hdev->acl_cnt > hdev->acl_pkts) hdev->acl_cnt = hdev->acl_pkts; break; case LE_LINK: if (hdev->le_pkts) { hdev->le_cnt += count; if (hdev->le_cnt > hdev->le_pkts) hdev->le_cnt = hdev->le_pkts; } else { hdev->acl_cnt += count; if (hdev->acl_cnt > hdev->acl_pkts) hdev->acl_cnt = hdev->acl_pkts; } break; case SCO_LINK: case ESCO_LINK: hdev->sco_cnt += count; if (hdev->sco_cnt > hdev->sco_pkts) hdev->sco_cnt = hdev->sco_pkts; break; case CIS_LINK: case BIS_LINK: case PA_LINK: hdev->iso_cnt += count; if (hdev->iso_cnt > hdev->iso_pkts) hdev->iso_cnt = hdev->iso_pkts; break; default: bt_dev_err(hdev, "unknown type %d conn %p", conn->type, conn); break; } } queue_work(hdev->workqueue, &hdev->tx_work); hci_dev_unlock(hdev); } static void hci_mode_change_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_mode_change *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn) { conn->mode = ev->mode; if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { if (conn->mode == HCI_CM_ACTIVE) set_bit(HCI_CONN_POWER_SAVE, &conn->flags); else clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); } if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) hci_sco_setup(conn, ev->status); } hci_dev_unlock(hdev); } static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_pin_code_req *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; if (conn->state == BT_CONNECTED) { hci_conn_hold(conn); conn->disc_timeout = HCI_PAIRING_TIMEOUT; hci_conn_drop(conn); } if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(ev->bdaddr), &ev->bdaddr); } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { u8 secure; if (conn->pending_sec_level == BT_SECURITY_HIGH) secure = 1; else secure = 0; mgmt_pin_code_request(hdev, &ev->bdaddr, secure); } unlock: hci_dev_unlock(hdev); } static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) { if (key_type == HCI_LK_CHANGED_COMBINATION) return; conn->pin_length = pin_len; conn->key_type = key_type; switch (key_type) { case HCI_LK_LOCAL_UNIT: case HCI_LK_REMOTE_UNIT: case HCI_LK_DEBUG_COMBINATION: return; case HCI_LK_COMBINATION: if (pin_len == 16) conn->pending_sec_level = BT_SECURITY_HIGH; else conn->pending_sec_level = BT_SECURITY_MEDIUM; break; case HCI_LK_UNAUTH_COMBINATION_P192: case HCI_LK_UNAUTH_COMBINATION_P256: conn->pending_sec_level = BT_SECURITY_MEDIUM; break; case HCI_LK_AUTH_COMBINATION_P192: conn->pending_sec_level = BT_SECURITY_HIGH; break; case HCI_LK_AUTH_COMBINATION_P256: conn->pending_sec_level = BT_SECURITY_FIPS; break; } } static void hci_link_key_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_link_key_req *ev = data; struct hci_cp_link_key_reply cp; struct hci_conn *conn; struct link_key *key; bt_dev_dbg(hdev, ""); if (!hci_dev_test_flag(hdev, HCI_MGMT)) return; hci_dev_lock(hdev); key = hci_find_link_key(hdev, &ev->bdaddr); if (!key) { 
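/* No stored key: the negative reply sent at not_found makes the peer
 * fall back to fresh pairing instead of assuming we still hold a
 * shared key. */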
bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr); goto not_found; } bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (conn) { clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || key->type == HCI_LK_UNAUTH_COMBINATION_P256) && conn->auth_type != 0xff && (conn->auth_type & 0x01)) { bt_dev_dbg(hdev, "ignoring unauthenticated key"); goto not_found; } if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && (conn->pending_sec_level == BT_SECURITY_HIGH || conn->pending_sec_level == BT_SECURITY_FIPS)) { bt_dev_dbg(hdev, "ignoring key unauthenticated for high security"); goto not_found; } conn_set_key(conn, key->type, key->pin_len); } bacpy(&cp.bdaddr, &ev->bdaddr); memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); hci_dev_unlock(hdev); return; not_found: hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); hci_dev_unlock(hdev); } static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_link_key_notify *ev = data; struct hci_conn *conn; struct link_key *key; bool persistent; u8 pin_len = 0; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; /* Ignore NULL link key against CVE-2020-26555 */ if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) { bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR", &ev->bdaddr); hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); goto unlock; } hci_conn_hold(conn); conn->disc_timeout = HCI_DISCONN_TIMEOUT; hci_conn_drop(conn); set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); conn_set_key(conn, ev->key_type, conn->pin_length); if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto unlock; key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, ev->key_type, pin_len, &persistent); if (!key) goto unlock; /* Update connection information since adding the key will have * fixed up the type in the case of changed combination keys. */ if (ev->key_type == HCI_LK_CHANGED_COMBINATION) conn_set_key(conn, key->type, key->pin_len); mgmt_new_link_key(hdev, key, persistent); /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag * is set. If it's not set simply remove the key from the kernel * list (we've still notified user space about it but with * store_hint being 0). 
*/ if (key->type == HCI_LK_DEBUG_COMBINATION && !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { list_del_rcu(&key->list); kfree_rcu(key, rcu); goto unlock; } if (persistent) clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); else set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); unlock: hci_dev_unlock(hdev); } static void hci_clock_offset_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_clock_offset *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn && !ev->status) { struct inquiry_entry *ie; ie = hci_inquiry_cache_lookup(hdev, &conn->dst); if (ie) { ie->data.clock_offset = ev->clock_offset; ie->timestamp = jiffies; } } hci_dev_unlock(hdev); } static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_pkt_type_change *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn && !ev->status) conn->pkt_type = __le16_to_cpu(ev->pkt_type); hci_dev_unlock(hdev); } static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_pscan_rep_mode *ev = data; struct inquiry_entry *ie; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); if (ie) { ie->data.pscan_rep_mode = ev->pscan_rep_mode; ie->timestamp = jiffies; } hci_dev_unlock(hdev); } static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, struct sk_buff *skb) { struct hci_ev_inquiry_result_rssi *ev = edata; struct inquiry_data data; int i; bt_dev_dbg(hdev, "num_rsp %d", ev->num); if (!ev->num) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) return; hci_dev_lock(hdev); if (skb->len == array_size(ev->num, sizeof(struct inquiry_info_rssi_pscan))) { struct inquiry_info_rssi_pscan *info; for (i = 0; i < ev->num; i++) { u32 flags; info = hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT_WITH_RSSI, sizeof(*info)); if (!info) { bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", HCI_EV_INQUIRY_RESULT_WITH_RSSI); goto unlock; } bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; data.pscan_period_mode = info->pscan_period_mode; data.pscan_mode = info->pscan_mode; memcpy(data.dev_class, info->dev_class, 3); data.clock_offset = info->clock_offset; data.rssi = info->rssi; data.ssp_mode = 0x00; flags = hci_inquiry_cache_update(hdev, &data, false); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, flags, NULL, 0, NULL, 0, 0); } } else if (skb->len == array_size(ev->num, sizeof(struct inquiry_info_rssi))) { struct inquiry_info_rssi *info; for (i = 0; i < ev->num; i++) { u32 flags; info = hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT_WITH_RSSI, sizeof(*info)); if (!info) { bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", HCI_EV_INQUIRY_RESULT_WITH_RSSI); goto unlock; } bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; data.pscan_period_mode = info->pscan_period_mode; data.pscan_mode = 0x00; memcpy(data.dev_class, info->dev_class, 3); data.clock_offset = info->clock_offset; data.rssi = info->rssi; data.ssp_mode = 0x00; flags = hci_inquiry_cache_update(hdev, &data, false); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, flags, NULL, 0, NULL, 0, 0); } } else { bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 
HCI_EV_INQUIRY_RESULT_WITH_RSSI); } unlock: hci_dev_unlock(hdev); } static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_remote_ext_features *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; if (ev->page < HCI_MAX_PAGES) memcpy(conn->features[ev->page], ev->features, 8); if (!ev->status && ev->page == 0x01) { struct inquiry_entry *ie; ie = hci_inquiry_cache_lookup(hdev, &conn->dst); if (ie) ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); if (ev->features[0] & LMP_HOST_SSP) { set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); } else { /* It is mandatory by the Bluetooth specification that * Extended Inquiry Results are only used when Secure * Simple Pairing is enabled, but some devices violate * this. * * To make these devices work, the internal SSP * enabled flag needs to be cleared if the remote host * features do not indicate SSP support */ clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); } if (ev->features[0] & LMP_HOST_SC) set_bit(HCI_CONN_SC_ENABLED, &conn->flags); } if (conn->state != BT_CONFIG) goto unlock; if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { struct hci_cp_remote_name_req cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); cp.pscan_rep_mode = 0x02; hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); } else { mgmt_device_connected(hdev, conn, NULL, 0); } if (!hci_outgoing_auth_needed(hdev, conn)) { conn->state = BT_CONNECTED; hci_connect_cfm(conn, ev->status); hci_conn_drop(conn); } unlock: hci_dev_unlock(hdev); } static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_sync_conn_complete *ev = data; struct hci_conn *conn; u8 status = ev->status; switch (ev->link_type) { case SCO_LINK: case ESCO_LINK: break; default: /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type * for HCI_Synchronous_Connection_Complete is limited to * either SCO or eSCO */ bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); return; } bt_dev_dbg(hdev, "status 0x%2.2x", status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if (!conn) { if (ev->link_type == ESCO_LINK) goto unlock; /* When the link type in the event indicates SCO connection * and lookup of the connection object fails, then check * if an eSCO connection object exists. * * The core limits the synchronous connections to either * SCO or eSCO. The eSCO connection is preferred and tried * to be setup first and until successfully established, * the link type will be hinted as eSCO. */ conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); if (!conn) goto unlock; } /* The HCI_Synchronous_Connection_Complete event is only sent once per connection. * Processing it more than once per connection can corrupt kernel memory. * * As the connection handle is set here for the first time, it indicates * whether the connection is already set up. 
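* (HCI_CONN_HANDLE_UNSET() treats any value above HCI_CONN_HANDLE_MAX * as a not-yet-assigned handle.)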
*/ if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection"); goto unlock; } switch (status) { case 0x00: status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle)); if (status) { conn->state = BT_CLOSED; break; } conn->state = BT_CONNECTED; conn->type = ev->link_type; hci_debugfs_create_conn(conn); hci_conn_add_sysfs(conn); break; case 0x10: /* Connection Accept Timeout */ case 0x0d: /* Connection Rejected due to Limited Resources */ case 0x11: /* Unsupported Feature or Parameter Value */ case 0x1c: /* SCO interval rejected */ case 0x1a: /* Unsupported Remote Feature */ case 0x1e: /* Invalid LMP Parameters */ case 0x1f: /* Unspecified error */ case 0x20: /* Unsupported LMP Parameter value */ if (conn->out) { conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | (hdev->esco_type & EDR_ESCO_MASK); if (hci_setup_sync(conn, conn->parent->handle)) goto unlock; } fallthrough; default: conn->state = BT_CLOSED; break; } bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); /* Notify only in case of SCO over the HCI transport data path, which * is indicated by a data path value of zero; a non-zero value means a * non-HCI transport data path */ if (conn->codec.data_path == 0 && hdev->notify) { switch (ev->air_mode) { case 0x02: hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); break; case 0x03: hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); break; } } hci_connect_cfm(conn, status); if (status) hci_conn_del(conn); unlock: hci_dev_unlock(hdev); } static inline size_t eir_get_length(u8 *eir, size_t eir_len) { size_t parsed = 0; while (parsed < eir_len) { u8 field_len = eir[0]; if (field_len == 0) return parsed; parsed += field_len + 1; eir += field_len + 1; } return eir_len; } static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata, struct sk_buff *skb) { struct hci_ev_ext_inquiry_result *ev = edata; struct inquiry_data data; size_t eir_len; int i; if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT, flex_array_size(ev, info, ev->num))) return; bt_dev_dbg(hdev, "num %d", ev->num); if (!ev->num) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) return; hci_dev_lock(hdev); for (i = 0; i < ev->num; i++) { struct extended_inquiry_info *info = &ev->info[i]; u32 flags; bool name_known; bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; data.pscan_period_mode = info->pscan_period_mode; data.pscan_mode = 0x00; memcpy(data.dev_class, info->dev_class, 3); data.clock_offset = info->clock_offset; data.rssi = info->rssi; data.ssp_mode = 0x01; if (hci_dev_test_flag(hdev, HCI_MGMT)) name_known = eir_get_data(info->data, sizeof(info->data), EIR_NAME_COMPLETE, NULL); else name_known = true; flags = hci_inquiry_cache_update(hdev, &data, name_known); eir_len = eir_get_length(info->data, sizeof(info->data)); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, flags, info->data, eir_len, NULL, 0, 0); } hci_dev_unlock(hdev); } static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_key_refresh_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status, __le16_to_cpu(ev->handle)); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; /* For BR/EDR the necessary steps are taken through the * auth_complete event.
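* Hence only LE links are processed below.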
*/ if (conn->type != LE_LINK) goto unlock; if (!ev->status) conn->sec_level = conn->pending_sec_level; clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); if (ev->status && conn->state == BT_CONNECTED) { hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); goto unlock; } if (conn->state == BT_CONFIG) { if (!ev->status) conn->state = BT_CONNECTED; hci_connect_cfm(conn, ev->status); hci_conn_drop(conn); } else { hci_auth_cfm(conn, ev->status); hci_conn_hold(conn); conn->disc_timeout = HCI_DISCONN_TIMEOUT; hci_conn_drop(conn); } unlock: hci_dev_unlock(hdev); } static u8 hci_get_auth_req(struct hci_conn *conn) { /* If remote requests no-bonding follow that lead */ if (conn->remote_auth == HCI_AT_NO_BONDING || conn->remote_auth == HCI_AT_NO_BONDING_MITM) return conn->remote_auth | (conn->auth_type & 0x01); /* If both remote and local have enough IO capabilities, require * MITM protection */ if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) return conn->remote_auth | 0x01; /* No MITM protection possible so ignore remote requirement */ return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); } static u8 bredr_oob_data_present(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; struct oob_data *data; data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); if (!data) return 0x00; if (bredr_sc_enabled(hdev)) { /* When Secure Connections is enabled, then just * return the present value stored with the OOB * data. The stored value contains the right present * information. However it can only be trusted when * not in Secure Connections Only mode. */ if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) return data->present; /* When Secure Connections Only mode is enabled, then * the P-256 values are required. If they are not * available, then do not declare that OOB data is * present. */ if (!crypto_memneq(data->rand256, ZERO_KEY, 16) || !crypto_memneq(data->hash256, ZERO_KEY, 16)) return 0x00; return 0x02; } /* When Secure Connections is not enabled or actually * not supported by the hardware, then check if * P-192 data values are present. */ if (!crypto_memneq(data->rand192, ZERO_KEY, 16) || !crypto_memneq(data->hash192, ZERO_KEY, 16)) return 0x00; return 0x01; } static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_io_capa_request *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) goto unlock; /* Assume remote supports SSP since it has triggered this event */ set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); hci_conn_hold(conn); if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto unlock; /* Allow pairing if we're pairable, if we are the initiator of the * pairing, or if the remote is not requesting bonding. */ if (hci_dev_test_flag(hdev, HCI_BONDABLE) || test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { struct hci_cp_io_capability_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); /* Change the IO capability from KeyboardDisplay * to DisplayYesNo as KeyboardDisplay is not * supported by the BT spec. */ cp.capability = (conn->io_capability == 0x04) ? HCI_IO_DISPLAY_YESNO : conn->io_capability; /* If we are the initiator, there is no remote information yet */ if (conn->remote_auth == 0xff) { /* Request MITM protection if our IO caps allow it * except for the no-bonding case.
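* MITM protection corresponds to the low bit of the authentication * requirements value, hence the |= 0x01 below.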
*/ if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && conn->auth_type != HCI_AT_NO_BONDING) conn->auth_type |= 0x01; } else { conn->auth_type = hci_get_auth_req(conn); } /* If we're not bondable, force one of the non-bondable * authentication requirement values. */ if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) conn->auth_type &= HCI_AT_NO_BONDING_MITM; cp.authentication = conn->auth_type; cp.oob_data = bredr_oob_data_present(conn); hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, sizeof(cp), &cp); } else { struct hci_cp_io_capability_neg_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, sizeof(cp), &cp); } unlock: hci_dev_unlock(hdev); } static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_io_capa_reply *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; conn->remote_cap = ev->capability; conn->remote_auth = ev->authentication; unlock: hci_dev_unlock(hdev); } static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_user_confirm_req *ev = data; int loc_mitm, rem_mitm, confirm_hint = 0; struct hci_conn *conn; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto unlock; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; loc_mitm = (conn->auth_type & 0x01); rem_mitm = (conn->remote_auth & 0x01); /* If we require MITM but the remote device can't provide that * (it has NoInputNoOutput) then reject the confirmation * request. We check the security level here since it doesn't * necessarily match conn->auth_type. */ if (conn->pending_sec_level > BT_SECURITY_MEDIUM && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM"); hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, sizeof(ev->bdaddr), &ev->bdaddr); goto unlock; } /* If no side requires MITM protection, use the JUST_CFM method */ if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { /* If we're not the initiator of the authorization request and * the local IO capability is not NoInputNoOutput, use JUST_WORKS * method (mgmt_user_confirm with confirm_hint set to 1). */ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) { bt_dev_dbg(hdev, "Confirming auto-accept as acceptor"); confirm_hint = 1; goto confirm; } /* If there already exists a link key in the local host, leave * the decision to user space since the remote device could be * legitimate or malicious.
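* An unexpected re-pairing while a key is already stored may be an * attempt to take over the existing pairing.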
*/ if (hci_find_link_key(hdev, &ev->bdaddr)) { bt_dev_dbg(hdev, "Local host already has link key"); confirm_hint = 1; goto confirm; } BT_DBG("Auto-accept of user confirmation with %ums delay", hdev->auto_accept_delay); if (hdev->auto_accept_delay > 0) { int delay = msecs_to_jiffies(hdev->auto_accept_delay); queue_delayed_work(conn->hdev->workqueue, &conn->auto_accept_work, delay); goto unlock; } hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(ev->bdaddr), &ev->bdaddr); goto unlock; } confirm: mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, le32_to_cpu(ev->passkey), confirm_hint); unlock: hci_dev_unlock(hdev); } static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_user_passkey_req *ev = data; bt_dev_dbg(hdev, ""); if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); } static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_user_passkey_notify *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, ""); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) return; conn->passkey_notify = __le32_to_cpu(ev->passkey); conn->passkey_entered = 0; if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, conn->dst_type, conn->passkey_notify, conn->passkey_entered); } static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_keypress_notify *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, ""); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) return; switch (ev->type) { case HCI_KEYPRESS_STARTED: conn->passkey_entered = 0; return; case HCI_KEYPRESS_ENTERED: conn->passkey_entered++; break; case HCI_KEYPRESS_ERASED: conn->passkey_entered--; break; case HCI_KEYPRESS_CLEARED: conn->passkey_entered = 0; break; case HCI_KEYPRESS_COMPLETED: return; } if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, conn->dst_type, conn->passkey_notify, conn->passkey_entered); } static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_simple_pair_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn || !hci_conn_ssp_enabled(conn)) goto unlock; /* Reset the authentication requirement to unknown */ conn->remote_auth = 0xff; /* To avoid duplicate auth_failed events to user space we check * the HCI_CONN_AUTH_PEND flag which will be set if we * initiated the authentication. 
A traditional auth_complete * event is always produced as initiator and is also mapped to * the mgmt_auth_failed event */ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) mgmt_auth_failed(conn, ev->status); hci_conn_drop(conn); unlock: hci_dev_unlock(hdev); } static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_remote_host_features *ev = data; struct inquiry_entry *ie; struct hci_conn *conn; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (conn) memcpy(conn->features[1], ev->features, 8); ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); if (ie) ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); hci_dev_unlock(hdev); } static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata, struct sk_buff *skb) { struct hci_ev_remote_oob_data_request *ev = edata; struct oob_data *data; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto unlock; data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); if (!data) { struct hci_cp_remote_oob_data_neg_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp), &cp); goto unlock; } if (bredr_sc_enabled(hdev)) { struct hci_cp_remote_oob_ext_data_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { memset(cp.hash192, 0, sizeof(cp.hash192)); memset(cp.rand192, 0, sizeof(cp.rand192)); } else { memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); } memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, sizeof(cp), &cp); } else { struct hci_cp_remote_oob_data_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); memcpy(cp.hash, data->hash192, sizeof(cp.hash)); memcpy(cp.rand, data->rand192, sizeof(cp.rand)); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp), &cp); } unlock: hci_dev_unlock(hdev); } static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *local_rpa) { if (conn->out) { conn->dst_type = bdaddr_type; conn->resp_addr_type = bdaddr_type; bacpy(&conn->resp_addr, bdaddr); /* If the controller has set a Local RPA then it must be * used instead of hdev->rpa. */ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { conn->init_addr_type = ADDR_LE_DEV_RANDOM; bacpy(&conn->init_addr, local_rpa); } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { conn->init_addr_type = ADDR_LE_DEV_RANDOM; bacpy(&conn->init_addr, &conn->hdev->rpa); } else { hci_copy_identity_address(conn->hdev, &conn->init_addr, &conn->init_addr_type); } } else { conn->resp_addr_type = conn->hdev->adv_addr_type; /* If the controller has set a Local RPA then it must be * used instead of hdev->rpa. */ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { conn->resp_addr_type = ADDR_LE_DEV_RANDOM; bacpy(&conn->resp_addr, local_rpa); } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { /* In case of ext adv, resp_addr will be updated in * Adv Terminated event. */ if (!ext_adv_capable(conn->hdev)) bacpy(&conn->resp_addr, &conn->hdev->random_addr); } else { bacpy(&conn->resp_addr, &conn->hdev->bdaddr); } conn->init_addr_type = bdaddr_type; bacpy(&conn->init_addr, bdaddr); /* For incoming connections, set the default minimum * and maximum connection interval.
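* The defaults are taken from hdev->le_conn_min_interval and * hdev->le_conn_max_interval.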
They will be used * to check if the parameters are in range and, if not, to * trigger the connection update procedure. */ conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; } } static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *local_rpa, u8 role, u16 handle, u16 interval, u16 latency, u16 supervision_timeout) { struct hci_conn_params *params; struct hci_conn *conn; struct smp_irk *irk; u8 addr_type; hci_dev_lock(hdev); /* All controllers implicitly stop advertising in the event of a * connection, so ensure that the state bit is cleared. */ hci_dev_clear_flag(hdev, HCI_LE_ADV); /* Check for existing connection: * * 1. If it doesn't exist then use the role to create a new object. * 2. If it does exist, confirm that it is connecting/BT_CONNECT in case * of initiator/master role, since there could be a collision where * either side is attempting to connect, or something like fuzz * testing is trying to play tricks to destroy the hcon object before * it even attempts to connect (e.g. hcon->state == BT_OPEN). */ conn = hci_conn_hash_lookup_role(hdev, LE_LINK, role, bdaddr); if (!conn || (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) { /* In case of error status and there is no connection pending * just unlock as there is nothing to clean up. */ if (status) goto unlock; conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role); if (IS_ERR(conn)) { bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); goto unlock; } conn->dst_type = bdaddr_type; /* If we didn't have a hci_conn object previously * but we're in central role, this must be something * initiated using an accept list. Since accept list based * connections are not "first class citizens" we don't * have full tracking of them. Therefore, we go ahead * with a "best effort" approach of determining the * initiator address based on the HCI_PRIVACY flag. */ if (conn->out) { conn->resp_addr_type = bdaddr_type; bacpy(&conn->resp_addr, bdaddr); if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { conn->init_addr_type = ADDR_LE_DEV_RANDOM; bacpy(&conn->init_addr, &hdev->rpa); } else { hci_copy_identity_address(hdev, &conn->init_addr, &conn->init_addr_type); } } } else { cancel_delayed_work(&conn->le_conn_timeout); } /* The HCI_LE_Connection_Complete event is only sent once per connection. * Processing it more than once per connection can corrupt kernel memory. * * As the connection handle is set here for the first time, it indicates * whether the connection is already set up. */ if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); goto unlock; } le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); /* Lookup the identity address from the stored connection * address and address type. * * When establishing connections to an identity address, the * connection procedure will store the resolvable random * address first. Now if it can be converted back into the * identity address, start using the identity address from * now on. */ irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); if (irk) { bacpy(&conn->dst, &irk->bdaddr); conn->dst_type = irk->addr_type; } conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); /* All connection failure handling is taken care of by the * hci_conn_failed function which is triggered by the HCI * request completion callbacks used for connecting.
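* On error it is therefore sufficient to simply unlock here.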
*/ if (status || hci_conn_set_handle(conn, handle)) goto unlock; /* Drop the connection if it has been aborted */ if (test_bit(HCI_CONN_CANCEL, &conn->flags)) { hci_conn_drop(conn); goto unlock; } if (conn->dst_type == ADDR_LE_DEV_PUBLIC) addr_type = BDADDR_LE_PUBLIC; else addr_type = BDADDR_LE_RANDOM; /* Drop the connection if the device is blocked */ if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) { hci_conn_drop(conn); goto unlock; } mgmt_device_connected(hdev, conn, NULL, 0); conn->sec_level = BT_SECURITY_LOW; conn->state = BT_CONFIG; /* Store current advertising instance as connection advertising instance * when software rotation is in use so it can be re-enabled when * disconnected. */ if (!ext_adv_capable(hdev)) conn->adv_instance = hdev->cur_adv_instance; conn->le_conn_interval = interval; conn->le_conn_latency = latency; conn->le_supv_timeout = supervision_timeout; hci_debugfs_create_conn(conn); hci_conn_add_sysfs(conn); /* The remote features procedure is defined for the central * role only, so the remote features are normally only requested * for an initiated (outgoing) connection. * * If the local controller supports peripheral-initiated features * exchange, then requesting the remote features in peripheral * role is possible. Otherwise just transition into the * connected state without requesting the remote features. */ if (conn->out || (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { struct hci_cp_le_read_remote_features cp; cp.handle = __cpu_to_le16(conn->handle); hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, sizeof(cp), &cp); hci_conn_hold(conn); } else { conn->state = BT_CONNECTED; hci_connect_cfm(conn, status); } params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, conn->dst_type); if (params) { hci_pend_le_list_del_init(params); if (params->conn) { hci_conn_drop(params->conn); hci_conn_put(params->conn); params->conn = NULL; } } unlock: hci_update_passive_scan(hdev); hci_dev_unlock(hdev); } static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_conn_complete *ev = data; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, NULL, ev->role, le16_to_cpu(ev->handle), le16_to_cpu(ev->interval), le16_to_cpu(ev->latency), le16_to_cpu(ev->supervision_timeout)); } static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_enh_conn_complete *ev = data; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), le16_to_cpu(ev->interval), le16_to_cpu(ev->latency), le16_to_cpu(ev->supervision_timeout)); } static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_evt_le_ext_adv_set_term *ev = data; struct hci_conn *conn; struct adv_info *adv, *n; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); /* The Bluetooth Core 5.3 specification clearly states that this event * shall not be sent when the Host disables the advertising set. So in * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. * * When the Host disables an advertising set, all cleanup is done via * its command callback and does not need to be duplicated here.
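* Some controllers emit the event anyway, which is why the handler * below only logs a rate-limited warning for that status.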
*/ if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); return; } hci_dev_lock(hdev); adv = hci_find_adv_instance(hdev, ev->handle); if (ev->status) { if (!adv) goto unlock; /* Remove advertising as it has been terminated */ hci_remove_adv_instance(hdev, ev->handle); mgmt_advertising_removed(NULL, hdev, ev->handle); list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { if (adv->enabled) goto unlock; } /* We are no longer advertising, clear HCI_LE_ADV */ hci_dev_clear_flag(hdev, HCI_LE_ADV); goto unlock; } if (adv) adv->enabled = false; conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); if (conn) { /* Store handle in the connection so the correct advertising * instance can be re-enabled when disconnected. */ conn->adv_instance = ev->handle; if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || bacmp(&conn->resp_addr, BDADDR_ANY)) goto unlock; if (!ev->handle) { bacpy(&conn->resp_addr, &hdev->random_addr); goto unlock; } if (adv) bacpy(&conn->resp_addr, &adv->random_addr); } unlock: hci_dev_unlock(hdev); } static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_conn_update_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); if (ev->status) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn) { conn->le_conn_interval = le16_to_cpu(ev->interval); conn->le_conn_latency = le16_to_cpu(ev->latency); conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); } hci_dev_unlock(hdev); } /* This function requires the caller holds hdev->lock */ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, bool addr_resolved, u8 adv_type, u8 phy, u8 sec_phy) { struct hci_conn *conn; struct hci_conn_params *params; /* If the event is not connectable, don't proceed further */ if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) return NULL; /* Ignore if the device is blocked or hdev is suspended */ if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || hdev->suspended) return NULL; /* Most controllers will fail if we try to create new connections * while we have an existing one in peripheral role. */ if (hdev->conn_hash.le_num_peripheral > 0 && (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES) || !(hdev->le_states[3] & 0x10))) return NULL; /* If we're not connectable, only connect to devices that we have in * our pend_le_conns list. */ params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type); if (!params) return NULL; if (!params->explicit_connect) { switch (params->auto_connect) { case HCI_AUTO_CONN_DIRECT: /* Only devices advertising with ADV_DIRECT_IND trigger a * connection attempt. This allows incoming connections * from peripheral devices. */ if (adv_type != LE_ADV_DIRECT_IND) return NULL; break; case HCI_AUTO_CONN_ALWAYS: /* Devices advertising with ADV_IND or ADV_DIRECT_IND * trigger a connection attempt. This means that incoming * connections from peripheral devices are accepted and * also outgoing connections to peripheral devices are * established when found.
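* Any other auto_connect value does not permit a connection here, * which the default case below enforces.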
*/ break; default: return NULL; } } conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER, phy, sec_phy); if (!IS_ERR(conn)) { /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned * by the higher layer that tried to connect; if not, then * store the pointer since we don't really have any * other owner of the object besides the params that * triggered it. This way we can abort the connection if * the parameters get removed and keep the reference * count consistent once the connection is established. */ if (!params->explicit_connect) params->conn = hci_conn_get(conn); return conn; } switch (PTR_ERR(conn)) { case -EBUSY: /* If hci_connect() returns -EBUSY it means there is already * an LE connection attempt going on. Since controllers don't * support more than one connection attempt at a time, we * don't consider this an error case. */ break; default: BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); return NULL; } return NULL; } static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *direct_addr, u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi, u8 *data, u8 len, bool ext_adv, bool ctl_time, u64 instant) { struct discovery_state *d = &hdev->discovery; struct smp_irk *irk; struct hci_conn *conn; bool match, bdaddr_resolved; u32 flags; u8 *ptr; switch (type) { case LE_ADV_IND: case LE_ADV_DIRECT_IND: case LE_ADV_SCAN_IND: case LE_ADV_NONCONN_IND: case LE_ADV_SCAN_RSP: break; default: bt_dev_err_ratelimited(hdev, "unknown advertising packet " "type: 0x%02x", type); return; } if (len > max_adv_len(hdev)) { bt_dev_err_ratelimited(hdev, "adv larger than maximum supported"); return; } /* Find the end of the data in case the report contains padded zero * bytes at the end causing an invalid length value. * * When data is NULL, len is 0 so there is no need for extra ptr * check as 'ptr < data + 0' is already false in such a case. */ for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { if (ptr + 1 + *ptr > data + len) break; } /* Adjust for actual length. This handles the case when the remote * device is advertising with an incorrect data length. */ len = ptr - data; /* If the direct address is present, then this report is from * a LE Direct Advertising Report event. In that case it is * important to see if the address is matching the local * controller address. * * If local privacy is not enabled the controller shall not be * generating such an event since according to its documentation it is * only valid for filter_policy 0x02 and 0x03, but the fact that it did * generate LE Direct Advertising Report means it is probably broken and * won't generate any other event which can potentially break * auto-connect logic, so in case local privacy is not enabled this * ignores the direct_addr so it works as a regular report. */ if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr && hci_dev_test_flag(hdev, HCI_PRIVACY)) { direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, &bdaddr_resolved); /* Only resolvable random addresses are valid for these * kinds of reports and others can be ignored. */ if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) return; /* If the local IRK of the controller does not match * with the resolvable random address provided, then * this report can be ignored.
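* smp_irk_matches() checks whether the RPA could have been * generated from the local IRK.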
*/ if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) return; } /* Check if we need to convert to identity address */ irk = hci_get_irk(hdev, bdaddr, bdaddr_type); if (irk) { bdaddr = &irk->bdaddr; bdaddr_type = irk->addr_type; } bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); /* Check if we have been requested to connect to this device. * * direct_addr is set only for directed advertising reports (it is NULL * for advertising reports) and is already verified to be an RPA above. */ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, type, phy, sec_phy); if (!ext_adv && conn && type == LE_ADV_IND && len <= max_adv_len(hdev)) { /* Store report for later inclusion by * mgmt_device_connected */ memcpy(conn->le_adv_data, data, len); conn->le_adv_data_len = len; } if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; else flags = 0; /* All scan results should be sent up for Mesh systems */ if (hci_dev_test_flag(hdev, HCI_MESH)) { mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, rssi, flags, data, len, NULL, 0, instant); return; } /* Passive scanning shouldn't trigger any device found events, * except for devices marked as CONN_REPORT for which we do send * device found events, or when advertisement monitoring is requested. */ if (hdev->le_scan_type == LE_SCAN_PASSIVE) { if (type == LE_ADV_DIRECT_IND) return; if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, bdaddr, bdaddr_type) && idr_is_empty(&hdev->adv_monitors_idr)) return; mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, rssi, flags, data, len, NULL, 0, 0); return; } /* When receiving a scan response there is no way to * know if the remote device is connectable or not. However * since scan responses are merged with a previously seen * advertising report, the flags field from that report * will be used. * * In the unlikely case that a controller just sends a scan * response event that doesn't match the pending report, then * it is marked as a standalone SCAN_RSP. */ if (type == LE_ADV_SCAN_RSP) flags = MGMT_DEV_FOUND_SCAN_RSP; /* If there's nothing pending either store the data from this * event or send an immediate device found event if the data * should not be stored for later. */ if (!has_pending_adv_report(hdev)) { /* If the report will trigger a SCAN_REQ store it for * later merging. */ if (!ext_adv && (type == LE_ADV_IND || type == LE_ADV_SCAN_IND)) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; } mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, rssi, flags, data, len, NULL, 0, 0); return; } /* Check if the pending report is for the same device as the new one */ match = (!bacmp(bdaddr, &d->last_adv_addr) && bdaddr_type == d->last_adv_addr_type); /* If the pending data doesn't match this report or this isn't a * scan response (e.g. we got a duplicate ADV_IND) then force * sending of the pending data. */ if (type != LE_ADV_SCAN_RSP || !match) { /* Send out whatever is in the cache, but skip duplicates */ if (!match) mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, d->last_adv_addr_type, NULL, d->last_adv_rssi, d->last_adv_flags, d->last_adv_data, d->last_adv_data_len, NULL, 0, 0); /* If the new report will trigger a SCAN_REQ store it for * later merging.
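* (an ADV_IND or ADV_SCAN_IND may be followed by a SCAN_RSP that * should be reported together with it).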
*/ if (!ext_adv && (type == LE_ADV_IND || type == LE_ADV_SCAN_IND)) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; } /* The advertising reports cannot be merged, so clear * the pending report and send out a device found event. */ clear_pending_adv_report(hdev); mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, rssi, flags, data, len, NULL, 0, 0); return; } /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and * the new event is a SCAN_RSP. We can therefore proceed with * sending a merged device found event. */ mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, d->last_adv_data, d->last_adv_data_len, data, len, 0); clear_pending_adv_report(hdev); } static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_advertising_report *ev = data; u64 instant = jiffies; if (!ev->num) return; hci_dev_lock(hdev); while (ev->num--) { struct hci_ev_le_advertising_info *info; s8 rssi; info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT, sizeof(*info)); if (!info) break; if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT, info->length + 1)) break; if (info->length <= max_adv_len(hdev)) { rssi = info->data[info->length]; process_adv_report(hdev, info->type, &info->bdaddr, info->bdaddr_type, NULL, 0, HCI_ADV_PHY_1M, 0, rssi, info->data, info->length, false, false, instant); } else { bt_dev_err(hdev, "Dropping invalid advertising data"); } } hci_dev_unlock(hdev); } static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) { u16 pdu_type = evt_type & ~LE_EXT_ADV_DATA_STATUS_MASK; if (!pdu_type) return LE_ADV_NONCONN_IND; if (evt_type & LE_EXT_ADV_LEGACY_PDU) { switch (evt_type) { case LE_LEGACY_ADV_IND: return LE_ADV_IND; case LE_LEGACY_ADV_DIRECT_IND: return LE_ADV_DIRECT_IND; case LE_LEGACY_ADV_SCAN_IND: return LE_ADV_SCAN_IND; case LE_LEGACY_NONCONN_IND: return LE_ADV_NONCONN_IND; case LE_LEGACY_SCAN_RSP_ADV: case LE_LEGACY_SCAN_RSP_ADV_SCAN: return LE_ADV_SCAN_RSP; } goto invalid; } if (evt_type & LE_EXT_ADV_CONN_IND) { if (evt_type & LE_EXT_ADV_DIRECT_IND) return LE_ADV_DIRECT_IND; return LE_ADV_IND; } if (evt_type & LE_EXT_ADV_SCAN_RSP) return LE_ADV_SCAN_RSP; if (evt_type & LE_EXT_ADV_SCAN_IND) return LE_ADV_SCAN_IND; if (evt_type & LE_EXT_ADV_DIRECT_IND) return LE_ADV_NONCONN_IND; invalid: bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x", evt_type); return LE_ADV_INVALID; } static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_ext_adv_report *ev = data; u64 instant = jiffies; if (!ev->num) return; hci_dev_lock(hdev); while (ev->num--) { struct hci_ev_le_ext_adv_info *info; u8 legacy_evt_type; u16 evt_type; info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, sizeof(*info)); if (!info) break; if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, info->length)) break; evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK; legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type); if (hci_test_quirk(hdev, HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY)) { info->primary_phy &= 0x1f; info->secondary_phy &= 0x1f; } /* Check if PA Sync is pending and if the hci_conn SID has not * been set update it. 
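* The SID is only available from extended advertising reports, so it * is recorded here for the pending PA sync connection.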
*/ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { struct hci_conn *conn; conn = hci_conn_hash_lookup_create_pa_sync(hdev); if (conn && conn->sid == HCI_SID_INVALID) conn->sid = info->sid; } if (legacy_evt_type != LE_ADV_INVALID) { process_adv_report(hdev, legacy_evt_type, &info->bdaddr, info->bdaddr_type, NULL, 0, info->primary_phy, info->secondary_phy, info->rssi, info->data, info->length, !(evt_type & LE_EXT_ADV_LEGACY_PDU), false, instant); } } hci_dev_unlock(hdev); } static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle) { struct hci_cp_le_pa_term_sync cp; memset(&cp, 0, sizeof(cp)); cp.handle = handle; return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp); } static void hci_le_pa_sync_established_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_pa_sync_established *ev = data; int mask = hdev->link_mode; __u8 flags = 0; struct hci_conn *pa_sync, *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); hci_dev_clear_flag(hdev, HCI_PA_SYNC); conn = hci_conn_hash_lookup_create_pa_sync(hdev); if (!conn) { bt_dev_err(hdev, "Unable to find connection for dst %pMR sid 0x%2.2x", &ev->bdaddr, ev->sid); goto unlock; } clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); conn->sync_handle = le16_to_cpu(ev->handle); conn->sid = HCI_SID_INVALID; mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK, &flags); if (!(mask & HCI_LM_ACCEPT)) { hci_le_pa_term_sync(hdev, ev->handle); goto unlock; } if (!(flags & HCI_PROTO_DEFER)) goto unlock; /* Add connection to indicate PA sync event */ pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY, HCI_ROLE_SLAVE); if (IS_ERR(pa_sync)) goto unlock; pa_sync->sync_handle = le16_to_cpu(ev->handle); if (ev->status) { set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags); /* Notify iso layer */ hci_connect_cfm(pa_sync, ev->status); } unlock: hci_dev_unlock(hdev); } static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_per_adv_report *ev = data; int mask = hdev->link_mode; __u8 flags = 0; struct hci_conn *pa_sync; bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle)); hci_dev_lock(hdev); mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, PA_LINK, &flags); if (!(mask & HCI_LM_ACCEPT)) goto unlock; if (!(flags & HCI_PROTO_DEFER)) goto unlock; pa_sync = hci_conn_hash_lookup_pa_sync_handle (hdev, le16_to_cpu(ev->sync_handle)); if (!pa_sync) goto unlock; if (ev->data_status == LE_PA_DATA_COMPLETE && !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) { /* Notify iso layer */ hci_connect_cfm(pa_sync, 0); /* Notify MGMT layer */ mgmt_device_connected(hdev, pa_sync, NULL, 0); } unlock: hci_dev_unlock(hdev); } static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_remote_feat_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn) { if (!ev->status) memcpy(conn->features[0], ev->features, 8); if (conn->state == BT_CONFIG) { __u8 status; /* If the local controller supports peripheral-initiated * features exchange, but the remote controller does * not, then it is possible that the error code 0x1a * for unsupported remote feature gets returned. * * In this specific case, allow the connection to * transition into connected state and mark it as * successful. 
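* (0x1a is HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE.)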
*/ if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE && (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) status = 0x00; else status = ev->status; conn->state = BT_CONNECTED; hci_connect_cfm(conn, status); hci_conn_drop(conn); } } hci_dev_unlock(hdev); } static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_ltk_req *ev = data; struct hci_cp_le_ltk_reply cp; struct hci_cp_le_ltk_neg_reply neg; struct hci_conn *conn; struct smp_ltk *ltk; bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn == NULL) goto not_found; ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role); if (!ltk) goto not_found; if (smp_ltk_is_sc(ltk)) { /* With SC both EDiv and Rand are set to zero */ if (ev->ediv || ev->rand) goto not_found; } else { /* For non-SC keys check that EDiv and Rand match */ if (ev->ediv != ltk->ediv || ev->rand != ltk->rand) goto not_found; } memcpy(cp.ltk, ltk->val, ltk->enc_size); memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size); cp.handle = cpu_to_le16(conn->handle); conn->pending_sec_level = smp_ltk_sec_level(ltk); conn->enc_key_size = ltk->enc_size; hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a * temporary key used to encrypt a connection following * pairing. It is used during the Encrypted Session Setup to * distribute the keys. Later, security can be re-established * using a distributed LTK. */ if (ltk->type == SMP_STK) { set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); list_del_rcu(&ltk->list); kfree_rcu(ltk, rcu); } else { clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); } hci_dev_unlock(hdev); return; not_found: neg.handle = ev->handle; hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); hci_dev_unlock(hdev); } static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle, u8 reason) { struct hci_cp_le_conn_param_req_neg_reply cp; cp.handle = cpu_to_le16(handle); cp.reason = reason; hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp), &cp); } static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_remote_conn_param_req *ev = data; struct hci_cp_le_conn_param_req_reply cp; struct hci_conn *hcon; u16 handle, min, max, latency, timeout; bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); handle = le16_to_cpu(ev->handle); min = le16_to_cpu(ev->interval_min); max = le16_to_cpu(ev->interval_max); latency = le16_to_cpu(ev->latency); timeout = le16_to_cpu(ev->timeout); hcon = hci_conn_hash_lookup_handle(hdev, handle); if (!hcon || hcon->state != BT_CONNECTED) return send_conn_param_neg_reply(hdev, handle, HCI_ERROR_UNKNOWN_CONN_ID); if (max > hcon->le_conn_max_interval) return send_conn_param_neg_reply(hdev, handle, HCI_ERROR_INVALID_LL_PARAMS); if (hci_check_conn_params(min, max, latency, timeout)) return send_conn_param_neg_reply(hdev, handle, HCI_ERROR_INVALID_LL_PARAMS); if (hcon->role == HCI_ROLE_MASTER) { struct hci_conn_params *params; u8 store_hint; hci_dev_lock(hdev); params = hci_conn_params_lookup(hdev, &hcon->dst, hcon->dst_type); if (params) { params->conn_min_interval = min; params->conn_max_interval = max; params->conn_latency = latency; params->supervision_timeout = timeout; store_hint = 0x01; } else { store_hint = 0x00; } hci_dev_unlock(hdev); mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type, 
store_hint, min, max, latency, timeout); } cp.handle = ev->handle; cp.interval_min = ev->interval_min; cp.interval_max = ev->interval_max; cp.latency = ev->latency; cp.timeout = ev->timeout; cp.min_ce_len = 0; cp.max_ce_len = 0; hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); } static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_direct_adv_report *ev = data; u64 instant = jiffies; int i; if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT, flex_array_size(ev, info, ev->num))) return; if (!ev->num) return; hci_dev_lock(hdev); for (i = 0; i < ev->num; i++) { struct hci_ev_le_direct_adv_info *info = &ev->info[i]; process_adv_report(hdev, info->type, &info->bdaddr, info->bdaddr_type, &info->direct_addr, info->direct_addr_type, HCI_ADV_PHY_1M, 0, info->rssi, NULL, 0, false, false, instant); } hci_dev_unlock(hdev); } static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_phy_update_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); if (ev->status) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; conn->le_tx_phy = ev->tx_phy; conn->le_rx_phy = ev->rx_phy; unlock: hci_dev_unlock(hdev); } static void hci_le_cis_established_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_evt_le_cis_established *ev = data; struct hci_conn *conn; struct bt_iso_qos *qos; bool pending = false; u16 handle = __le16_to_cpu(ev->handle); u32 c_sdu_interval, p_sdu_interval; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); if (!conn) { bt_dev_err(hdev, "Unable to find connection with handle 0x%4.4x", handle); goto unlock; } if (conn->type != CIS_LINK) { bt_dev_err(hdev, "Invalid connection link type handle 0x%4.4x", handle); goto unlock; } qos = &conn->iso_qos; pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags); /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G * page 3075: * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) × * ISO_Interval + SDU_Interval_C_To_P * ... * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) - * Transport_Latency */ c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) + (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) - get_unaligned_le24(ev->c_latency); p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) + (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) - get_unaligned_le24(ev->p_latency); switch (conn->role) { case HCI_ROLE_SLAVE: qos->ucast.in.interval = c_sdu_interval; qos->ucast.out.interval = p_sdu_interval; /* Convert Transport Latency (us) to Latency (msec) */ qos->ucast.in.latency = DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency), 1000); qos->ucast.out.latency = DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), 1000); qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0; qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0; qos->ucast.in.phy = ev->c_phy; qos->ucast.out.phy = ev->p_phy; break; case HCI_ROLE_MASTER: qos->ucast.in.interval = p_sdu_interval; qos->ucast.out.interval = c_sdu_interval; /* Convert Transport Latency (us) to Latency (msec) */ qos->ucast.out.latency = DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency), 1000); qos->ucast.in.latency = DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), 1000); qos->ucast.out.sdu = ev->c_bn ? 
le16_to_cpu(ev->c_mtu) : 0; qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0; qos->ucast.out.phy = ev->c_phy; qos->ucast.in.phy = ev->p_phy; break; } if (!ev->status) { conn->state = BT_CONNECTED; hci_debugfs_create_conn(conn); hci_conn_add_sysfs(conn); hci_iso_setup_path(conn); goto unlock; } conn->state = BT_CLOSED; hci_connect_cfm(conn, ev->status); hci_conn_del(conn); unlock: if (pending) hci_le_create_cis_pending(hdev); hci_dev_unlock(hdev); } static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle) { struct hci_cp_le_reject_cis cp; memset(&cp, 0, sizeof(cp)); cp.handle = handle; cp.reason = HCI_ERROR_REJ_BAD_ADDR; hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp); } static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle) { struct hci_cp_le_accept_cis cp; memset(&cp, 0, sizeof(cp)); cp.handle = handle; hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); } static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_evt_le_cis_req *ev = data; u16 acl_handle, cis_handle; struct hci_conn *acl, *cis; int mask; __u8 flags = 0; acl_handle = __le16_to_cpu(ev->acl_handle); cis_handle = __le16_to_cpu(ev->cis_handle); bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x", acl_handle, cis_handle, ev->cig_id, ev->cis_id); hci_dev_lock(hdev); acl = hci_conn_hash_lookup_handle(hdev, acl_handle); if (!acl) goto unlock; mask = hci_proto_connect_ind(hdev, &acl->dst, CIS_LINK, &flags); if (!(mask & HCI_LM_ACCEPT)) { hci_le_reject_cis(hdev, ev->cis_handle); goto unlock; } cis = hci_conn_hash_lookup_handle(hdev, cis_handle); if (!cis) { cis = hci_conn_add(hdev, CIS_LINK, &acl->dst, HCI_ROLE_SLAVE, cis_handle); if (IS_ERR(cis)) { hci_le_reject_cis(hdev, ev->cis_handle); goto unlock; } } cis->iso_qos.ucast.cig = ev->cig_id; cis->iso_qos.ucast.cis = ev->cis_id; if (!(flags & HCI_PROTO_DEFER)) { hci_le_accept_cis(hdev, ev->cis_handle); } else { cis->state = BT_CONNECT2; hci_connect_cfm(cis, 0); } unlock: hci_dev_unlock(hdev); } static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data) { u8 handle = PTR_UINT(data); return hci_le_terminate_big_sync(hdev, handle, HCI_ERROR_LOCAL_HOST_TERM); } static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_evt_le_create_big_complete *ev = data; struct hci_conn *conn; __u8 i = 0; BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE, flex_array_size(ev, bis_handle, ev->num_bis))) return; hci_dev_lock(hdev); /* Connect all BISes that are bound to the BIG */ while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle, BT_BOUND, HCI_ROLE_MASTER))) { if (ev->status) { hci_connect_cfm(conn, ev->status); hci_conn_del(conn); continue; } if (hci_conn_set_handle(conn, __le16_to_cpu(ev->bis_handle[i++]))) continue; conn->state = BT_CONNECTED; set_bit(HCI_CONN_BIG_CREATED, &conn->flags); hci_debugfs_create_conn(conn); hci_conn_add_sysfs(conn); hci_iso_setup_path(conn); } if (!ev->status && !i) /* If no BISes have been connected for the BIG, * terminate. This is in case all bound connections * have been closed before the BIG creation * has completed. 
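* The termination is queued through hci_cmd_sync_queue() so it runs * from the command sync context instead of this event handler.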
		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
				   UINT_PTR(ev->handle), NULL);

	hci_dev_unlock(hdev);
}

static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_established *ev = data;
	struct hci_conn *bis, *conn;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
				flex_array_size(ev, bis, ev->num_bis)))
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle,
						  ev->num_bis);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection for big 0x%2.2x",
			   ev->handle);
		goto unlock;
	}

	clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);

	conn->num_bis = 0;
	/* Clear the whole handle array, not just the count */
	memset(conn->bis, 0, sizeof(conn->bis));

	for (i = 0; i < ev->num_bis; i++) {
		u16 handle = le16_to_cpu(ev->bis[i]);
		__le32 interval;

		bis = hci_conn_hash_lookup_handle(hdev, handle);
		if (!bis) {
			if (handle > HCI_CONN_HANDLE_MAX) {
				bt_dev_dbg(hdev, "ignore too large handle %u",
					   handle);
				continue;
			}
			bis = hci_conn_add(hdev, BIS_LINK, BDADDR_ANY,
					   HCI_ROLE_SLAVE, handle);
			if (IS_ERR(bis))
				continue;
		}

		if (ev->status != 0x42) {
			/* Mark PA sync as established */
			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
			/* Reset cleanup callback of PA Sync so it doesn't
			 * terminate the sync when deleting the connection.
			 */
			conn->cleanup = NULL;
		}

		bis->sync_handle = conn->sync_handle;
		bis->iso_qos.bcast.big = ev->handle;
		memset(&interval, 0, sizeof(interval));
		memcpy(&interval, ev->latency, sizeof(ev->latency));
		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
		bis->iso_qos.bcast.in.latency =
			le16_to_cpu(ev->interval) * 125 / 100;
		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);

		if (!ev->status) {
			bis->state = BT_CONNECTED;
			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
			hci_debugfs_create_conn(bis);
			hci_conn_add_sysfs(bis);
			hci_iso_setup_path(bis);
		}
	}

	/* In case BIG sync failed, notify each failed connection to
	 * the user after all hci connections have been added
	 */
	if (ev->status)
		for (i = 0; i < ev->num_bis; i++) {
			u16 handle = le16_to_cpu(ev->bis[i]);

			bis = hci_conn_hash_lookup_handle(hdev, handle);
			if (!bis)
				continue;

			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
			hci_connect_cfm(bis, ev->status);
		}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_big_sync_lost_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_lost *ev = data;
	struct hci_conn *bis, *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "big handle 0x%2.2x", ev->handle);

	hci_dev_lock(hdev);

	/* Delete the pa sync connection */
	bis = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
	if (bis) {
		conn = hci_conn_hash_lookup_pa_sync_handle(hdev,
							   bis->sync_handle);
		if (conn)
			hci_conn_del(conn);
	}

	/* Delete each bis connection */
	while ((bis = hci_conn_hash_lookup_big_state(hdev, ev->handle,
						     BT_CONNECTED,
						     HCI_ROLE_SLAVE))) {
		mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED,
					       &bis->flags);
		mgmt_device_disconnected(hdev, &bis->dst, bis->type,
					 bis->dst_type, ev->reason,
					 mgmt_conn);

		clear_bit(HCI_CONN_BIG_SYNC, &bis->flags);
		hci_disconn_cfm(bis, ev->reason);
		hci_conn_del(bis);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_big_info_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, BIS_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT))
		goto unlock;

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
						      le16_to_cpu(ev->sync_handle));
	if (!pa_sync)
		goto unlock;

	pa_sync->iso_qos.bcast.encryption = ev->encryption;

	/* Notify iso layer */
	hci_connect_cfm(pa_sync, 0);

unlock:
	hci_dev_unlock(hdev);
}

#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))

/* Entries in this table shall have their position according to the subevent
 * opcode they handle. Using the macros above is recommended, since they
 * initialize each entry at its proper index via designated initializers;
 * that way, subevents without a callback function can simply be omitted.
 */
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_established_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT, hci_le_per_adv_report_evt,
		     sizeof(struct hci_ev_le_per_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABLISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_established),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1e = HCI_EVT_LE_BIG_SYNC_LOST] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_LOST, hci_le_big_sync_lost_evt,
		     sizeof(struct hci_evt_le_big_sync_lost),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
};

static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->req_skb &&
	    (hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 ||
	     hci_skb_opcode(hdev->req_skb) == HCI_OP_NOP) &&
	    hci_skb_event(hdev->req_skb) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->req_skb);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len: it may still be possible
	 * to partially parse the event, so leave it to the callback to
	 * decide whether that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);

	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}

static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
	if (!hdr)
		return false;

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
	if (!ev)
		return false;

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_info *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * Bluez mgmt api docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}

#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)

/* Entries in this table shall have their position according to the event
 * opcode they handle. Using the macros above is recommended, since they
 * initialize each entry at its proper index via designated initializers;
 * that way, events without a callback function don't need an entry.
 */
static const struct hci_ev {
	bool req;
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16  min_len;
	u16  max_len;
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2f = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};

static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
{
	const struct hci_ev *ev = &hci_ev_table[event];
	void *data;

	if (!ev->func)
		return;

	if (skb->len < ev->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, ev->min_len);
		return;
	}

	/* Just warn if the length is over max_len: it may still be possible
	 * to partially parse the event, so leave it to the callback to
	 * decide whether that is acceptable.
	 */
	if (skb->len > ev->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, ev->max_len);

	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
	if (!data)
		return;

	if (ev->req)
		ev->func_req(hdev, data, skb, opcode, status, req_complete,
			     req_complete_skb);
	else
		ev->func(hdev, data, skb);
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	hci_dev_lock(hdev);
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
	hci_dev_unlock(hdev);

	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
	    hci_skb_event(hdev->req_skb) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
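/* Illustrative aside (not part of hci_event.c): the dispatch pattern used by
 * hci_ev_table and hci_le_ev_table above -- a designated-initializer lookup
 * table indexed by opcode, where a handler only runs after a minimum-length
 * check and an oversized payload merely warns -- can be reproduced as a
 * self-contained userspace sketch. All demo_* names below are hypothetical
 * and exist only for this example.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_ev {
	void (*func)(const uint8_t *data, size_t len);
	uint16_t min_len;
	uint16_t max_len;
};

static void demo_foo(const uint8_t *data, size_t len)
{
	printf("foo: %zu byte(s), first 0x%02x\n", len, data[0]);
}

/* Designated initializers leave unhandled opcodes zeroed (func == NULL). */
static const struct demo_ev demo_table[UINT8_MAX + 1] = {
	[0x01] = { .func = demo_foo, .min_len = 1, .max_len = 4 },
};

static void demo_dispatch(uint8_t opcode, const uint8_t *data, size_t len)
{
	const struct demo_ev *ev = &demo_table[opcode];

	if (!ev->func)
		return;			/* no handler registered */
	if (len < ev->min_len) {
		fprintf(stderr, "event 0x%02x too short: %zu < %u\n",
			opcode, len, ev->min_len);
		return;			/* reject: fixed part unparseable */
	}
	if (len > ev->max_len)
		fprintf(stderr, "event 0x%02x oversized: %zu > %u\n",
			opcode, len, ev->max_len);	/* warn, still parse */
	ev->func(data, len);
}

int main(void)
{
	const uint8_t pkt[] = { 0xaa, 0xbb };

	demo_dispatch(0x01, pkt, sizeof(pkt));	/* handled */
	demo_dispatch(0x02, pkt, sizeof(pkt));	/* silently ignored */
	return 0;
}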
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/math.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>
#include <linux/ieee802154.h>
#include <uapi/linux/if_ltalk.h>
#include <uapi/linux/if_fddi.h>
#include <uapi/linux/if_hippi.h>
#include <uapi/linux/if_fc.h>
#include <net/ax25.h>
#include <net/rose.h>
#include <net/6lowpan.h>
#include <net/rps.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>

#include "tun_vnet.h"

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */
/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int	count;		/* Number of addrs. Zero means disabled */
	u32		mask[2];	/* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest.
 */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) to serve as one transmit queue for the tuntap device. The
 * sock_fprog and tap_filter are kept in tun_struct since they are used for
 * filtering for the netdevice, not for a specific queue (at least I didn't
 * see the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to
 * the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when the file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
	unsigned int numqueues;
	unsigned int flags;
	kuid_t owner;
	kgid_t group;

	struct net_device *dev;
	netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
			   NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4 | \
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)

	int align;
	int vnet_hdr_sz;
	int sndbuf;
	struct tap_filter txflt;
	struct sock_fprog fprog;
	/* protected by rtnl lock */
	bool filter_attached;
	u32 msg_enable;
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	atomic_long_t rx_frame_errors;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
	/* init args */
	struct file *file;
	struct ifreq *ifr;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

static void tun_flow_init(struct tun_struct *tun);
static void tun_flow_uninit(struct tun_struct *tun);

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_enable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_enable(&tfile->napi);
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head,
					    u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}

	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		netif_info(tun, tx_queued, tun->dev,
			   "create flow: hash %u index %u\n",
			   rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}

	return e;
}
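/* Illustrative aside (not part of tun.c): tun_hashfn() above relies on
 * TUN_NUM_FLOW_ENTRIES being a power of two, so masking with
 * (TUN_NUM_FLOW_ENTRIES - 1) selects a bucket exactly like a modulo but
 * without a division. A minimal standalone check of that equivalence
 * (demo_* names are hypothetical, for demonstration only):
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_NUM_ENTRIES 1024u			/* must be a power of two */
#define DEMO_MASK	 (DEMO_NUM_ENTRIES - 1)	/* 0x3ff */

static uint32_t demo_hashfn(uint32_t rxhash)
{
	return rxhash & DEMO_MASK;	/* same result as rxhash % 1024 */
}

int main(void)
{
	uint32_t h;

	for (h = 0; h < 100000; h += 7)
		assert(demo_hashfn(h) == h % DEMO_NUM_ENTRIES);
	return 0;
}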
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
		   e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = timer_container_of(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (READ_ONCE(e->queue_index) != queue_index)
			WRITE_ONCE(e->queue_index, queue_index);
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}
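/* Illustrative aside (not part of tun.c): when no flow entry matches,
 * tun_automq_select_queue() below falls back to reciprocal_scale(), which
 * maps a u32 hash into [0, numqueues) with a multiply and shift instead of
 * a modulo. A standalone sketch of that mapping, under the assumption that
 * it mirrors the kernel helper's arithmetic (demo_* names are hypothetical):
 */
#include <stdint.h>
#include <stdio.h>

/* Same multiply-and-shift arithmetic as the kernel's reciprocal_scale(). */
static uint32_t demo_reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t hash = 0x9e3779b9;	/* arbitrary example hash */
	uint32_t numqueues;

	/* The result is always strictly less than numqueues. */
	for (numqueues = 1; numqueues <= 8; numqueues++)
		printf("hash 0x%08x -> queue %u of %u\n", hash,
		       demo_reciprocal_scale(hash, numqueues), numqueues);
	return 0;
}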
/* We try to identify a flow through its rxhash. The reason we do not check
 * the rx queue number is that some cards (e.g. 82599) choose the rx queue
 * based on the tx queue where the last packet of the flow went out. As the
 * userspace application moves between processors, we may get a different
 * rx queue number here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq, numqueues;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		txq = reciprocal_scale(txq, numqueues);
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		if (!tfile->detached)
			tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;

		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		ntfile->xdp_rxq.queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1], NULL);

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else {
			tun_disable_queue(tun, tfile);
			tun_napi_disable(tfile);
		}

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
	}
}
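/* Illustrative aside (not part of tun.c): tun_ptr_free() above relies on
 * tun_is_xdp_frame()/tun_ptr_to_xdp() (defined elsewhere in this driver),
 * which tag xdp_frame pointers in their lowest bit so a single ptr_ring can
 * carry both sk_buffs and XDP frames. The tagging idea in isolation, as a
 * standalone userspace sketch (demo_* names are hypothetical):
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define DEMO_XDP_FLAG 0x1UL

static void *demo_xdp_to_ptr(void *frame)
{
	/* Heap allocations are at least 2-byte aligned, so bit 0 is free. */
	return (void *)((uintptr_t)frame | DEMO_XDP_FLAG);
}

static int demo_is_xdp(void *ptr)
{
	return (uintptr_t)ptr & DEMO_XDP_FLAG;
}

static void *demo_ptr_to_xdp(void *ptr)
{
	return (void *)((uintptr_t)ptr & ~DEMO_XDP_FLAG);
}

int main(void)
{
	void *frame = malloc(64);
	void *tagged = demo_xdp_to_ptr(frame);

	assert(demo_is_xdp(tagged));		/* tag survives the round trip */
	assert(demo_ptr_to_xdp(tagged) == frame);
	free(frame);
	return 0;
}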
static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();

	if (clean)
		sock_put(&tfile->sk);
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_napi_del(tfile);
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to persist device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		/* bail out only on failure; on success keep attaching */
		if (err < 0)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index, 0);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
		tun_napi_enable(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using half-initialized
	 * object.
	 */
*/ if (publish_tun) rcu_assign_pointer(tfile->tun, tun); rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); tun->numqueues++; tun_set_real_num_queues(tun); out: return err; } static struct tun_struct *tun_get(struct tun_file *tfile) { struct tun_struct *tun; rcu_read_lock(); tun = rcu_dereference(tfile->tun); if (tun) dev_hold(tun->dev); rcu_read_unlock(); return tun; } static void tun_put(struct tun_struct *tun) { dev_put(tun->dev); } /* TAP filtering */ static void addr_hash_set(u32 *mask, const u8 *addr) { int n = ether_crc(ETH_ALEN, addr) >> 26; mask[n >> 5] |= (1 << (n & 31)); } static unsigned int addr_hash_test(const u32 *mask, const u8 *addr) { int n = ether_crc(ETH_ALEN, addr) >> 26; return mask[n >> 5] & (1 << (n & 31)); } static int update_filter(struct tap_filter *filter, void __user *arg) { struct { u8 u[ETH_ALEN]; } *addr; struct tun_filter uf; int err, alen, n, nexact; if (copy_from_user(&uf, arg, sizeof(uf))) return -EFAULT; if (!uf.count) { /* Disabled */ filter->count = 0; return 0; } alen = ETH_ALEN * uf.count; addr = memdup_user(arg + sizeof(uf), alen); if (IS_ERR(addr)) return PTR_ERR(addr); /* The filter is updated without holding any locks. Which is * perfectly safe. We disable it first and in the worst * case we'll accept a few undesired packets. */ filter->count = 0; wmb(); /* Use first set of addresses as an exact filter */ for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++) memcpy(filter->addr[n], addr[n].u, ETH_ALEN); nexact = n; /* Remaining multicast addresses are hashed, * unicast will leave the filter disabled. */ memset(filter->mask, 0, sizeof(filter->mask)); for (; n < uf.count; n++) { if (!is_multicast_ether_addr(addr[n].u)) { err = 0; /* no filter */ goto free_addr; } addr_hash_set(filter->mask, addr[n].u); } /* For ALLMULTI just set the mask to all ones. * This overrides the mask populated above. */ if ((uf.flags & TUN_FLT_ALLMULTI)) memset(filter->mask, ~0, sizeof(filter->mask)); /* Now enable the filter */ wmb(); filter->count = nexact; /* Return the number of exact filters */ err = nexact; free_addr: kfree(addr); return err; } /* Returns: 0 - drop, !=0 - accept */ static int run_filter(struct tap_filter *filter, const struct sk_buff *skb) { /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect * at this point. */ struct ethhdr *eh = (struct ethhdr *) skb->data; int i; /* Exact match */ for (i = 0; i < filter->count; i++) if (ether_addr_equal(eh->h_dest, filter->addr[i])) return 1; /* Inexact match (multicast only) */ if (is_multicast_ether_addr(eh->h_dest)) return addr_hash_test(filter->mask, eh->h_dest); return 0; } /* * Checks whether the packet is accepted or not. 
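 * An empty filter (count == 0) accepts every frame; that is the
 * normal state, since only TAP devices may program a filter via
 * TUNSETTXFILTER.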
* Returns: 0 - drop, !=0 - accept */ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) { if (!filter->count) return 1; return run_filter(filter, skb); } /* Network device part of the driver */ static const struct ethtool_ops tun_ethtool_ops; static int tun_net_init(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); struct ifreq *ifr = tun->ifr; int err; spin_lock_init(&tun->lock); err = security_tun_dev_alloc_security(&tun->security); if (err < 0) return err; tun_flow_init(tun); dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; dev->hw_enc_features = dev->hw_features; dev->features = dev->hw_features; dev->vlan_features = dev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); dev->lltx = true; tun->flags = (tun->flags & ~TUN_FEATURES) | (ifr->ifr_flags & TUN_FEATURES); INIT_LIST_HEAD(&tun->disabled); err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI, ifr->ifr_flags & IFF_NAPI_FRAGS, false); if (err < 0) { tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); return err; } return 0; } /* Net device detach from fd. */ static void tun_net_uninit(struct net_device *dev) { tun_detach_all(dev); } /* Net device open. */ static int tun_net_open(struct net_device *dev) { netif_tx_start_all_queues(dev); return 0; } /* Net device close. */ static int tun_net_close(struct net_device *dev) { netif_tx_stop_all_queues(dev); return 0; } /* Net device start xmit */ static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb) { #ifdef CONFIG_RPS if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) { /* Select queue was not called for the skbuff, so we extract the * RPS hash and save it into the flow_table here. */ struct tun_flow_entry *e; __u32 rxhash; rxhash = __skb_get_hash_symmetric(skb); e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash); if (e) tun_flow_save_rps_rxhash(e, rxhash); } #endif } static unsigned int run_ebpf_filter(struct tun_struct *tun, struct sk_buff *skb, int len) { struct tun_prog *prog = rcu_dereference(tun->filter_prog); if (prog) len = bpf_prog_run_clear_cb(prog->prog, skb); return len; } /* Net device start xmit */ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) { enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; struct tun_struct *tun = netdev_priv(dev); int txq = skb->queue_mapping; struct netdev_queue *queue; struct tun_file *tfile; int len = skb->len; rcu_read_lock(); tfile = rcu_dereference(tun->tfiles[txq]); /* Drop packet if interface is not attached */ if (!tfile) { drop_reason = SKB_DROP_REASON_DEV_READY; goto drop; } if (!rcu_dereference(tun->steering_prog)) tun_automq_xmit(tun, skb); netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len); /* Drop if the filter does not like it. * This is a noop if the filter is disabled. * Filter can be enabled only for the TAP devices. 
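 *
 * Userspace arms it with TUNSETTXFILTER; a minimal sketch, assuming
 * a TAP fd and one destination MAC to accept (the wrapper struct is
 * illustrative, matching the layout update_filter() reads):
 *
 *	struct {
 *		struct tun_filter f;
 *		__u8 addr[1][ETH_ALEN];
 *	} req = { .f.count = 1 };
 *
 *	memcpy(req.addr[0], mac, ETH_ALEN);
 *	ioctl(tap_fd, TUNSETTXFILTER, &req);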
*/ if (!check_filter(&tun->txflt, skb)) { drop_reason = SKB_DROP_REASON_TAP_TXFILTER; goto drop; } if (tfile->socket.sk->sk_filter && sk_filter_reason(tfile->socket.sk, skb, &drop_reason)) goto drop; len = run_ebpf_filter(tun, skb, len); if (len == 0) { drop_reason = SKB_DROP_REASON_TAP_FILTER; goto drop; } if (pskb_trim(skb, len)) { drop_reason = SKB_DROP_REASON_NOMEM; goto drop; } if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) { drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; goto drop; } skb_tx_timestamp(skb); /* Orphan the skb - required as we might hang on to it * for indefinite time. */ skb_orphan(skb); nf_reset_ct(skb); if (ptr_ring_produce(&tfile->tx_ring, skb)) { drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; } /* dev->lltx requires to do our own update of trans_start */ queue = netdev_get_tx_queue(dev, txq); txq_trans_cond_update(queue); /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) kill_fasync(&tfile->fasync, SIGIO, POLL_IN); tfile->socket.sk->sk_data_ready(tfile->socket.sk); rcu_read_unlock(); return NETDEV_TX_OK; drop: dev_core_stats_tx_dropped_inc(dev); skb_tx_error(skb); kfree_skb_reason(skb, drop_reason); rcu_read_unlock(); return NET_XMIT_DROP; } static void tun_net_mclist(struct net_device *dev) { /* * This callback is supposed to deal with mc filter in * _rx_ path and has nothing to do with the _tx_ path. * In rx path we always accept everything userspace gives us. */ } static netdev_features_t tun_net_fix_features(struct net_device *dev, netdev_features_t features) { struct tun_struct *tun = netdev_priv(dev); return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); } static void tun_set_headroom(struct net_device *dev, int new_hr) { struct tun_struct *tun = netdev_priv(dev); if (new_hr < NET_SKB_PAD) new_hr = NET_SKB_PAD; tun->align = new_hr; } static void tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct tun_struct *tun = netdev_priv(dev); dev_get_tstats64(dev, stats); stats->rx_frame_errors += (unsigned long)atomic_long_read(&tun->rx_frame_errors); } static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile; struct bpf_prog *old_prog; int i; old_prog = rtnl_dereference(tun->xdp_prog); rcu_assign_pointer(tun->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); if (prog) sock_set_flag(&tfile->sk, SOCK_XDP); else sock_reset_flag(&tfile->sk, SOCK_XDP); } list_for_each_entry(tfile, &tun->disabled, next) { if (prog) sock_set_flag(&tfile->sk, SOCK_XDP); else sock_reset_flag(&tfile->sk, SOCK_XDP); } return 0; } static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return tun_xdp_set(dev, xdp->prog, xdp->extack); default: return -EINVAL; } } static int tun_net_change_carrier(struct net_device *dev, bool new_carrier) { if (new_carrier) { struct tun_struct *tun = netdev_priv(dev); if (!tun->numqueues) return -EPERM; netif_carrier_on(dev); } else { netif_carrier_off(dev); } return 0; } static const struct net_device_ops tun_netdev_ops = { .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, .ndo_start_xmit = tun_net_xmit, .ndo_fix_features = tun_net_fix_features, .ndo_select_queue = tun_select_queue, .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = tun_net_get_stats64, 
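	/* Unlike tap_netdev_ops below there is no .ndo_bpf and no
	 * .ndo_xdp_xmit here: native XDP is only wired up for TAP mode.
	 */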
.ndo_change_carrier = tun_net_change_carrier, }; static void __tun_xdp_flush_tfile(struct tun_file *tfile) { /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) kill_fasync(&tfile->fasync, SIGIO, POLL_IN); tfile->socket.sk->sk_data_ready(tfile->socket.sk); } static int tun_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile; u32 numqueues; int nxmit = 0; int i; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; rcu_read_lock(); resample: numqueues = READ_ONCE(tun->numqueues); if (!numqueues) { rcu_read_unlock(); return -ENXIO; /* Caller will free/return all frames */ } tfile = rcu_dereference(tun->tfiles[smp_processor_id() % numqueues]); if (unlikely(!tfile)) goto resample; spin_lock(&tfile->tx_ring.producer_lock); for (i = 0; i < n; i++) { struct xdp_frame *xdp = frames[i]; /* Encode the XDP flag into lowest bit for consumer to differ * XDP buffer from sk_buff. */ void *frame = tun_xdp_to_ptr(xdp); if (__ptr_ring_produce(&tfile->tx_ring, frame)) { dev_core_stats_tx_dropped_inc(dev); break; } nxmit++; } spin_unlock(&tfile->tx_ring.producer_lock); if (flags & XDP_XMIT_FLUSH) __tun_xdp_flush_tfile(tfile); rcu_read_unlock(); return nxmit; } static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) { struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); int nxmit; if (unlikely(!frame)) return -EOVERFLOW; nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); if (!nxmit) xdp_return_frame_rx_napi(frame); return nxmit; } static const struct net_device_ops tap_netdev_ops = { .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, .ndo_start_xmit = tun_net_xmit, .ndo_fix_features = tun_net_fix_features, .ndo_set_rx_mode = tun_net_mclist, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_select_queue = tun_select_queue, .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = tun_set_headroom, .ndo_bpf = tun_xdp, .ndo_xdp_xmit = tun_xdp_xmit, .ndo_change_carrier = tun_net_change_carrier, }; static void tun_flow_init(struct tun_struct *tun) { int i; for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) INIT_HLIST_HEAD(&tun->flows[i]); tun->ageing_time = TUN_FLOW_EXPIRE; timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); mod_timer(&tun->flow_gc_timer, round_jiffies_up(jiffies + tun->ageing_time)); } static void tun_flow_uninit(struct tun_struct *tun) { timer_delete_sync(&tun->flow_gc_timer); tun_flow_flush(tun); } #define MIN_MTU 68 #define MAX_MTU 65535 /* Initialize net device. */ static void tun_net_initialize(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: dev->netdev_ops = &tun_netdev_ops; dev->header_ops = &ip_tunnel_header_ops; /* Point-to-Point TUN Device */ dev->hard_header_len = 0; dev->addr_len = 0; dev->mtu = 1500; /* Zero header length */ dev->type = ARPHRD_NONE; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; break; case IFF_TAP: dev->netdev_ops = &tap_netdev_ops; /* Ethernet TAP Device */ ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; eth_hw_addr_random(dev); /* Currently tun does not support XDP, only tap does. 
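 * The bits below advertise, in order, that an XDP program can be
 * attached (BASIC), that packets may be redirected away from this
 * device (REDIRECT), and that the device can act as a redirect
 * target through .ndo_xdp_xmit (NDO_XMIT).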
*/ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_NDO_XMIT; break; } dev->min_mtu = MIN_MTU; dev->max_mtu = MAX_MTU - dev->hard_header_len; } static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) { struct sock *sk = tfile->socket.sk; return (tun->dev->flags & IFF_UP) && sock_writeable(sk); } /* Character device part */ /* Poll */ static __poll_t tun_chr_poll(struct file *file, poll_table *wait) { struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); struct sock *sk; __poll_t mask = 0; if (!tun) return EPOLLERR; sk = tfile->socket.sk; poll_wait(file, sk_sleep(sk), wait); if (!ptr_ring_empty(&tfile->tx_ring)) mask |= EPOLLIN | EPOLLRDNORM; /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to * guarantee EPOLLOUT to be raised by either here or * tun_sock_write_space(). Then process could get notification * after it writes to a down device and meets -EIO. */ if (tun_sock_writeable(tun, tfile) || (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && tun_sock_writeable(tun, tfile))) mask |= EPOLLOUT | EPOLLWRNORM; if (tun->dev->reg_state != NETREG_REGISTERED) mask = EPOLLERR; tun_put(tun); return mask; } static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, size_t len, const struct iov_iter *it) { struct sk_buff *skb; size_t linear; int err; int i; if (it->nr_segs > MAX_SKB_FRAGS + 1 || len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN)) return ERR_PTR(-EMSGSIZE); local_bh_disable(); skb = napi_get_frags(&tfile->napi); local_bh_enable(); if (!skb) return ERR_PTR(-ENOMEM); linear = iov_iter_single_seg_count(it); err = __skb_grow(skb, linear); if (err) goto free; skb->len = len; skb->data_len = len - linear; skb->truesize += skb->data_len; for (i = 1; i < it->nr_segs; i++) { const struct iovec *iov = iter_iov(it) + i; size_t fragsz = iov->iov_len; struct page *page; void *frag; if (fragsz == 0 || fragsz > PAGE_SIZE) { err = -EINVAL; goto free; } frag = netdev_alloc_frag(fragsz); if (!frag) { err = -ENOMEM; goto free; } page = virt_to_head_page(frag); skb_fill_page_desc(skb, i - 1, page, frag - page_address(page), fragsz); } return skb; free: /* frees skb and all frags allocated with napi_alloc_frag() */ napi_free_frags(&tfile->napi); return ERR_PTR(err); } /* prepad is the amount to reserve at front. len is length after that. * linear is a hint as to how much to copy (usually headers). */ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, size_t prepad, size_t len, size_t linear, int noblock) { struct sock *sk = tfile->socket.sk; struct sk_buff *skb; int err; /* Under a page? Don't bother with paged skb. 
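 * Otherwise cap the linear part so that the remainder is carried in
 * page fragments, at most MAX_SKB_FRAGS chunks of
 * PAGE_ALLOC_COSTLY_ORDER pages each.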
*/ if (prepad + len < PAGE_SIZE) linear = len; if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, &err, PAGE_ALLOC_COSTLY_ORDER); if (!skb) return ERR_PTR(err); skb_reserve(skb, prepad); skb_put(skb, linear); skb->data_len = len - linear; skb->len += len - linear; return skb; } static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb, int more) { struct sk_buff_head *queue = &tfile->sk.sk_write_queue; struct sk_buff_head process_queue; u32 rx_batched = tun->rx_batched; bool rcv = false; if (!rx_batched || (!more && skb_queue_empty(queue))) { local_bh_disable(); skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); return; } spin_lock(&queue->lock); if (!more || skb_queue_len(queue) == rx_batched) { __skb_queue_head_init(&process_queue); skb_queue_splice_tail_init(queue, &process_queue); rcv = true; } else { __skb_queue_tail(queue, skb); } spin_unlock(&queue->lock); if (rcv) { struct sk_buff *nskb; local_bh_disable(); while ((nskb = __skb_dequeue(&process_queue))) { skb_record_rx_queue(nskb, tfile->queue_index); netif_receive_skb(nskb); } skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); } } static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, int len, int noblock, bool zerocopy) { if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) return false; if (tfile->socket.sk->sk_sndbuf != INT_MAX) return false; if (!noblock) return false; if (zerocopy) return false; if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) return false; return true; } static struct sk_buff *__tun_build_skb(struct tun_file *tfile, struct page_frag *alloc_frag, char *buf, int buflen, int len, int pad, int metasize) { struct sk_buff *skb = build_skb(buf, buflen); if (!skb) return ERR_PTR(-ENOMEM); skb_reserve(skb, pad); skb_put(skb, len); if (metasize) skb_metadata_set(skb, metasize); skb_set_owner_w(skb, tfile->socket.sk); get_page(alloc_frag->page); alloc_frag->offset += buflen; return skb; } static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, struct xdp_buff *xdp, u32 act) { int err; switch (act) { case XDP_REDIRECT: err = xdp_do_redirect(tun->dev, xdp, xdp_prog); if (err) { dev_core_stats_rx_dropped_inc(tun->dev); return err; } dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); break; case XDP_TX: err = tun_xdp_tx(tun->dev, xdp); if (err < 0) { dev_core_stats_rx_dropped_inc(tun->dev); return err; } dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); break; case XDP_PASS: break; default: bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(tun->dev, xdp_prog, act); fallthrough; case XDP_DROP: dev_core_stats_rx_dropped_inc(tun->dev); break; } return act; } static struct sk_buff *tun_build_skb(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *from, struct virtio_net_hdr *hdr, int len, int *skb_xdp) { struct page_frag *alloc_frag = &current->task_frag; struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; struct bpf_prog *xdp_prog; int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); char *buf; size_t copied; int pad = TUN_RX_PAD; int metasize = 0; int err = 0; rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) pad += 
			XDP_PACKET_HEADROOM;
	buflen += SKB_DATA_ALIGN(len + pad);
	rcu_read_unlock();

	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return ERR_PTR(-EFAULT);

	/* There's a small window that XDP may be set after the check
	 * of xdp_prog above, this should be rare and for simplicity
	 * we do XDP on skb in case the headroom is not enough.
	 */
	if (hdr->gso_type || !xdp_prog) {
		*skb_xdp = 1;
		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
				       pad, metasize);
	}

	*skb_xdp = 0;

	local_bh_disable();
	rcu_read_lock();
	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;

		xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
		xdp_prepare_buff(&xdp, buf, pad, len, true);

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		if (act == XDP_REDIRECT || act == XDP_TX) {
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
		}
		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
		if (err < 0) {
			if (act == XDP_REDIRECT || act == XDP_TX)
				put_page(alloc_frag->page);
			goto out;
		}

		if (err == XDP_REDIRECT)
			xdp_do_flush();
		if (err != XDP_PASS)
			goto out;

		pad = xdp.data - xdp.data_hard_start;
		len = xdp.data_end - xdp.data;

		/* It is known that the xdp_buff was prepared with metadata
		 * support, so the metasize will never be negative.
		 */
		metasize = xdp.data - xdp.data_meta;
	}
	bpf_net_ctx_clear(bpf_net_ctx);
	rcu_read_unlock();
	local_bh_enable();

	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad,
			       metasize);

out:
	bpf_net_ctx_clear(bpf_net_ctx);
	rcu_read_unlock();
	local_bh_enable();
	return NULL;
}

/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr_v1_hash_tunnel hdr;
	struct virtio_net_hdr *gso;
	int good_linear;
	int copylen;
	int hdr_len = 0;
	bool zerocopy = false;
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
	bool frags = tun_napi_frags_enabled(tfile);
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	netdev_features_t features = 0;

	/*
	 * Keep it easy and always zero the whole buffer, even if the
	 * tunnel-related field will be touched only when the feature
	 * is enabled and the hdr size is compatible.
	 */
	memset(&hdr, 0, sizeof(hdr));
	gso = (struct virtio_net_hdr *)&hdr;

	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		features = tun_vnet_hdr_guest_features(vnet_hdr_sz);
		hdr_len = __tun_vnet_hdr_get(vnet_hdr_sz, tun->flags,
					     features, from, gso);
		if (hdr_len < 0)
			return hdr_len;

		len -= vnet_hdr_sz;
	}

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (hdr_len && hdr_len < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
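		 * (copylen is the virtio hdr_len when the header announced
		 * one, otherwise GOODCOPY_LEN, in both cases clamped to
		 * good_linear.)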
* The rest of the buffer is mapped from userspace. */ copylen = min(hdr_len ? hdr_len : GOODCOPY_LEN, good_linear); linear = copylen; iov_iter_advance(&i, copylen); if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) zerocopy = true; } if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { /* For the packet that is not easy to be processed * (e.g gso or jumbo packet), we will do it at after * skb was created with generic XDP routine. */ skb = tun_build_skb(tun, tfile, from, gso, len, &skb_xdp); err = PTR_ERR_OR_ZERO(skb); if (err) goto drop; if (!skb) return total_len; } else { if (!zerocopy) { copylen = len; linear = min(hdr_len, good_linear); } if (frags) { mutex_lock(&tfile->napi_mutex); skb = tun_napi_alloc_frags(tfile, copylen, from); /* tun_napi_alloc_frags() enforces a layout for the skb. * If zerocopy is enabled, then this layout will be * overwritten by zerocopy_sg_from_iter(). */ zerocopy = false; } else { if (!linear) linear = min_t(size_t, good_linear, copylen); skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); } err = PTR_ERR_OR_ZERO(skb); if (err) goto drop; if (zerocopy) err = zerocopy_sg_from_iter(skb, from); else err = skb_copy_datagram_from_iter(skb, 0, from, len); if (err) { err = -EFAULT; drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; goto drop; } } if (tun_vnet_hdr_tnl_to_skb(tun->flags, features, skb, &hdr)) { atomic_long_inc(&tun->rx_frame_errors); err = -EINVAL; goto free_skb; } switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: if (tun->flags & IFF_NO_PI) { u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; switch (ip_version) { case 4: pi.proto = htons(ETH_P_IP); break; case 6: pi.proto = htons(ETH_P_IPV6); break; default: err = -EINVAL; goto drop; } } skb_reset_mac_header(skb); skb->protocol = pi.proto; skb->dev = tun->dev; break; case IFF_TAP: if (frags && !pskb_may_pull(skb, ETH_HLEN)) { err = -ENOMEM; drop_reason = SKB_DROP_REASON_HDR_TRUNC; goto drop; } skb->protocol = eth_type_trans(skb, tun->dev); break; } /* copy skb_ubuf_info for callback when skb has no error */ if (zerocopy) { skb_zcopy_init(skb, msg_control); } else if (msg_control) { struct ubuf_info *uarg = msg_control; uarg->ops->complete(NULL, uarg, false); } skb_reset_network_header(skb); skb_probe_transport_header(skb); skb_record_rx_queue(skb, tfile->queue_index); if (skb_xdp) { struct bpf_prog *xdp_prog; int ret; local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { ret = do_xdp_generic(xdp_prog, &skb); if (ret != XDP_PASS) { rcu_read_unlock(); local_bh_enable(); goto unlock_frags; } if (frags && skb != tfile->napi.skb) tfile->napi.skb = skb; } rcu_read_unlock(); local_bh_enable(); } /* Compute the costly rx hash only if needed for flow updates. * We may get a very small possibility of OOO during switching, not * worth to optimize. */ if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 && !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); rcu_read_lock(); if (unlikely(!(tun->dev->flags & IFF_UP))) { err = -EIO; rcu_read_unlock(); drop_reason = SKB_DROP_REASON_DEV_READY; goto drop; } if (frags) { u32 headlen; /* Exercise flow dissector code path. 
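 * eth_get_headlen() runs the flow dissector over the frame to size
 * the linear area; a result larger than skb_headlen() can only mean
 * a dissector bug, hence the WARN_ON_ONCE() and drop below.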
*/ skb_push(skb, ETH_HLEN); headlen = eth_get_headlen(tun->dev, skb->data, skb_headlen(skb)); if (unlikely(headlen > skb_headlen(skb))) { WARN_ON_ONCE(1); err = -ENOMEM; dev_core_stats_rx_dropped_inc(tun->dev); napi_busy: napi_free_frags(&tfile->napi); rcu_read_unlock(); mutex_unlock(&tfile->napi_mutex); return err; } if (likely(napi_schedule_prep(&tfile->napi))) { local_bh_disable(); napi_gro_frags(&tfile->napi); napi_complete(&tfile->napi); local_bh_enable(); } else { err = -EBUSY; goto napi_busy; } mutex_unlock(&tfile->napi_mutex); } else if (tfile->napi_enabled) { struct sk_buff_head *queue = &tfile->sk.sk_write_queue; int queue_len; spin_lock_bh(&queue->lock); if (unlikely(tfile->detached)) { spin_unlock_bh(&queue->lock); rcu_read_unlock(); err = -EBUSY; goto free_skb; } __skb_queue_tail(queue, skb); queue_len = skb_queue_len(queue); spin_unlock(&queue->lock); if (!more || queue_len > NAPI_POLL_WEIGHT) napi_schedule(&tfile->napi); local_bh_enable(); } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { tun_rx_batched(tun, tfile, skb, more); } else { netif_rx(skb); } rcu_read_unlock(); preempt_disable(); dev_sw_netstats_rx_add(tun->dev, len); preempt_enable(); if (rxhash) tun_flow_update(tun, rxhash, tfile); return total_len; drop: if (err != -EAGAIN) dev_core_stats_rx_dropped_inc(tun->dev); free_skb: if (!IS_ERR_OR_NULL(skb)) kfree_skb_reason(skb, drop_reason); unlock_frags: if (frags) { tfile->napi.skb = NULL; mutex_unlock(&tfile->napi_mutex); } return err ?: total_len; } static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); ssize_t result; int noblock = 0; if (!tun) return -EBADFD; if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) noblock = 1; result = tun_get_user(tun, tfile, NULL, from, noblock, false); tun_put(tun); return result; } static ssize_t tun_put_user_xdp(struct tun_struct *tun, struct tun_file *tfile, struct xdp_frame *xdp_frame, struct iov_iter *iter) { int vnet_hdr_sz = 0; size_t size = xdp_frame->len; ssize_t ret; if (tun->flags & IFF_VNET_HDR) { struct virtio_net_hdr gso = { 0 }; vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); ret = tun_vnet_hdr_put(vnet_hdr_sz, iter, &gso); if (ret) return ret; } ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; preempt_disable(); dev_sw_netstats_tx_add(tun->dev, 1, ret); preempt_enable(); return ret; } /* Put packet to the user space buffer */ static ssize_t tun_put_user(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb, struct iov_iter *iter) { struct tun_pi pi = { 0, skb->protocol }; ssize_t total; int vlan_offset = 0; int vlan_hlen = 0; int vnet_hdr_sz = 0; int ret; if (skb_vlan_tag_present(skb)) vlan_hlen = VLAN_HLEN; if (tun->flags & IFF_VNET_HDR) vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); total = skb->len + vlan_hlen + vnet_hdr_sz; if (!(tun->flags & IFF_NO_PI)) { if (iov_iter_count(iter) < sizeof(pi)) return -EINVAL; total += sizeof(pi); if (iov_iter_count(iter) < total) { /* Packet will be striped */ pi.flags |= TUN_PKT_STRIP; } if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) return -EFAULT; } if (vnet_hdr_sz) { struct virtio_net_hdr_v1_hash_tunnel hdr; struct virtio_net_hdr *gso; ret = tun_vnet_hdr_tnl_from_skb(tun->flags, tun->dev, skb, &hdr); if (ret) return ret; /* * Drop the packet if the configured header size is too small * WRT the enabled offloads. 
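 * e.g. a packet that needs the extended
 * virtio_net_hdr_v1_hash_tunnel will not fit while userspace still
 * has a legacy sizeof(struct virtio_net_hdr) vnet_hdr_sz configured.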
*/ gso = (struct virtio_net_hdr *)&hdr; ret = __tun_vnet_hdr_put(vnet_hdr_sz, tun->dev->features, iter, gso); if (ret) return ret; } if (vlan_hlen) { int ret; struct veth veth; veth.h_vlan_proto = skb->vlan_proto; veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); if (ret || !iov_iter_count(iter)) goto done; ret = copy_to_iter(&veth, sizeof(veth), iter); if (ret != sizeof(veth) || !iov_iter_count(iter)) goto done; } skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); done: /* caller is in process context, */ preempt_disable(); dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen); preempt_enable(); return total; } static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) { DECLARE_WAITQUEUE(wait, current); void *ptr = NULL; int error = 0; ptr = ptr_ring_consume(&tfile->tx_ring); if (ptr) goto out; if (noblock) { error = -EAGAIN; goto out; } add_wait_queue(&tfile->socket.wq.wait, &wait); while (1) { set_current_state(TASK_INTERRUPTIBLE); ptr = ptr_ring_consume(&tfile->tx_ring); if (ptr) break; if (signal_pending(current)) { error = -ERESTARTSYS; break; } if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { error = -EFAULT; break; } schedule(); } __set_current_state(TASK_RUNNING); remove_wait_queue(&tfile->socket.wq.wait, &wait); out: *err = error; return ptr; } static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *to, int noblock, void *ptr) { ssize_t ret; int err; if (!iov_iter_count(to)) { tun_ptr_free(ptr); return 0; } if (!ptr) { /* Read frames from ring */ ptr = tun_ring_recv(tfile, noblock, &err); if (!ptr) return err; } if (tun_is_xdp_frame(ptr)) { struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); ret = tun_put_user_xdp(tun, tfile, xdpf, to); xdp_return_frame(xdpf); } else { struct sk_buff *skb = ptr; ret = tun_put_user(tun, tfile, skb, to); if (unlikely(ret < 0)) kfree_skb(skb); else consume_skb(skb); } return ret; } static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); ssize_t len = iov_iter_count(to), ret; int noblock = 0; if (!tun) return -EBADFD; if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) noblock = 1; ret = tun_do_read(tun, tfile, to, noblock, NULL); ret = min_t(ssize_t, ret, len); if (ret > 0) iocb->ki_pos = ret; tun_put(tun); return ret; } static void tun_prog_free(struct rcu_head *rcu) { struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); bpf_prog_destroy(prog->prog); kfree(prog); } static int __tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, struct bpf_prog *prog) { struct tun_prog *old, *new = NULL; if (prog) { new = kmalloc(sizeof(*new), GFP_KERNEL); if (!new) return -ENOMEM; new->prog = prog; } spin_lock_bh(&tun->lock); old = rcu_dereference_protected(*prog_p, lockdep_is_held(&tun->lock)); rcu_assign_pointer(*prog_p, new); spin_unlock_bh(&tun->lock); if (old) call_rcu(&old->rcu, tun_prog_free); return 0; } static void tun_free_netdev(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); BUG_ON(!(list_empty(&tun->disabled))); tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); __tun_set_ebpf(tun, &tun->steering_prog, NULL); __tun_set_ebpf(tun, &tun->filter_prog, NULL); } static void tun_setup(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); 
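	/* Start out unowned: while owner/group are INVALID_UID/INVALID_GID,
	 * tun_not_capable() refuses nobody; TUNSETOWNER/TUNSETGROUP can
	 * restrict the device later.
	 */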
tun->owner = INVALID_UID; tun->group = INVALID_GID; tun_default_link_ksettings(dev, &tun->link_ksettings); dev->ethtool_ops = &tun_ethtool_ops; dev->needs_free_netdev = true; dev->priv_destructor = tun_free_netdev; /* We prefer our own queue length */ dev->tx_queue_len = TUN_READQ_SIZE; } /* Trivial set of netlink ops to allow deleting tun or tap * device with netlink. */ static int tun_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { NL_SET_ERR_MSG(extack, "tun/tap creation via rtnetlink is not supported."); return -EOPNOTSUPP; } static size_t tun_get_size(const struct net_device *dev) { BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); return nla_total_size(sizeof(uid_t)) + /* OWNER */ nla_total_size(sizeof(gid_t)) + /* GROUP */ nla_total_size(sizeof(u8)) + /* TYPE */ nla_total_size(sizeof(u8)) + /* PI */ nla_total_size(sizeof(u8)) + /* VNET_HDR */ nla_total_size(sizeof(u8)) + /* PERSIST */ nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ 0; } static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) goto nla_put_failure; if (uid_valid(tun->owner) && nla_put_u32(skb, IFLA_TUN_OWNER, from_kuid_munged(current_user_ns(), tun->owner))) goto nla_put_failure; if (gid_valid(tun->group) && nla_put_u32(skb, IFLA_TUN_GROUP, from_kgid_munged(current_user_ns(), tun->group))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, !!(tun->flags & IFF_MULTI_QUEUE))) goto nla_put_failure; if (tun->flags & IFF_MULTI_QUEUE) { if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) goto nla_put_failure; if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, tun->numdisabled)) goto nla_put_failure; } return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops tun_link_ops __read_mostly = { .kind = DRV_NAME, .priv_size = sizeof(struct tun_struct), .setup = tun_setup, .validate = tun_validate, .get_size = tun_get_size, .fill_info = tun_fill_info, }; static void tun_sock_write_space(struct sock *sk) { struct tun_file *tfile; wait_queue_head_t *wqueue; if (!sock_writeable(sk)) return; if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) return; wqueue = sk_sleep(sk); if (wqueue && waitqueue_active(wqueue)) wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); tfile = container_of(sk, struct tun_file, sk); kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); } static void tun_put_page(struct tun_page *tpage) { if (tpage->page) __page_frag_cache_drain(tpage->page, tpage->count); } static int tun_xdp_one(struct tun_struct *tun, struct tun_file *tfile, struct xdp_buff *xdp, int *flush, struct tun_page *tpage) { unsigned int datasize = xdp->data_end - xdp->data; struct virtio_net_hdr *gso = xdp->data_hard_start; struct virtio_net_hdr_v1_hash_tunnel *tnl_hdr; struct bpf_prog *xdp_prog; struct sk_buff *skb = NULL; struct sk_buff_head *queue; netdev_features_t features; u32 rxhash = 0, act; int buflen = xdp->frame_sz; int metasize = 0; int ret = 0; bool skb_xdp = false; struct page *page; if 
(unlikely(datasize < ETH_HLEN)) return -EINVAL; xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { if (gso->gso_type) { skb_xdp = true; goto build; } xdp_init_buff(xdp, buflen, &tfile->xdp_rxq); act = bpf_prog_run_xdp(xdp_prog, xdp); ret = tun_xdp_act(tun, xdp_prog, xdp, act); if (ret < 0) { put_page(virt_to_head_page(xdp->data)); return ret; } switch (ret) { case XDP_REDIRECT: *flush = true; fallthrough; case XDP_TX: return 0; case XDP_PASS: break; default: page = virt_to_head_page(xdp->data); if (tpage->page == page) { ++tpage->count; } else { tun_put_page(tpage); tpage->page = page; tpage->count = 1; } return 0; } } build: skb = build_skb(xdp->data_hard_start, buflen); if (!skb) { ret = -ENOMEM; goto out; } skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); /* The externally provided xdp_buff may have no metadata support, which * is marked by xdp->data_meta being xdp->data + 1. This will lead to a * metasize of -1 and is the reason why the condition checks for > 0. */ metasize = xdp->data - xdp->data_meta; if (metasize > 0) skb_metadata_set(skb, metasize); features = tun_vnet_hdr_guest_features(READ_ONCE(tun->vnet_hdr_sz)); tnl_hdr = (struct virtio_net_hdr_v1_hash_tunnel *)gso; if (tun_vnet_hdr_tnl_to_skb(tun->flags, features, skb, tnl_hdr)) { atomic_long_inc(&tun->rx_frame_errors); kfree_skb(skb); ret = -EINVAL; goto out; } skb->protocol = eth_type_trans(skb, tun->dev); skb_reset_network_header(skb); skb_probe_transport_header(skb); skb_record_rx_queue(skb, tfile->queue_index); if (skb_xdp) { ret = do_xdp_generic(xdp_prog, &skb); if (ret != XDP_PASS) { ret = 0; goto out; } } if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 && !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); if (tfile->napi_enabled) { queue = &tfile->sk.sk_write_queue; spin_lock(&queue->lock); if (unlikely(tfile->detached)) { spin_unlock(&queue->lock); kfree_skb(skb); return -EBUSY; } __skb_queue_tail(queue, skb); spin_unlock(&queue->lock); ret = 1; } else { netif_receive_skb(skb); ret = 0; } /* No need to disable preemption here since this function is * always called with bh disabled */ dev_sw_netstats_rx_add(tun->dev, datasize); if (rxhash) tun_flow_update(tun, rxhash, tfile); out: return ret; } static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { int ret, i; struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); struct tun_msg_ctl *ctl = m->msg_control; struct xdp_buff *xdp; if (!tun) return -EBADFD; if (m->msg_controllen == sizeof(struct tun_msg_ctl) && ctl && ctl->type == TUN_MSG_PTR) { struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; struct tun_page tpage; int n = ctl->num; int flush = 0, queued = 0; memset(&tpage, 0, sizeof(tpage)); local_bh_disable(); rcu_read_lock(); bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); for (i = 0; i < n; i++) { xdp = &((struct xdp_buff *)ctl->ptr)[i]; ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage); if (ret > 0) queued += ret; } if (flush) xdp_do_flush(); if (tfile->napi_enabled && queued > 0) napi_schedule(&tfile->napi); bpf_net_ctx_clear(bpf_net_ctx); rcu_read_unlock(); local_bh_enable(); tun_put_page(&tpage); ret = total_len; goto out; } ret = tun_get_user(tun, tfile, ctl ? 
ctl->ptr : NULL, &m->msg_iter, m->msg_flags & MSG_DONTWAIT, m->msg_flags & MSG_MORE); out: tun_put(tun); return ret; } static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); void *ptr = m->msg_control; int ret; if (!tun) { ret = -EBADFD; goto out_free; } if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { ret = -EINVAL; goto out_put_tun; } if (flags & MSG_ERRQUEUE) { ret = sock_recv_errqueue(sock->sk, m, total_len, SOL_PACKET, TUN_TX_TIMESTAMP); goto out; } ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); if (ret > (ssize_t)total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? ret : total_len; } out: tun_put(tun); return ret; out_put_tun: tun_put(tun); out_free: tun_ptr_free(ptr); return ret; } static int tun_ptr_peek_len(void *ptr) { if (likely(ptr)) { if (tun_is_xdp_frame(ptr)) { struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); return xdpf->len; } return __skb_array_len_with_tag(ptr); } else { return 0; } } static int tun_peek_len(struct socket *sock) { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun; int ret = 0; tun = tun_get(tfile); if (!tun) return 0; ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); tun_put(tun); return ret; } /* Ops structure to mimic raw sockets with tun */ static const struct proto_ops tun_socket_ops = { .peek_len = tun_peek_len, .sendmsg = tun_sendmsg, .recvmsg = tun_recvmsg, }; static struct proto tun_proto = { .name = "tun", .owner = THIS_MODULE, .obj_size = sizeof(struct tun_file), }; static int tun_flags(struct tun_struct *tun) { return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); } static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return sysfs_emit(buf, "0x%x\n", tun_flags(tun)); } static ssize_t owner_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return uid_valid(tun->owner)? sysfs_emit(buf, "%u\n", from_kuid_munged(current_user_ns(), tun->owner)) : sysfs_emit(buf, "-1\n"); } static ssize_t group_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return gid_valid(tun->group) ? 
sysfs_emit(buf, "%u\n", from_kgid_munged(current_user_ns(), tun->group)) : sysfs_emit(buf, "-1\n"); } static DEVICE_ATTR_RO(tun_flags); static DEVICE_ATTR_RO(owner); static DEVICE_ATTR_RO(group); static struct attribute *tun_dev_attrs[] = { &dev_attr_tun_flags.attr, &dev_attr_owner.attr, &dev_attr_group.attr, NULL }; static const struct attribute_group tun_attr_group = { .attrs = tun_dev_attrs }; static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) { struct tun_struct *tun; struct tun_file *tfile = file->private_data; struct net_device *dev; int err; if (tfile->detached) return -EINVAL; if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!(ifr->ifr_flags & IFF_NAPI) || (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) return -EINVAL; } dev = __dev_get_by_name(net, ifr->ifr_name); if (dev) { if (ifr->ifr_flags & IFF_TUN_EXCL) return -EBUSY; if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) tun = netdev_priv(dev); else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) tun = netdev_priv(dev); else return -EINVAL; if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != !!(tun->flags & IFF_MULTI_QUEUE)) return -EINVAL; if (tun_not_capable(tun)) return -EPERM; err = security_tun_dev_open(tun->security); if (err < 0) return err; err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, ifr->ifr_flags & IFF_NAPI, ifr->ifr_flags & IFF_NAPI_FRAGS, true); if (err < 0) return err; if (tun->flags & IFF_MULTI_QUEUE && (tun->numqueues + tun->numdisabled > 1)) { /* One or more queue has already been attached, no need * to initialize the device again. */ netdev_state_change(dev); return 0; } tun->flags = (tun->flags & ~TUN_FEATURES) | (ifr->ifr_flags & TUN_FEATURES); netdev_state_change(dev); } else { char *name; unsigned long flags = 0; int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; err = security_tun_dev_create(); if (err < 0) return err; /* Set dev type */ if (ifr->ifr_flags & IFF_TUN) { /* TUN device */ flags |= IFF_TUN; name = "tun%d"; } else if (ifr->ifr_flags & IFF_TAP) { /* TAP device */ flags |= IFF_TAP; name = "tap%d"; } else return -EINVAL; if (*ifr->ifr_name) name = ifr->ifr_name; dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, NET_NAME_UNKNOWN, tun_setup, queues, queues); if (!dev) return -ENOMEM; dev_net_set(dev, net); dev->rtnl_link_ops = &tun_link_ops; dev->ifindex = tfile->ifindex; dev->sysfs_groups[0] = &tun_attr_group; tun = netdev_priv(dev); tun->dev = dev; tun->flags = flags; tun->txflt.count = 0; tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); tun->align = NET_SKB_PAD; tun->filter_attached = false; tun->sndbuf = tfile->socket.sk->sk_sndbuf; tun->rx_batched = 0; RCU_INIT_POINTER(tun->steering_prog, NULL); tun->ifr = ifr; tun->file = file; tun_net_initialize(dev); err = register_netdevice(tun->dev); if (err < 0) { free_netdev(dev); return err; } /* free_netdev() won't check refcnt, to avoid race * with dev_put() we need publish tun after registration. */ rcu_assign_pointer(tfile->tun, tun); } if (ifr->ifr_flags & IFF_NO_CARRIER) netif_carrier_off(tun->dev); else netif_carrier_on(tun->dev); /* Make sure persistent devices do not get stuck in * xoff state. 
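 * (They may have been stopped by tun_net_close() while the device
 * persisted with no queue attached; waking them on re-attach gets
 * traffic flowing again.)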
*/ if (netif_running(tun->dev)) netif_tx_wake_all_queues(tun->dev); strscpy(ifr->ifr_name, tun->dev->name); return 0; } static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr) { strscpy(ifr->ifr_name, tun->dev->name); ifr->ifr_flags = tun_flags(tun); } #define PLAIN_GSO (NETIF_F_GSO_UDP_L4 | NETIF_F_TSO | NETIF_F_TSO6) /* This is like a cut-down ethtool ops, except done via tun fd so no * privs required. */ static int set_offload(struct tun_struct *tun, unsigned long arg) { netdev_features_t features = 0; if (arg & TUN_F_CSUM) { features |= NETIF_F_HW_CSUM; arg &= ~TUN_F_CSUM; if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { if (arg & TUN_F_TSO_ECN) { features |= NETIF_F_TSO_ECN; arg &= ~TUN_F_TSO_ECN; } if (arg & TUN_F_TSO4) features |= NETIF_F_TSO; if (arg & TUN_F_TSO6) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } arg &= ~TUN_F_UFO; /* TODO: for now USO4 and USO6 should work simultaneously */ if (arg & TUN_F_USO4 && arg & TUN_F_USO6) { features |= NETIF_F_GSO_UDP_L4; arg &= ~(TUN_F_USO4 | TUN_F_USO6); } /* * Tunnel offload is allowed only if some plain offload is * available, too. */ if (features & PLAIN_GSO && arg & TUN_F_UDP_TUNNEL_GSO) { features |= NETIF_F_GSO_UDP_TUNNEL; if (arg & TUN_F_UDP_TUNNEL_GSO_CSUM) features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; arg &= ~(TUN_F_UDP_TUNNEL_GSO | TUN_F_UDP_TUNNEL_GSO_CSUM); } } /* This gives the user a way to test for new features in future by * trying to set them. */ if (arg) return -EINVAL; tun->set_features = features; tun->dev->wanted_features &= ~TUN_USER_FEATURES; tun->dev->wanted_features |= features; netdev_update_features(tun->dev); return 0; } static void tun_detach_filter(struct tun_struct *tun, int n) { int i; struct tun_file *tfile; for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); lock_sock(tfile->socket.sk); sk_detach_filter(tfile->socket.sk); release_sock(tfile->socket.sk); } tun->filter_attached = false; } static int tun_attach_filter(struct tun_struct *tun) { int i, ret = 0; struct tun_file *tfile; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); lock_sock(tfile->socket.sk); ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); release_sock(tfile->socket.sk); if (ret) { tun_detach_filter(tun, i); return ret; } } tun->filter_attached = true; return ret; } static void tun_set_sndbuf(struct tun_struct *tun) { struct tun_file *tfile; int i; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); tfile->socket.sk->sk_sndbuf = tun->sndbuf; } } static int tun_set_queue(struct file *file, struct ifreq *ifr) { struct tun_file *tfile = file->private_data; struct tun_struct *tun; int ret = 0; rtnl_lock(); if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { tun = tfile->detached; if (!tun) { ret = -EINVAL; goto unlock; } ret = security_tun_dev_attach_queue(tun->security); if (ret < 0) goto unlock; ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, tun->flags & IFF_NAPI_FRAGS, true); } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { tun = rtnl_dereference(tfile->tun); if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) ret = -EINVAL; else __tun_detach(tfile, false); } else ret = -EINVAL; if (ret >= 0) netdev_state_change(tun->dev); unlock: rtnl_unlock(); return ret; } static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, void __user *data) { struct bpf_prog *prog; int fd; if (copy_from_user(&fd, data, sizeof(fd))) return -EFAULT; if (fd == -1) { prog = NULL; } else { prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 
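		/* Userspace hands over a BPF_PROG_TYPE_SOCKET_FILTER fd
		 * obtained from bpf(BPF_PROG_LOAD, ...); sketch:
		 *
		 *	int prog_fd = load_prog();	// hypothetical loader
		 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
		 *
		 * Both TUNSETSTEERINGEBPF and TUNSETFILTEREBPF funnel into
		 * this helper; prog_fd == -1 clears the program again.
		 */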
if (IS_ERR(prog)) return PTR_ERR(prog); } return __tun_set_ebpf(tun, prog_p, prog); } /* Return correct value for tun->dev->addr_len based on tun->dev->type. */ static unsigned char tun_get_addr_len(unsigned short type) { switch (type) { case ARPHRD_IP6GRE: case ARPHRD_TUNNEL6: return sizeof(struct in6_addr); case ARPHRD_IPGRE: case ARPHRD_TUNNEL: case ARPHRD_SIT: return 4; case ARPHRD_ETHER: return ETH_ALEN; case ARPHRD_IEEE802154: case ARPHRD_IEEE802154_MONITOR: return IEEE802154_EXTENDED_ADDR_LEN; case ARPHRD_PHONET_PIPE: case ARPHRD_PPP: case ARPHRD_NONE: return 0; case ARPHRD_6LOWPAN: return EUI64_ADDR_LEN; case ARPHRD_FDDI: return FDDI_K_ALEN; case ARPHRD_HIPPI: return HIPPI_ALEN; case ARPHRD_IEEE802: return FC_ALEN; case ARPHRD_ROSE: return ROSE_ADDR_LEN; case ARPHRD_NETROM: return AX25_ADDR_LEN; case ARPHRD_LOCALTLK: return LTALK_ALEN; default: return 0; } } static long __tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg, int ifreq_len) { struct tun_file *tfile = file->private_data; struct net *net = sock_net(&tfile->sk); struct tun_struct *tun; void __user* argp = (void __user*)arg; unsigned int carrier; struct ifreq ifr; kuid_t owner; kgid_t group; int ifindex; int sndbuf; int ret; bool do_notify = false; if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { if (copy_from_user(&ifr, argp, ifreq_len)) return -EFAULT; } else { memset(&ifr, 0, sizeof(ifr)); } if (cmd == TUNGETFEATURES) { /* Currently this just means: "what IFF flags are valid?". * This is needed because we never checked for invalid flags on * TUNSETIFF. */ return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER | TUN_FEATURES, (unsigned int __user*)argp); } else if (cmd == TUNSETQUEUE) { return tun_set_queue(file, &ifr); } else if (cmd == SIOCGSKNS) { if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; return open_related_ns(&net->ns, get_net_ns); } rtnl_lock(); tun = tun_get(tfile); if (cmd == TUNSETIFF) { ret = -EEXIST; if (tun) goto unlock; ifr.ifr_name[IFNAMSIZ-1] = '\0'; ret = tun_set_iff(net, file, &ifr); if (ret) goto unlock; if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; goto unlock; } if (cmd == TUNSETIFINDEX) { ret = -EPERM; if (tun) goto unlock; ret = -EFAULT; if (copy_from_user(&ifindex, argp, sizeof(ifindex))) goto unlock; ret = -EINVAL; if (ifindex < 0) goto unlock; ret = 0; tfile->ifindex = ifindex; goto unlock; } ret = -EBADFD; if (!tun) goto unlock; netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd); net = dev_net(tun->dev); ret = 0; switch (cmd) { case TUNGETIFF: tun_get_iff(tun, &ifr); if (tfile->detached) ifr.ifr_flags |= IFF_DETACH_QUEUE; if (!tfile->socket.sk->sk_filter) ifr.ifr_flags |= IFF_NOFILTER; if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; break; case TUNSETNOCSUM: /* Disable/Enable checksum */ /* [unimplemented] */ netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n", arg ? "disabled" : "enabled"); break; case TUNSETPERSIST: /* Disable/Enable persist mode. Keep an extra reference to the * module to prevent the module being unprobed. */ if (arg && !(tun->flags & IFF_PERSIST)) { tun->flags |= IFF_PERSIST; __module_get(THIS_MODULE); do_notify = true; } if (!arg && (tun->flags & IFF_PERSIST)) { tun->flags &= ~IFF_PERSIST; module_put(THIS_MODULE); do_notify = true; } netif_info(tun, drv, tun->dev, "persist %s\n", arg ? 
"enabled" : "disabled"); break; case TUNSETOWNER: /* Set owner of the device */ owner = make_kuid(current_user_ns(), arg); if (!uid_valid(owner)) { ret = -EINVAL; break; } tun->owner = owner; do_notify = true; netif_info(tun, drv, tun->dev, "owner set to %u\n", from_kuid(&init_user_ns, tun->owner)); break; case TUNSETGROUP: /* Set group of the device */ group = make_kgid(current_user_ns(), arg); if (!gid_valid(group)) { ret = -EINVAL; break; } tun->group = group; do_notify = true; netif_info(tun, drv, tun->dev, "group set to %u\n", from_kgid(&init_user_ns, tun->group)); break; case TUNSETLINK: /* Only allow setting the type when the interface is down */ if (tun->dev->flags & IFF_UP) { netif_info(tun, drv, tun->dev, "Linktype set failed because interface is up\n"); ret = -EBUSY; } else { ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, tun->dev); ret = notifier_to_errno(ret); if (ret) { netif_info(tun, drv, tun->dev, "Refused to change device type\n"); break; } tun->dev->type = (int) arg; tun->dev->addr_len = tun_get_addr_len(tun->dev->type); netif_info(tun, drv, tun->dev, "linktype set to %d\n", tun->dev->type); call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, tun->dev); } break; case TUNSETDEBUG: tun->msg_enable = (u32)arg; break; case TUNSETOFFLOAD: ret = set_offload(tun, arg); break; case TUNSETTXFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = update_filter(&tun->txflt, (void __user *)arg); break; case SIOCGIFHWADDR: /* Get hw address */ netif_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name); if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; break; case SIOCSIFHWADDR: /* Set hw address */ if (tun->dev->addr_len > sizeof(ifr.ifr_hwaddr)) { ret = -EINVAL; break; } ret = dev_set_mac_address_user(tun->dev, (struct sockaddr_storage *)&ifr.ifr_hwaddr, NULL); break; case TUNGETSNDBUF: sndbuf = tfile->socket.sk->sk_sndbuf; if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) ret = -EFAULT; break; case TUNSETSNDBUF: if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { ret = -EFAULT; break; } if (sndbuf <= 0) { ret = -EINVAL; break; } tun->sndbuf = sndbuf; tun_set_sndbuf(tun); break; case TUNATTACHFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = -EFAULT; if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) break; ret = tun_attach_filter(tun); break; case TUNDETACHFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = 0; tun_detach_filter(tun, tun->numqueues); break; case TUNGETFILTER: ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = -EFAULT; if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) break; ret = 0; break; case TUNSETSTEERINGEBPF: ret = tun_set_ebpf(tun, &tun->steering_prog, argp); break; case TUNSETFILTEREBPF: ret = tun_set_ebpf(tun, &tun->filter_prog, argp); break; case TUNSETCARRIER: ret = -EFAULT; if (copy_from_user(&carrier, argp, sizeof(carrier))) goto unlock; ret = tun_net_change_carrier(tun->dev, (bool)carrier); break; case TUNGETDEVNETNS: ret = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) goto unlock; ret = open_related_ns(&net->ns, get_net_ns); break; default: ret = tun_vnet_ioctl(&tun->vnet_hdr_sz, &tun->flags, cmd, argp); break; } if (do_notify) netdev_state_change(tun->dev); unlock: rtnl_unlock(); if (tun) tun_put(tun); return ret; } static long tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return 
__tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); } #ifdef CONFIG_COMPAT static long tun_chr_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case TUNSETIFF: case TUNGETIFF: case TUNSETTXFILTER: case TUNGETSNDBUF: case TUNSETSNDBUF: case SIOCGIFHWADDR: case SIOCSIFHWADDR: arg = (unsigned long)compat_ptr(arg); break; default: arg = (compat_ulong_t)arg; break; } /* * compat_ifreq is shorter than ifreq, so we must not access beyond * the end of that structure. All fields that are used in this * driver are compatible though, we don't need to convert the * contents. */ return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); } #endif /* CONFIG_COMPAT */ static int tun_chr_fasync(int fd, struct file *file, int on) { struct tun_file *tfile = file->private_data; int ret; if (on) { ret = file_f_owner_allocate(file); if (ret) goto out; } if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) goto out; if (on) { __f_setown(file, task_pid(current), PIDTYPE_TGID, 0); tfile->flags |= TUN_FASYNC; } else tfile->flags &= ~TUN_FASYNC; ret = 0; out: return ret; } static int tun_chr_open(struct inode *inode, struct file * file) { struct net *net = current->nsproxy->net_ns; struct tun_file *tfile; tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto, 0); if (!tfile) return -ENOMEM; if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) { sk_free(&tfile->sk); return -ENOMEM; } mutex_init(&tfile->napi_mutex); RCU_INIT_POINTER(tfile->tun, NULL); tfile->flags = 0; tfile->ifindex = 0; init_waitqueue_head(&tfile->socket.wq.wait); tfile->socket.file = file; tfile->socket.ops = &tun_socket_ops; sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid()); tfile->sk.sk_write_space = tun_sock_write_space; tfile->sk.sk_sndbuf = INT_MAX; file->private_data = tfile; INIT_LIST_HEAD(&tfile->next); sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); /* tun groks IOCB_NOWAIT just fine, mark it as such */ file->f_mode |= FMODE_NOWAIT; return 0; } static int tun_chr_close(struct inode *inode, struct file *file) { struct tun_file *tfile = file->private_data; tun_detach(tfile, true); return 0; } #ifdef CONFIG_PROC_FS static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) { struct tun_file *tfile = file->private_data; struct tun_struct *tun; struct ifreq ifr; memset(&ifr, 0, sizeof(ifr)); rtnl_lock(); tun = tun_get(tfile); if (tun) tun_get_iff(tun, &ifr); rtnl_unlock(); if (tun) tun_put(tun); seq_printf(m, "iff:\t%s\n", ifr.ifr_name); } #endif static const struct file_operations tun_fops = { .owner = THIS_MODULE, .read_iter = tun_chr_read_iter, .write_iter = tun_chr_write_iter, .poll = tun_chr_poll, .unlocked_ioctl = tun_chr_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = tun_chr_compat_ioctl, #endif .open = tun_chr_open, .release = tun_chr_close, .fasync = tun_chr_fasync, #ifdef CONFIG_PROC_FS .show_fdinfo = tun_chr_show_fdinfo, #endif }; static struct miscdevice tun_miscdev = { .minor = TUN_MINOR, .name = "tun", .nodename = "net/tun", .fops = &tun_fops, }; /* ethtool interface */ static void tun_default_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_zero_link_mode(cmd, advertising); cmd->base.speed = SPEED_10000; cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_TP; cmd->base.phy_address = 0; cmd->base.autoneg = AUTONEG_DISABLE; } static int tun_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct 
tun_struct *tun = netdev_priv(dev); memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); return 0; } static int tun_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct tun_struct *tun = netdev_priv(dev); memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); return 0; } static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct tun_struct *tun = netdev_priv(dev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: strscpy(info->bus_info, "tun", sizeof(info->bus_info)); break; case IFF_TAP: strscpy(info->bus_info, "tap", sizeof(info->bus_info)); break; } } static u32 tun_get_msglevel(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); return tun->msg_enable; } static void tun_set_msglevel(struct net_device *dev, u32 value) { struct tun_struct *tun = netdev_priv(dev); tun->msg_enable = value; } static int tun_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); ec->rx_max_coalesced_frames = tun->rx_batched; return 0; } static int tun_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) tun->rx_batched = NAPI_POLL_WEIGHT; else tun->rx_batched = ec->rx_max_coalesced_frames; return 0; } static void tun_get_channels(struct net_device *dev, struct ethtool_channels *channels) { struct tun_struct *tun = netdev_priv(dev); channels->combined_count = tun->numqueues; channels->max_combined = tun->flags & IFF_MULTI_QUEUE ? 
MAX_TAP_QUEUES : 1; } static const struct ethtool_ops tun_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES, .get_drvinfo = tun_get_drvinfo, .get_msglevel = tun_get_msglevel, .set_msglevel = tun_set_msglevel, .get_link = ethtool_op_get_link, .get_channels = tun_get_channels, .get_ts_info = ethtool_op_get_ts_info, .get_coalesce = tun_get_coalesce, .set_coalesce = tun_set_coalesce, .get_link_ksettings = tun_get_link_ksettings, .set_link_ksettings = tun_set_link_ksettings, }; static int tun_queue_resize(struct tun_struct *tun) { struct net_device *dev = tun->dev; struct tun_file *tfile; struct ptr_ring **rings; int n = tun->numqueues + tun->numdisabled; int ret, i; rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); if (!rings) return -ENOMEM; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); rings[i] = &tfile->tx_ring; } list_for_each_entry(tfile, &tun->disabled, next) rings[i++] = &tfile->tx_ring; ret = ptr_ring_resize_multiple_bh(rings, n, dev->tx_queue_len, GFP_KERNEL, tun_ptr_free); kfree(rings); return ret; } static int tun_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct tun_struct *tun = netdev_priv(dev); int i; if (dev->rtnl_link_ops != &tun_link_ops) return NOTIFY_DONE; switch (event) { case NETDEV_CHANGE_TX_QUEUE_LEN: if (tun_queue_resize(tun)) return NOTIFY_BAD; break; case NETDEV_UP: for (i = 0; i < tun->numqueues; i++) { struct tun_file *tfile; tfile = rtnl_dereference(tun->tfiles[i]); tfile->socket.sk->sk_write_space(tfile->socket.sk); } break; default: break; } return NOTIFY_DONE; } static struct notifier_block tun_notifier_block __read_mostly = { .notifier_call = tun_device_event, }; static int __init tun_init(void) { int ret = 0; pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); ret = rtnl_link_register(&tun_link_ops); if (ret) { pr_err("Can't register link_ops\n"); goto err_linkops; } ret = misc_register(&tun_miscdev); if (ret) { pr_err("Can't register misc device %d\n", TUN_MINOR); goto err_misc; } ret = register_netdevice_notifier(&tun_notifier_block); if (ret) { pr_err("Can't register netdevice notifier\n"); goto err_notifier; } return 0; err_notifier: misc_deregister(&tun_miscdev); err_misc: rtnl_link_unregister(&tun_link_ops); err_linkops: return ret; } static void __exit tun_cleanup(void) { misc_deregister(&tun_miscdev); rtnl_link_unregister(&tun_link_ops); unregister_netdevice_notifier(&tun_notifier_block); } /* Get an underlying socket object from tun file. Returns error unless file is * attached to a device. The returned object works like a packet socket, it * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for * holding a reference to the file for as long as the socket is in use. 
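 *
 * A hypothetical in-kernel caller in the style of vhost-net (msghdr setup
 * and error handling elided; 'file' is assumed to be an attached tun fd):
 *
 *	struct socket *sock = tun_get_socket(file);
 *	if (IS_ERR(sock))
 *		return PTR_ERR(sock);
 *	ret = sock_sendmsg(sock, &msg);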
*/ struct socket *tun_get_socket(struct file *file) { struct tun_file *tfile; if (file->f_op != &tun_fops) return ERR_PTR(-EINVAL); tfile = file->private_data; if (!tfile) return ERR_PTR(-EBADFD); return &tfile->socket; } EXPORT_SYMBOL_GPL(tun_get_socket); struct ptr_ring *tun_get_tx_ring(struct file *file) { struct tun_file *tfile; if (file->f_op != &tun_fops) return ERR_PTR(-EINVAL); tfile = file->private_data; if (!tfile) return ERR_PTR(-EBADFD); return &tfile->tx_ring; } EXPORT_SYMBOL_GPL(tun_get_tx_ring); module_init(tun_init); module_exit(tun_cleanup); MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(TUN_MINOR); MODULE_ALIAS("devname:net/tun"); MODULE_IMPORT_NS("NETDEV_INTERNAL");
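
/*
 * Illustrative userspace sketch (not part of the driver) exercising the
 * ioctl surface above. The device name "tun0" and owner uid 1000 are
 * arbitrary example values.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	/* L3 device, no extra header */
	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0 ||	/* create or attach to tun0 */
	    ioctl(fd, TUNSETOWNER, 1000) < 0)	/* limit reattach to uid 1000 */
		perror("ioctl");

	close(fd);
	return 0;
}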
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H

#include <linux/bitops.h>
#include <linux/wordpart.h>

struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }

/* Return nonzero if it has a zero */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
				     const struct word_at_a_time *c)
{
	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;

	*bits = mask;
	return mask;
}

static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits,
					   const struct word_at_a_time *c)
{
	return bits;
}

#ifdef CONFIG_64BIT

/* Keep the initial has_zero() value for both bitmask and size calc */
#define create_zero_mask(bits) (bits)

static inline unsigned long zero_bytemask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}

#define find_zero(bits) (__ffs(bits) >> 3)

#else

/* Create the final mask for both bytemask and size */
static inline unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}

/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)

/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
static inline unsigned long find_zero(unsigned long mask)
{
	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
	long a = (0x0ff0001 + mask) >> 23;
	/* Fix the 1 for 00 case */
	return a & mask;
}

#endif

/*
 * Load an unaligned word from kernel space.
 *
 * In the (very unlikely) case of the word being a page-crosser
 * and the next page not being mapped, take the exception and
 * return zeroes in the non-existing part.
 */
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
	unsigned long ret;

	asm volatile(
		"1:	mov %[mem], %[ret]\n"
		"2:\n"
		_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_ZEROPAD)
		: [ret] "=r" (ret)
		: [mem] "m" (*(unsigned long *)addr));

	return ret;
}

#endif /* _ASM_WORD_AT_A_TIME_H */
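
/*
 * Illustrative sketch (not part of the header): a word-at-a-time strlen()
 * in the style of fs/dcache.c and lib/string.c, assuming 'src' is aligned
 * and padded so whole-word loads never fault.
 */
static inline unsigned long waat_strlen(const char *src)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	const unsigned long *word = (const unsigned long *)src;
	unsigned long len = 0, data, bits;

	for (;;) {
		data = *word++;
		if (has_zero(data, &bits, &constants)) {
			bits = prep_zero_mask(data, bits, &constants);
			return len + find_zero(create_zero_mask(bits));
		}
		len += sizeof(unsigned long);
	}
}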
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 *
 * The iopt_pages is the center of the storage and motion of PFNs.
 * Each iopt_pages represents a logical linear array of full PFNs. The array
 * is 0 based and has npages in it. Accessors use 'index' to refer to the
 * entry in this logical array, regardless of its storage location.
 *
 * PFNs are stored in a tiered scheme:
 *  1) iopt_pages::pinned_pfns xarray
 *  2) An iommu_domain
 *  3) The origin of the PFNs, i.e. the userspace pointer
 *
 * PFNs have to be copied between all combinations of tiers, depending on the
 * configuration.
 *
 * When a PFN is taken out of the userspace pointer it is pinned exactly once.
 * The storage locations of the PFN's index are tracked in the two interval
 * trees. If no interval includes the index then it is not pinned.
 *
 * If access_itree includes the PFN's index then an in-kernel access has
 * requested the page. The PFN is stored in the xarray so other requestors can
 * continue to find it.
 *
 * If the domains_itree includes the PFN's index then an iommu_domain is
 * storing the PFN and it can be read back using iommu_iova_to_phys(). To
 * avoid duplicating storage the xarray is not used if only iommu_domains are
 * using the PFN's index.
 *
 * As a general principle this is designed so that destroy never fails. This
 * means removing an iommu_domain or releasing an in-kernel access will not
 * fail due to insufficient memory. In practice this means some cases have to
 * hold PFNs in the xarray even though they are also being stored in an
 * iommu_domain.
 *
 * While the iopt_pages can use an iommu_domain as storage, it does not have
 * an IOVA itself. Instead the iopt_area represents a range of IOVA and uses
 * the iopt_pages as the PFN provider. Multiple iopt_areas can share the
 * iopt_pages and reference their own slice of the PFN array, with sub page
 * granularity.
 *
 * In this file the term 'last' indicates an inclusive and closed interval, eg
 * [0,0] refers to a single PFN. 'end' means an open range, eg [0,0) refers to
 * no PFNs.
 *
 * Be cautious of overflow. An IOVA can go all the way up to U64_MAX, so
 * last_iova + 1 can overflow. An iopt_pages index will always be much less
 * than ULONG_MAX so last_index + 1 cannot overflow.
 */
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/kthread.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>

#include "double_span.h"
#include "io_pagetable.h"

#ifndef CONFIG_IOMMUFD_TEST
#define TEMP_MEMORY_LIMIT 65536
#else
#define TEMP_MEMORY_LIMIT iommufd_test_memory_limit
#endif

#define BATCH_BACKUP_SIZE 32

/*
 * More memory makes pin_user_pages() and the batching more efficient, but as
 * this is only a performance optimization don't try too hard to get it. A 64k
 * allocation can hold about 26M of 4k pages and 13G of 2M pages in a
 * pfn_batch. Various destroy paths cannot fail and provide a small amount of
 * stack memory as a backup contingency. If backup_len is given this cannot
 * fail.
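 *
 * Destroy-path callers pass a small on-stack buffer for this, eg:
 *
 *	u64 backup[BATCH_BACKUP_SIZE];
 *	batch_init_backup(&batch, n, backup, sizeof(backup));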
*/ static void *temp_kmalloc(size_t *size, void *backup, size_t backup_len) { void *res; if (WARN_ON(*size == 0)) return NULL; if (*size < backup_len) return backup; if (!backup && iommufd_should_fail()) return NULL; *size = min_t(size_t, *size, TEMP_MEMORY_LIMIT); res = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (res) return res; *size = PAGE_SIZE; if (backup_len) { res = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (res) return res; *size = backup_len; return backup; } return kmalloc(*size, GFP_KERNEL); } void interval_tree_double_span_iter_update( struct interval_tree_double_span_iter *iter) { unsigned long last_hole = ULONG_MAX; unsigned int i; for (i = 0; i != ARRAY_SIZE(iter->spans); i++) { if (interval_tree_span_iter_done(&iter->spans[i])) { iter->is_used = -1; return; } if (iter->spans[i].is_hole) { last_hole = min(last_hole, iter->spans[i].last_hole); continue; } iter->is_used = i + 1; iter->start_used = iter->spans[i].start_used; iter->last_used = min(iter->spans[i].last_used, last_hole); return; } iter->is_used = 0; iter->start_hole = iter->spans[0].start_hole; iter->last_hole = min(iter->spans[0].last_hole, iter->spans[1].last_hole); } void interval_tree_double_span_iter_first( struct interval_tree_double_span_iter *iter, struct rb_root_cached *itree1, struct rb_root_cached *itree2, unsigned long first_index, unsigned long last_index) { unsigned int i; iter->itrees[0] = itree1; iter->itrees[1] = itree2; for (i = 0; i != ARRAY_SIZE(iter->spans); i++) interval_tree_span_iter_first(&iter->spans[i], iter->itrees[i], first_index, last_index); interval_tree_double_span_iter_update(iter); } void interval_tree_double_span_iter_next( struct interval_tree_double_span_iter *iter) { unsigned int i; if (iter->is_used == -1 || iter->last_hole == iter->spans[0].last_index) { iter->is_used = -1; return; } for (i = 0; i != ARRAY_SIZE(iter->spans); i++) interval_tree_span_iter_advance( &iter->spans[i], iter->itrees[i], iter->last_hole + 1); interval_tree_double_span_iter_update(iter); } static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages) { int rc; rc = check_add_overflow(pages->npinned, npages, &pages->npinned); if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(rc || pages->npinned > pages->npages); } static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages) { int rc; rc = check_sub_overflow(pages->npinned, npages, &pages->npinned); if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(rc || pages->npinned > pages->npages); } static void iopt_pages_err_unpin(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, struct page **page_list) { unsigned long npages = last_index - start_index + 1; unpin_user_pages(page_list, npages); iopt_pages_sub_npinned(pages, npages); } /* * index is the number of PAGE_SIZE units from the start of the area's * iopt_pages. If the iova is sub page-size then the area has an iova that * covers a portion of the first and last pages in the range. 
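 *
 * Worked example (hypothetical numbers, 4k pages): for an area with iova
 * 0x10800 (page_offset = 0x800) whose first index is 5, index 5 maps to
 * iova 0x10800 while index 6 maps to 0x10800 - 0x800 + 0x1000 = 0x11000,
 * the start of the next full page.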
 */
static unsigned long iopt_area_index_to_iova(struct iopt_area *area,
					     unsigned long index)
{
	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
		WARN_ON(index < iopt_area_index(area) ||
			index > iopt_area_last_index(area));
	index -= iopt_area_index(area);
	if (index == 0)
		return iopt_area_iova(area);
	return iopt_area_iova(area) - area->page_offset + index * PAGE_SIZE;
}

static unsigned long iopt_area_index_to_iova_last(struct iopt_area *area,
						  unsigned long index)
{
	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
		WARN_ON(index < iopt_area_index(area) ||
			index > iopt_area_last_index(area));
	if (index == iopt_area_last_index(area))
		return iopt_area_last_iova(area);
	return iopt_area_iova(area) - area->page_offset +
	       (index - iopt_area_index(area) + 1) * PAGE_SIZE - 1;
}

static void iommu_unmap_nofail(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	size_t ret;

	ret = iommu_unmap(domain, iova, size);
	/*
	 * It is a logic error in this code or a driver bug if the IOMMU unmaps
	 * something other than exactly as requested. This implies that the
	 * iommu driver may not fail unmap for reasons beyond bad arguments.
	 * Particularly, the iommu driver may not do a memory allocation on the
	 * unmap path.
	 */
	WARN_ON(ret != size);
}

static void iopt_area_unmap_domain_range(struct iopt_area *area,
					 struct iommu_domain *domain,
					 unsigned long start_index,
					 unsigned long last_index)
{
	unsigned long start_iova = iopt_area_index_to_iova(area, start_index);

	iommu_unmap_nofail(domain, start_iova,
			   iopt_area_index_to_iova_last(area, last_index) -
				   start_iova + 1);
}

static struct iopt_area *iopt_pages_find_domain_area(struct iopt_pages *pages,
						     unsigned long index)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_first(&pages->domains_itree, index, index);
	if (!node)
		return NULL;
	return container_of(node, struct iopt_area, pages_node);
}

/*
 * A simple data structure to hold a vector of PFNs, optimized for contiguous
 * PFNs. This is used as a temporary holding memory for shuttling pfns from
 * one place to another. Generally everything is made more efficient if
 * operations work on the largest possible grouping of pfns.
eg fewer lock/unlock cycles, * better cache locality, etc */ struct pfn_batch { unsigned long *pfns; u32 *npfns; unsigned int array_size; unsigned int end; unsigned int total_pfns; }; static void batch_clear(struct pfn_batch *batch) { batch->total_pfns = 0; batch->end = 0; batch->pfns[0] = 0; batch->npfns[0] = 0; } /* * Carry means we carry a portion of the final hugepage over to the front of the * batch */ static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns) { if (!keep_pfns) return batch_clear(batch); if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(!batch->end || batch->npfns[batch->end - 1] < keep_pfns); batch->total_pfns = keep_pfns; batch->pfns[0] = batch->pfns[batch->end - 1] + (batch->npfns[batch->end - 1] - keep_pfns); batch->npfns[0] = keep_pfns; batch->end = 1; } static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns) { if (!batch->total_pfns) return; if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(batch->total_pfns != batch->npfns[0]); skip_pfns = min(batch->total_pfns, skip_pfns); batch->pfns[0] += skip_pfns; batch->npfns[0] -= skip_pfns; batch->total_pfns -= skip_pfns; } static int __batch_init(struct pfn_batch *batch, size_t max_pages, void *backup, size_t backup_len) { const size_t elmsz = sizeof(*batch->pfns) + sizeof(*batch->npfns); size_t size = max_pages * elmsz; batch->pfns = temp_kmalloc(&size, backup, backup_len); if (!batch->pfns) return -ENOMEM; if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(size < elmsz)) return -EINVAL; batch->array_size = size / elmsz; batch->npfns = (u32 *)(batch->pfns + batch->array_size); batch_clear(batch); return 0; } static int batch_init(struct pfn_batch *batch, size_t max_pages) { return __batch_init(batch, max_pages, NULL, 0); } static void batch_init_backup(struct pfn_batch *batch, size_t max_pages, void *backup, size_t backup_len) { __batch_init(batch, max_pages, backup, backup_len); } static void batch_destroy(struct pfn_batch *batch, void *backup) { if (batch->pfns != backup) kfree(batch->pfns); } static bool batch_add_pfn_num(struct pfn_batch *batch, unsigned long pfn, u32 nr) { const unsigned int MAX_NPFNS = type_max(typeof(*batch->npfns)); unsigned int end = batch->end; if (end && pfn == batch->pfns[end - 1] + batch->npfns[end - 1] && nr <= MAX_NPFNS - batch->npfns[end - 1]) { batch->npfns[end - 1] += nr; } else if (end < batch->array_size) { batch->pfns[end] = pfn; batch->npfns[end] = nr; batch->end++; } else { return false; } batch->total_pfns += nr; return true; } static void batch_remove_pfn_num(struct pfn_batch *batch, unsigned long nr) { batch->npfns[batch->end - 1] -= nr; if (batch->npfns[batch->end - 1] == 0) batch->end--; batch->total_pfns -= nr; } /* true if the pfn was added, false otherwise */ static bool batch_add_pfn(struct pfn_batch *batch, unsigned long pfn) { return batch_add_pfn_num(batch, pfn, 1); } /* * Fill the batch with pfns from the domain. When the batch is full, or it * reaches last_index, the function will return. The caller should use * batch->total_pfns to determine the starting point for the next iteration. 
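 *
 * A hypothetical caller loop, resuming where the previous fill stopped
 * (consume_pfns() stands in for whatever drains the batch):
 *
 *	while (start_index <= last_index) {
 *		batch_clear(&batch);
 *		batch_from_domain(&batch, domain, area, start_index,
 *				  last_index);
 *		consume_pfns(&batch);
 *		start_index += batch.total_pfns;
 *	}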
*/ static void batch_from_domain(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index, unsigned long last_index) { unsigned int page_offset = 0; unsigned long iova; phys_addr_t phys; iova = iopt_area_index_to_iova(area, start_index); if (start_index == iopt_area_index(area)) page_offset = area->page_offset; while (start_index <= last_index) { /* * This is pretty slow, it would be nice to get the page size * back from the driver, or have the driver directly fill the * batch. */ phys = iommu_iova_to_phys(domain, iova) - page_offset; if (!batch_add_pfn(batch, PHYS_PFN(phys))) return; iova += PAGE_SIZE - page_offset; page_offset = 0; start_index++; } } static struct page **raw_pages_from_domain(struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index, unsigned long last_index, struct page **out_pages) { unsigned int page_offset = 0; unsigned long iova; phys_addr_t phys; iova = iopt_area_index_to_iova(area, start_index); if (start_index == iopt_area_index(area)) page_offset = area->page_offset; while (start_index <= last_index) { phys = iommu_iova_to_phys(domain, iova) - page_offset; *(out_pages++) = pfn_to_page(PHYS_PFN(phys)); iova += PAGE_SIZE - page_offset; page_offset = 0; start_index++; } return out_pages; } /* Continues reading a domain until we reach a discontinuity in the pfns. */ static void batch_from_domain_continue(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index, unsigned long last_index) { unsigned int array_size = batch->array_size; batch->array_size = batch->end; batch_from_domain(batch, domain, area, start_index, last_index); batch->array_size = array_size; } /* * This is part of the VFIO compatibility support for VFIO_TYPE1_IOMMU. That * mode permits splitting a mapped area up, and then one of the splits is * unmapped. Doing this normally would cause us to violate our invariant of * pairing map/unmap. Thus, to support old VFIO compatibility disable support * for batching consecutive PFNs. All PFNs mapped into the iommu are done in * PAGE_SIZE units, not larger or smaller. 
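 *
 * eg with 4k pages, a physically contiguous 2M run is installed as 512
 * individual 4k mappings so that a later VFIO-style unmap of any 4k
 * sub-range can succeed on its own.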
*/ static int batch_iommu_map_small(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { unsigned long start_iova = iova; int rc; if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(paddr % PAGE_SIZE || iova % PAGE_SIZE || size % PAGE_SIZE); while (size) { rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot, GFP_KERNEL_ACCOUNT); if (rc) goto err_unmap; iova += PAGE_SIZE; paddr += PAGE_SIZE; size -= PAGE_SIZE; } return 0; err_unmap: if (start_iova != iova) iommu_unmap_nofail(domain, start_iova, iova - start_iova); return rc; } static int batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index) { bool disable_large_pages = area->iopt->disable_large_pages; unsigned long last_iova = iopt_area_last_iova(area); unsigned int page_offset = 0; unsigned long start_iova; unsigned long next_iova; unsigned int cur = 0; unsigned long iova; int rc; /* The first index might be a partial page */ if (start_index == iopt_area_index(area)) page_offset = area->page_offset; next_iova = iova = start_iova = iopt_area_index_to_iova(area, start_index); while (cur < batch->end) { next_iova = min(last_iova + 1, next_iova + batch->npfns[cur] * PAGE_SIZE - page_offset); if (disable_large_pages) rc = batch_iommu_map_small( domain, iova, PFN_PHYS(batch->pfns[cur]) + page_offset, next_iova - iova, area->iommu_prot); else rc = iommu_map(domain, iova, PFN_PHYS(batch->pfns[cur]) + page_offset, next_iova - iova, area->iommu_prot, GFP_KERNEL_ACCOUNT); if (rc) goto err_unmap; iova = next_iova; page_offset = 0; cur++; } return 0; err_unmap: if (start_iova != iova) iommu_unmap_nofail(domain, start_iova, iova - start_iova); return rc; } static void batch_from_xarray(struct pfn_batch *batch, struct xarray *xa, unsigned long start_index, unsigned long last_index) { XA_STATE(xas, xa, start_index); void *entry; rcu_read_lock(); while (true) { entry = xas_next(&xas); if (xas_retry(&xas, entry)) continue; WARN_ON(!xa_is_value(entry)); if (!batch_add_pfn(batch, xa_to_value(entry)) || start_index == last_index) break; start_index++; } rcu_read_unlock(); } static void batch_from_xarray_clear(struct pfn_batch *batch, struct xarray *xa, unsigned long start_index, unsigned long last_index) { XA_STATE(xas, xa, start_index); void *entry; xas_lock(&xas); while (true) { entry = xas_next(&xas); if (xas_retry(&xas, entry)) continue; WARN_ON(!xa_is_value(entry)); if (!batch_add_pfn(batch, xa_to_value(entry))) break; xas_store(&xas, NULL); if (start_index == last_index) break; start_index++; } xas_unlock(&xas); } static void clear_xarray(struct xarray *xa, unsigned long start_index, unsigned long last_index) { XA_STATE(xas, xa, start_index); void *entry; xas_lock(&xas); xas_for_each(&xas, entry, last_index) xas_store(&xas, NULL); xas_unlock(&xas); } static int pages_to_xarray(struct xarray *xa, unsigned long start_index, unsigned long last_index, struct page **pages) { struct page **end_pages = pages + (last_index - start_index) + 1; struct page **half_pages = pages + (end_pages - pages) / 2; XA_STATE(xas, xa, start_index); do { void *old; xas_lock(&xas); while (pages != end_pages) { /* xarray does not participate in fault injection */ if (pages == half_pages && iommufd_should_fail()) { xas_set_err(&xas, -EINVAL); xas_unlock(&xas); /* aka xas_destroy() */ xas_nomem(&xas, GFP_KERNEL); goto err_clear; } old = xas_store(&xas, xa_mk_value(page_to_pfn(*pages))); if (xas_error(&xas)) break; WARN_ON(old); pages++; xas_next(&xas); } xas_unlock(&xas); } while 
(xas_nomem(&xas, GFP_KERNEL)); err_clear: if (xas_error(&xas)) { if (xas.xa_index != start_index) clear_xarray(xa, start_index, xas.xa_index - 1); return xas_error(&xas); } return 0; } static void batch_from_pages(struct pfn_batch *batch, struct page **pages, size_t npages) { struct page **end = pages + npages; for (; pages != end; pages++) if (!batch_add_pfn(batch, page_to_pfn(*pages))) break; } static int batch_from_folios(struct pfn_batch *batch, struct folio ***folios_p, unsigned long *offset_p, unsigned long npages) { int rc = 0; struct folio **folios = *folios_p; unsigned long offset = *offset_p; while (npages) { struct folio *folio = *folios; unsigned long nr = folio_nr_pages(folio) - offset; unsigned long pfn = page_to_pfn(folio_page(folio, offset)); nr = min(nr, npages); npages -= nr; if (!batch_add_pfn_num(batch, pfn, nr)) break; if (nr > 1) { rc = folio_add_pins(folio, nr - 1); if (rc) { batch_remove_pfn_num(batch, nr); goto out; } } folios++; offset = 0; } out: *folios_p = folios; *offset_p = offset; return rc; } static void batch_unpin(struct pfn_batch *batch, struct iopt_pages *pages, unsigned int first_page_off, size_t npages) { unsigned int cur = 0; while (first_page_off) { if (batch->npfns[cur] > first_page_off) break; first_page_off -= batch->npfns[cur]; cur++; } while (npages) { size_t to_unpin = min_t(size_t, npages, batch->npfns[cur] - first_page_off); unpin_user_page_range_dirty_lock( pfn_to_page(batch->pfns[cur] + first_page_off), to_unpin, pages->writable); iopt_pages_sub_npinned(pages, to_unpin); cur++; first_page_off = 0; npages -= to_unpin; } } static void copy_data_page(struct page *page, void *data, unsigned long offset, size_t length, unsigned int flags) { void *mem; mem = kmap_local_page(page); if (flags & IOMMUFD_ACCESS_RW_WRITE) { memcpy(mem + offset, data, length); set_page_dirty_lock(page); } else { memcpy(data, mem + offset, length); } kunmap_local(mem); } static unsigned long batch_rw(struct pfn_batch *batch, void *data, unsigned long offset, unsigned long length, unsigned int flags) { unsigned long copied = 0; unsigned int npage = 0; unsigned int cur = 0; while (cur < batch->end) { unsigned long bytes = min(length, PAGE_SIZE - offset); copy_data_page(pfn_to_page(batch->pfns[cur] + npage), data, offset, bytes, flags); offset = 0; length -= bytes; data += bytes; copied += bytes; npage++; if (npage == batch->npfns[cur]) { npage = 0; cur++; } if (!length) break; } return copied; } /* pfn_reader_user is just the pin_user_pages() path */ struct pfn_reader_user { struct page **upages; size_t upages_len; unsigned long upages_start; unsigned long upages_end; unsigned int gup_flags; /* * 1 means mmget() and mmap_read_lock(), 0 means only mmget(), -1 is * neither */ int locked; /* The following are only valid if file != NULL. */ struct file *file; struct folio **ufolios; size_t ufolios_len; unsigned long ufolios_offset; struct folio **ufolios_next; }; static void pfn_reader_user_init(struct pfn_reader_user *user, struct iopt_pages *pages) { user->upages = NULL; user->upages_len = 0; user->upages_start = 0; user->upages_end = 0; user->locked = -1; user->gup_flags = FOLL_LONGTERM; if (pages->writable) user->gup_flags |= FOLL_WRITE; user->file = (pages->type == IOPT_ADDRESS_FILE) ? 
pages->file : NULL; user->ufolios = NULL; user->ufolios_len = 0; user->ufolios_next = NULL; user->ufolios_offset = 0; } static void pfn_reader_user_destroy(struct pfn_reader_user *user, struct iopt_pages *pages) { if (user->locked != -1) { if (user->locked) mmap_read_unlock(pages->source_mm); if (!user->file && pages->source_mm != current->mm) mmput(pages->source_mm); user->locked = -1; } kfree(user->upages); user->upages = NULL; kfree(user->ufolios); user->ufolios = NULL; } static long pin_memfd_pages(struct pfn_reader_user *user, unsigned long start, unsigned long npages) { unsigned long i; unsigned long offset; unsigned long npages_out = 0; struct page **upages = user->upages; unsigned long end = start + (npages << PAGE_SHIFT) - 1; long nfolios = user->ufolios_len / sizeof(*user->ufolios); /* * todo: memfd_pin_folios should return the last pinned offset so * we can compute npages pinned, and avoid looping over folios here * if upages == NULL. */ nfolios = memfd_pin_folios(user->file, start, end, user->ufolios, nfolios, &offset); if (nfolios <= 0) return nfolios; offset >>= PAGE_SHIFT; user->ufolios_next = user->ufolios; user->ufolios_offset = offset; for (i = 0; i < nfolios; i++) { struct folio *folio = user->ufolios[i]; unsigned long nr = folio_nr_pages(folio); unsigned long npin = min(nr - offset, npages); npages -= npin; npages_out += npin; if (upages) { if (npin == 1) { *upages++ = folio_page(folio, offset); } else { int rc = folio_add_pins(folio, npin - 1); if (rc) return rc; while (npin--) *upages++ = folio_page(folio, offset++); } } offset = 0; } return npages_out; } static int pfn_reader_user_pin(struct pfn_reader_user *user, struct iopt_pages *pages, unsigned long start_index, unsigned long last_index) { bool remote_mm = pages->source_mm != current->mm; unsigned long npages = last_index - start_index + 1; unsigned long start; unsigned long unum; uintptr_t uptr; long rc; if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(last_index < start_index)) return -EINVAL; if (!user->file && !user->upages) { /* All undone in pfn_reader_destroy() */ user->upages_len = npages * sizeof(*user->upages); user->upages = temp_kmalloc(&user->upages_len, NULL, 0); if (!user->upages) return -ENOMEM; } if (user->file && !user->ufolios) { user->ufolios_len = npages * sizeof(*user->ufolios); user->ufolios = temp_kmalloc(&user->ufolios_len, NULL, 0); if (!user->ufolios) return -ENOMEM; } if (user->locked == -1) { /* * The majority of usages will run the map task within the mm * providing the pages, so we can optimize into * get_user_pages_fast() */ if (!user->file && remote_mm) { if (!mmget_not_zero(pages->source_mm)) return -EFAULT; } user->locked = 0; } unum = user->file ? 
		      user->ufolios_len / sizeof(*user->ufolios) :
		      user->upages_len / sizeof(*user->upages);
	npages = min_t(unsigned long, npages, unum);

	if (iommufd_should_fail())
		return -EFAULT;

	if (user->file) {
		start = pages->start + (start_index * PAGE_SIZE);
		rc = pin_memfd_pages(user, start, npages);
	} else if (!remote_mm) {
		uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE);
		rc = pin_user_pages_fast(uptr, npages, user->gup_flags,
					 user->upages);
	} else {
		uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE);
		if (!user->locked) {
			mmap_read_lock(pages->source_mm);
			user->locked = 1;
		}
		rc = pin_user_pages_remote(pages->source_mm, uptr, npages,
					   user->gup_flags, user->upages,
					   &user->locked);
	}
	if (rc <= 0) {
		if (WARN_ON(!rc))
			return -EFAULT;
		return rc;
	}
	iopt_pages_add_npinned(pages, rc);
	user->upages_start = start_index;
	user->upages_end = start_index + rc;
	return 0;
}

/* This is the "modern" and faster accounting method used by io_uring */
static int incr_user_locked_vm(struct iopt_pages *pages, unsigned long npages)
{
	unsigned long lock_limit;
	unsigned long cur_pages;
	unsigned long new_pages;

	lock_limit = task_rlimit(pages->source_task, RLIMIT_MEMLOCK) >>
		     PAGE_SHIFT;

	cur_pages = atomic_long_read(&pages->source_user->locked_vm);
	do {
		new_pages = cur_pages + npages;
		if (new_pages > lock_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&pages->source_user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

static void decr_user_locked_vm(struct iopt_pages *pages, unsigned long npages)
{
	if (WARN_ON(atomic_long_read(&pages->source_user->locked_vm) < npages))
		return;
	atomic_long_sub(npages, &pages->source_user->locked_vm);
}

/* This is the accounting method used for compatibility with VFIO */
static int update_mm_locked_vm(struct iopt_pages *pages, unsigned long npages,
			       bool inc, struct pfn_reader_user *user)
{
	bool do_put = false;
	int rc;

	if (user && user->locked) {
		mmap_read_unlock(pages->source_mm);
		user->locked = 0;
		/* If we had the lock then we also have a get */
	} else if ((!user || (!user->upages && !user->ufolios)) &&
		   pages->source_mm != current->mm) {
		if (!mmget_not_zero(pages->source_mm))
			return -EINVAL;
		do_put = true;
	}

	mmap_write_lock(pages->source_mm);
	rc = __account_locked_vm(pages->source_mm, npages, inc,
				 pages->source_task, false);
	mmap_write_unlock(pages->source_mm);

	if (do_put)
		mmput(pages->source_mm);
	return rc;
}

int iopt_pages_update_pinned(struct iopt_pages *pages, unsigned long npages,
			     bool inc, struct pfn_reader_user *user)
{
	int rc = 0;

	switch (pages->account_mode) {
	case IOPT_PAGES_ACCOUNT_NONE:
		break;
	case IOPT_PAGES_ACCOUNT_USER:
		if (inc)
			rc = incr_user_locked_vm(pages, npages);
		else
			decr_user_locked_vm(pages, npages);
		break;
	case IOPT_PAGES_ACCOUNT_MM:
		rc = update_mm_locked_vm(pages, npages, inc, user);
		break;
	}
	if (rc)
		return rc;

	pages->last_npinned = pages->npinned;
	if (inc)
		atomic64_add(npages, &pages->source_mm->pinned_vm);
	else
		atomic64_sub(npages, &pages->source_mm->pinned_vm);
	return 0;
}

static void update_unpinned(struct iopt_pages *pages)
{
	if (WARN_ON(pages->npinned > pages->last_npinned))
		return;
	if (pages->npinned == pages->last_npinned)
		return;
	iopt_pages_update_pinned(pages, pages->last_npinned - pages->npinned,
				 false, NULL);
}

/*
 * Changes in the number of pages pinned are done after the pages have been
 * read and processed. If the user exceeded the pinning limit then the error
 * unwind will unpin everything that was just pinned.
 * This is because it is expensive to calculate how many pages we have already
 * pinned within a range to generate an accurate prediction in advance of
 * doing the work to actually pin them.
 */
static int pfn_reader_user_update_pinned(struct pfn_reader_user *user,
					 struct iopt_pages *pages)
{
	unsigned long npages;
	bool inc;

	lockdep_assert_held(&pages->mutex);

	if (pages->npinned == pages->last_npinned)
		return 0;
	if (pages->npinned < pages->last_npinned) {
		npages = pages->last_npinned - pages->npinned;
		inc = false;
	} else {
		if (iommufd_should_fail())
			return -ENOMEM;
		npages = pages->npinned - pages->last_npinned;
		inc = true;
	}
	return iopt_pages_update_pinned(pages, npages, inc, user);
}

/*
 * PFNs are stored in three places, in order of preference:
 * - The iopt_pages xarray. This is only populated if there is an
 *   iopt_pages_access
 * - The iommu_domain under an area
 * - The original PFN source, ie pages->source_mm
 *
 * This iterator reads the pfns optimizing to load according to the
 * above order.
 */
struct pfn_reader {
	struct iopt_pages *pages;
	struct interval_tree_double_span_iter span;
	struct pfn_batch batch;
	unsigned long batch_start_index;
	unsigned long batch_end_index;
	unsigned long last_index;
	struct pfn_reader_user user;
};

static int pfn_reader_update_pinned(struct pfn_reader *pfns)
{
	return pfn_reader_user_update_pinned(&pfns->user, pfns->pages);
}

/*
 * The batch can contain a mixture of pages that are still in use and pages
 * that need to be unpinned. Unpin only pages that are not held anywhere else.
 */
static void pfn_reader_unpin(struct pfn_reader *pfns)
{
	unsigned long last = pfns->batch_end_index - 1;
	unsigned long start = pfns->batch_start_index;
	struct interval_tree_double_span_iter span;
	struct iopt_pages *pages = pfns->pages;

	lockdep_assert_held(&pages->mutex);

	interval_tree_for_each_double_span(&span, &pages->access_itree,
					   &pages->domains_itree, start, last) {
		if (span.is_used)
			continue;

		batch_unpin(&pfns->batch, pages, span.start_hole - start,
			    span.last_hole - span.start_hole + 1);
	}
}

/* Process a single span to load it from the proper storage */
static int pfn_reader_fill_span(struct pfn_reader *pfns)
{
	struct interval_tree_double_span_iter *span = &pfns->span;
	unsigned long start_index = pfns->batch_end_index;
	struct pfn_reader_user *user = &pfns->user;
	unsigned long npages;
	struct iopt_area *area;
	int rc;

	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
	    WARN_ON(span->last_used < start_index))
		return -EINVAL;

	if (span->is_used == 1) {
		batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns,
				  start_index, span->last_used);
		return 0;
	}

	if (span->is_used == 2) {
		/*
		 * Pull as many pages from the first domain we find in the
		 * target span. If it is too small then we will be called again
		 * and we'll find another area.
*/ area = iopt_pages_find_domain_area(pfns->pages, start_index); if (WARN_ON(!area)) return -EINVAL; /* The storage_domain cannot change without the pages mutex */ batch_from_domain( &pfns->batch, area->storage_domain, area, start_index, min(iopt_area_last_index(area), span->last_used)); return 0; } if (start_index >= pfns->user.upages_end) { rc = pfn_reader_user_pin(&pfns->user, pfns->pages, start_index, span->last_hole); if (rc) return rc; } npages = user->upages_end - start_index; start_index -= user->upages_start; rc = 0; if (!user->file) batch_from_pages(&pfns->batch, user->upages + start_index, npages); else rc = batch_from_folios(&pfns->batch, &user->ufolios_next, &user->ufolios_offset, npages); return rc; } static bool pfn_reader_done(struct pfn_reader *pfns) { return pfns->batch_start_index == pfns->last_index + 1; } static int pfn_reader_next(struct pfn_reader *pfns) { int rc; batch_clear(&pfns->batch); pfns->batch_start_index = pfns->batch_end_index; while (pfns->batch_end_index != pfns->last_index + 1) { unsigned int npfns = pfns->batch.total_pfns; if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(interval_tree_double_span_iter_done(&pfns->span))) return -EINVAL; rc = pfn_reader_fill_span(pfns); if (rc) return rc; if (WARN_ON(!pfns->batch.total_pfns)) return -EINVAL; pfns->batch_end_index = pfns->batch_start_index + pfns->batch.total_pfns; if (pfns->batch_end_index == pfns->span.last_used + 1) interval_tree_double_span_iter_next(&pfns->span); /* Batch is full */ if (npfns == pfns->batch.total_pfns) return 0; } return 0; } static int pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages, unsigned long start_index, unsigned long last_index) { int rc; lockdep_assert_held(&pages->mutex); pfns->pages = pages; pfns->batch_start_index = start_index; pfns->batch_end_index = start_index; pfns->last_index = last_index; pfn_reader_user_init(&pfns->user, pages); rc = batch_init(&pfns->batch, last_index - start_index + 1); if (rc) return rc; interval_tree_double_span_iter_first(&pfns->span, &pages->access_itree, &pages->domains_itree, start_index, last_index); return 0; } /* * There are many assertions regarding the state of pages->npinned vs * pages->last_pinned, for instance something like unmapping a domain must only * decrement the npinned, and pfn_reader_destroy() must be called only after all * the pins are updated. This is fine for success flows, but error flows * sometimes need to release the pins held inside the pfn_reader before going on * to complete unmapping and releasing pins held in domains. 
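 *
 * eg iopt_area_fill_domain() below calls pfn_reader_release_pins() on its
 * error path before unfilling the partially filled domain.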
*/ static void pfn_reader_release_pins(struct pfn_reader *pfns) { struct iopt_pages *pages = pfns->pages; struct pfn_reader_user *user = &pfns->user; if (user->upages_end > pfns->batch_end_index) { /* Any pages not transferred to the batch are just unpinned */ unsigned long npages = user->upages_end - pfns->batch_end_index; unsigned long start_index = pfns->batch_end_index - user->upages_start; if (!user->file) { unpin_user_pages(user->upages + start_index, npages); } else { long n = user->ufolios_len / sizeof(*user->ufolios); unpin_folios(user->ufolios_next, user->ufolios + n - user->ufolios_next); } iopt_pages_sub_npinned(pages, npages); user->upages_end = pfns->batch_end_index; } if (pfns->batch_start_index != pfns->batch_end_index) { pfn_reader_unpin(pfns); pfns->batch_start_index = pfns->batch_end_index; } } static void pfn_reader_destroy(struct pfn_reader *pfns) { struct iopt_pages *pages = pfns->pages; pfn_reader_release_pins(pfns); pfn_reader_user_destroy(&pfns->user, pfns->pages); batch_destroy(&pfns->batch, NULL); WARN_ON(pages->last_npinned != pages->npinned); } static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages, unsigned long start_index, unsigned long last_index) { int rc; if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(last_index < start_index)) return -EINVAL; rc = pfn_reader_init(pfns, pages, start_index, last_index); if (rc) return rc; rc = pfn_reader_next(pfns); if (rc) { pfn_reader_destroy(pfns); return rc; } return 0; } static struct iopt_pages *iopt_alloc_pages(unsigned long start_byte, unsigned long length, bool writable) { struct iopt_pages *pages; /* * The iommu API uses size_t as the length, and protect the DIV_ROUND_UP * below from overflow */ if (length > SIZE_MAX - PAGE_SIZE || length == 0) return ERR_PTR(-EINVAL); pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT); if (!pages) return ERR_PTR(-ENOMEM); kref_init(&pages->kref); xa_init_flags(&pages->pinned_pfns, XA_FLAGS_ACCOUNT); mutex_init(&pages->mutex); pages->source_mm = current->mm; mmgrab(pages->source_mm); pages->npages = DIV_ROUND_UP(length + start_byte, PAGE_SIZE); pages->access_itree = RB_ROOT_CACHED; pages->domains_itree = RB_ROOT_CACHED; pages->writable = writable; if (capable(CAP_IPC_LOCK)) pages->account_mode = IOPT_PAGES_ACCOUNT_NONE; else pages->account_mode = IOPT_PAGES_ACCOUNT_USER; pages->source_task = current->group_leader; get_task_struct(current->group_leader); pages->source_user = get_uid(current_user()); return pages; } struct iopt_pages *iopt_alloc_user_pages(void __user *uptr, unsigned long length, bool writable) { struct iopt_pages *pages; unsigned long end; void __user *uptr_down = (void __user *)ALIGN_DOWN((uintptr_t)uptr, PAGE_SIZE); if (check_add_overflow((unsigned long)uptr, length, &end)) return ERR_PTR(-EOVERFLOW); pages = iopt_alloc_pages(uptr - uptr_down, length, writable); if (IS_ERR(pages)) return pages; pages->uptr = uptr_down; pages->type = IOPT_ADDRESS_USER; return pages; } struct iopt_pages *iopt_alloc_file_pages(struct file *file, unsigned long start, unsigned long length, bool writable) { struct iopt_pages *pages; unsigned long start_down = ALIGN_DOWN(start, PAGE_SIZE); unsigned long end; if (length && check_add_overflow(start, length - 1, &end)) return ERR_PTR(-EOVERFLOW); pages = iopt_alloc_pages(start - start_down, length, writable); if (IS_ERR(pages)) return pages; pages->file = get_file(file); pages->start = start_down; pages->type = IOPT_ADDRESS_FILE; return pages; } void iopt_release_pages(struct kref *kref) { struct iopt_pages *pages 
= container_of(kref, struct iopt_pages, kref); WARN_ON(!RB_EMPTY_ROOT(&pages->access_itree.rb_root)); WARN_ON(!RB_EMPTY_ROOT(&pages->domains_itree.rb_root)); WARN_ON(pages->npinned); WARN_ON(!xa_empty(&pages->pinned_pfns)); mmdrop(pages->source_mm); mutex_destroy(&pages->mutex); put_task_struct(pages->source_task); free_uid(pages->source_user); if (pages->type == IOPT_ADDRESS_FILE) fput(pages->file); kfree(pages); } static void iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain, unsigned long start_index, unsigned long last_index, unsigned long *unmapped_end_index, unsigned long real_last_index) { while (start_index <= last_index) { unsigned long batch_last_index; if (*unmapped_end_index <= last_index) { unsigned long start = max(start_index, *unmapped_end_index); if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && batch->total_pfns) WARN_ON(*unmapped_end_index - batch->total_pfns != start_index); batch_from_domain(batch, domain, area, start, last_index); batch_last_index = start_index + batch->total_pfns - 1; } else { batch_last_index = last_index; } if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(batch_last_index > real_last_index); /* * unmaps must always 'cut' at a place where the pfns are not * contiguous to pair with the maps that always install * contiguous pages. Thus, if we have to stop unpinning in the * middle of the domains we need to keep reading pfns until we * find a cut point to do the unmap. The pfns we read are * carried over and either skipped or integrated into the next * batch. */ if (batch_last_index == last_index && last_index != real_last_index) batch_from_domain_continue(batch, domain, area, last_index + 1, real_last_index); if (*unmapped_end_index <= batch_last_index) { iopt_area_unmap_domain_range( area, domain, *unmapped_end_index, start_index + batch->total_pfns - 1); *unmapped_end_index = start_index + batch->total_pfns; } /* unpin must follow unmap */ batch_unpin(batch, pages, 0, batch_last_index - start_index + 1); start_index = batch_last_index + 1; batch_clear_carry(batch, *unmapped_end_index - batch_last_index - 1); } } static void __iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain, unsigned long last_index) { struct interval_tree_double_span_iter span; unsigned long start_index = iopt_area_index(area); unsigned long unmapped_end_index = start_index; u64 backup[BATCH_BACKUP_SIZE]; struct pfn_batch batch; lockdep_assert_held(&pages->mutex); /* * For security we must not unpin something that is still DMA mapped, * so this must unmap any IOVA before we go ahead and unpin the pages. * This creates a complexity where we need to skip over unpinning pages * held in the xarray, but continue to unmap from the domain. * * The domain unmap cannot stop in the middle of a contiguous range of * PFNs. To solve this problem the unpinning step will read ahead to the * end of any contiguous span, unmap that whole span, and then only * unpin the leading part that does not have any accesses. The residual * PFNs that were unmapped but not unpinned are called a "carry" in the * batch as they are moved to the front of the PFN list and continue on * to the next iteration(s). 
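	 *
	 * Hypothetical example: indexes 0-3 are held only by this domain
	 * while 4-5 are also held by an access, and the PFNs of 3 and 4
	 * happen to be contiguous. The unmap must then extend past index 3
	 * to the contiguity cut, only 0-3 are unpinned, and the PFNs from 4
	 * onward become the carry that the following (used) span skips via
	 * batch_skip_carry().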
	 */
	batch_init_backup(&batch, last_index + 1, backup, sizeof(backup));
	interval_tree_for_each_double_span(&span, &pages->domains_itree,
					   &pages->access_itree, start_index,
					   last_index) {
		if (span.is_used) {
			batch_skip_carry(&batch,
					 span.last_used - span.start_used + 1);
			continue;
		}
		iopt_area_unpin_domain(&batch, area, pages, domain,
				       span.start_hole, span.last_hole,
				       &unmapped_end_index, last_index);
	}
	/*
	 * If the range ends in an access then we do the residual unmap without
	 * any unpins.
	 */
	if (unmapped_end_index != last_index + 1)
		iopt_area_unmap_domain_range(area, domain, unmapped_end_index,
					     last_index);
	WARN_ON(batch.total_pfns);
	batch_destroy(&batch, backup);
	update_unpinned(pages);
}

static void iopt_area_unfill_partial_domain(struct iopt_area *area,
					    struct iopt_pages *pages,
					    struct iommu_domain *domain,
					    unsigned long end_index)
{
	if (end_index != iopt_area_index(area))
		__iopt_area_unfill_domain(area, pages, domain, end_index - 1);
}

/**
 * iopt_area_unmap_domain() - Unmap without unpinning PFNs in a domain
 * @area: The IOVA range to unmap
 * @domain: The domain to unmap
 *
 * The caller must know that unpinning is not required, usually because there
 * are other domains in the iopt.
 */
void iopt_area_unmap_domain(struct iopt_area *area, struct iommu_domain *domain)
{
	iommu_unmap_nofail(domain, iopt_area_iova(area),
			   iopt_area_length(area));
}

/**
 * iopt_area_unfill_domain() - Unmap and unpin PFNs in a domain
 * @area: IOVA area to use
 * @pages: page supplier for the area (area->pages is NULL)
 * @domain: Domain to unmap from
 *
 * The domain should be removed from the domains_itree before calling. The
 * domain will always be unmapped, but the PFNs may not be unpinned if there
 * are still accesses.
 */
void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages,
			     struct iommu_domain *domain)
{
	__iopt_area_unfill_domain(area, pages, domain,
				  iopt_area_last_index(area));
}

/**
 * iopt_area_fill_domain() - Map PFNs from the area into a domain
 * @area: IOVA area to use
 * @domain: Domain to load PFNs into
 *
 * Read the pfns from the area's underlying iopt_pages and map them into the
 * given domain. Called when attaching a new domain to an io_pagetable.
 */
int iopt_area_fill_domain(struct iopt_area *area, struct iommu_domain *domain)
{
	unsigned long done_end_index;
	struct pfn_reader pfns;
	int rc;

	lockdep_assert_held(&area->pages->mutex);

	rc = pfn_reader_first(&pfns, area->pages, iopt_area_index(area),
			      iopt_area_last_index(area));
	if (rc)
		return rc;

	while (!pfn_reader_done(&pfns)) {
		done_end_index = pfns.batch_start_index;
		rc = batch_to_domain(&pfns.batch, domain, area,
				     pfns.batch_start_index);
		if (rc)
			goto out_unmap;
		done_end_index = pfns.batch_end_index;

		rc = pfn_reader_next(&pfns);
		if (rc)
			goto out_unmap;
	}

	rc = pfn_reader_update_pinned(&pfns);
	if (rc)
		goto out_unmap;
	goto out_destroy;

out_unmap:
	pfn_reader_release_pins(&pfns);
	iopt_area_unfill_partial_domain(area, area->pages, domain,
					done_end_index);
out_destroy:
	pfn_reader_destroy(&pfns);
	return rc;
}

/**
 * iopt_area_fill_domains() - Install PFNs into the area's domains
 * @area: The area to act on
 * @pages: The pages associated with the area (area->pages is NULL)
 *
 * Called during area creation. The area is freshly created and not inserted in
 * the domains_itree yet. PFNs are read and loaded into every domain held in
 * the area's io_pagetable and the area is installed in the domains_itree.
 *
 * On failure all domains are left unchanged.
*/ int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages) { unsigned long done_first_end_index; unsigned long done_all_end_index; struct iommu_domain *domain; unsigned long unmap_index; struct pfn_reader pfns; unsigned long index; int rc; lockdep_assert_held(&area->iopt->domains_rwsem); if (xa_empty(&area->iopt->domains)) return 0; mutex_lock(&pages->mutex); rc = pfn_reader_first(&pfns, pages, iopt_area_index(area), iopt_area_last_index(area)); if (rc) goto out_unlock; while (!pfn_reader_done(&pfns)) { done_first_end_index = pfns.batch_end_index; done_all_end_index = pfns.batch_start_index; xa_for_each(&area->iopt->domains, index, domain) { rc = batch_to_domain(&pfns.batch, domain, area, pfns.batch_start_index); if (rc) goto out_unmap; } done_all_end_index = done_first_end_index; rc = pfn_reader_next(&pfns); if (rc) goto out_unmap; } rc = pfn_reader_update_pinned(&pfns); if (rc) goto out_unmap; area->storage_domain = xa_load(&area->iopt->domains, 0); interval_tree_insert(&area->pages_node, &pages->domains_itree); goto out_destroy; out_unmap: pfn_reader_release_pins(&pfns); xa_for_each(&area->iopt->domains, unmap_index, domain) { unsigned long end_index; if (unmap_index < index) end_index = done_first_end_index; else end_index = done_all_end_index; /* * The area is not yet part of the domains_itree so we have to * manage the unpinning specially. The last domain does the * unpin, every other domain is just unmapped. */ if (unmap_index != area->iopt->next_domain_id - 1) { if (end_index != iopt_area_index(area)) iopt_area_unmap_domain_range( area, domain, iopt_area_index(area), end_index - 1); } else { iopt_area_unfill_partial_domain(area, pages, domain, end_index); } } out_destroy: pfn_reader_destroy(&pfns); out_unlock: mutex_unlock(&pages->mutex); return rc; } /** * iopt_area_unfill_domains() - unmap PFNs from the area's domains * @area: The area to act on * @pages: The pages associated with the area (area->pages is NULL) * * Called during area destruction. This unmaps the iova's covered by all the * area's domains and releases the PFNs. */ void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages) { struct io_pagetable *iopt = area->iopt; struct iommu_domain *domain; unsigned long index; lockdep_assert_held(&iopt->domains_rwsem); mutex_lock(&pages->mutex); if (!area->storage_domain) goto out_unlock; xa_for_each(&iopt->domains, index, domain) if (domain != area->storage_domain) iopt_area_unmap_domain_range( area, domain, iopt_area_index(area), iopt_area_last_index(area)); if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(RB_EMPTY_NODE(&area->pages_node.rb)); interval_tree_remove(&area->pages_node, &pages->domains_itree); iopt_area_unfill_domain(area, pages, area->storage_domain); area->storage_domain = NULL; out_unlock: mutex_unlock(&pages->mutex); } static void iopt_pages_unpin_xarray(struct pfn_batch *batch, struct iopt_pages *pages, unsigned long start_index, unsigned long end_index) { while (start_index <= end_index) { batch_from_xarray_clear(batch, &pages->pinned_pfns, start_index, end_index); batch_unpin(batch, pages, 0, batch->total_pfns); start_index += batch->total_pfns; batch_clear(batch); } } /** * iopt_pages_unfill_xarray() - Update the xarray after removing an access * @pages: The pages to act on * @start_index: Starting PFN index * @last_index: Last PFN index * * Called when an iopt_pages_access is removed, removes pages from the itree. * The access should already be removed from the access_itree.
*/ void iopt_pages_unfill_xarray(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index) { struct interval_tree_double_span_iter span; u64 backup[BATCH_BACKUP_SIZE]; struct pfn_batch batch; bool batch_inited = false; lockdep_assert_held(&pages->mutex); interval_tree_for_each_double_span(&span, &pages->access_itree, &pages->domains_itree, start_index, last_index) { if (!span.is_used) { if (!batch_inited) { batch_init_backup(&batch, last_index - start_index + 1, backup, sizeof(backup)); batch_inited = true; } iopt_pages_unpin_xarray(&batch, pages, span.start_hole, span.last_hole); } else if (span.is_used == 2) { /* Covered by a domain */ clear_xarray(&pages->pinned_pfns, span.start_used, span.last_used); } /* Otherwise covered by an existing access */ } if (batch_inited) batch_destroy(&batch, backup); update_unpinned(pages); } /** * iopt_pages_fill_from_xarray() - Fast path for reading PFNs * @pages: The pages to act on * @start_index: The first page index in the range * @last_index: The last page index in the range * @out_pages: The output array to return the pages * * This can be called if the caller is holding a refcount on an * iopt_pages_access that is known to have already been filled. It quickly reads * the pages directly from the xarray. * * This is part of the SW iommu interface to read pages for in-kernel use. */ void iopt_pages_fill_from_xarray(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, struct page **out_pages) { XA_STATE(xas, &pages->pinned_pfns, start_index); void *entry; rcu_read_lock(); while (start_index <= last_index) { entry = xas_next(&xas); if (xas_retry(&xas, entry)) continue; WARN_ON(!xa_is_value(entry)); *(out_pages++) = pfn_to_page(xa_to_value(entry)); start_index++; } rcu_read_unlock(); } static int iopt_pages_fill_from_domain(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, struct page **out_pages) { while (start_index != last_index + 1) { unsigned long domain_last; struct iopt_area *area; area = iopt_pages_find_domain_area(pages, start_index); if (WARN_ON(!area)) return -EINVAL; domain_last = min(iopt_area_last_index(area), last_index); out_pages = raw_pages_from_domain(area->storage_domain, area, start_index, domain_last, out_pages); start_index = domain_last + 1; } return 0; } static int iopt_pages_fill(struct iopt_pages *pages, struct pfn_reader_user *user, unsigned long start_index, unsigned long last_index, struct page **out_pages) { unsigned long cur_index = start_index; int rc; while (cur_index != last_index + 1) { user->upages = out_pages + (cur_index - start_index); rc = pfn_reader_user_pin(user, pages, cur_index, last_index); if (rc) goto out_unpin; cur_index = user->upages_end; } return 0; out_unpin: if (start_index != cur_index) iopt_pages_err_unpin(pages, start_index, cur_index - 1, out_pages); return rc; } /** * iopt_pages_fill_xarray() - Read PFNs * @pages: The pages to act on * @start_index: The first page index in the range * @last_index: The last page index in the range * @out_pages: The output array to return the pages, may be NULL * * This populates the xarray and returns the pages in out_pages. As the slow * path this is able to copy pages from other storage tiers into the xarray. * * On failure the xarray is left unchanged. * * This is part of the SW iommu interface to read pages for in-kernel use. 
*/ int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, struct page **out_pages) { struct interval_tree_double_span_iter span; unsigned long xa_end = start_index; struct pfn_reader_user user; int rc; lockdep_assert_held(&pages->mutex); pfn_reader_user_init(&user, pages); user.upages_len = (last_index - start_index + 1) * sizeof(*out_pages); interval_tree_for_each_double_span(&span, &pages->access_itree, &pages->domains_itree, start_index, last_index) { struct page **cur_pages; if (span.is_used == 1) { cur_pages = out_pages + (span.start_used - start_index); iopt_pages_fill_from_xarray(pages, span.start_used, span.last_used, cur_pages); continue; } if (span.is_used == 2) { cur_pages = out_pages + (span.start_used - start_index); iopt_pages_fill_from_domain(pages, span.start_used, span.last_used, cur_pages); rc = pages_to_xarray(&pages->pinned_pfns, span.start_used, span.last_used, cur_pages); if (rc) goto out_clean_xa; xa_end = span.last_used + 1; continue; } /* hole */ cur_pages = out_pages + (span.start_hole - start_index); rc = iopt_pages_fill(pages, &user, span.start_hole, span.last_hole, cur_pages); if (rc) goto out_clean_xa; rc = pages_to_xarray(&pages->pinned_pfns, span.start_hole, span.last_hole, cur_pages); if (rc) { iopt_pages_err_unpin(pages, span.start_hole, span.last_hole, cur_pages); goto out_clean_xa; } xa_end = span.last_hole + 1; } rc = pfn_reader_user_update_pinned(&user, pages); if (rc) goto out_clean_xa; user.upages = NULL; pfn_reader_user_destroy(&user, pages); return 0; out_clean_xa: if (start_index != xa_end) iopt_pages_unfill_xarray(pages, start_index, xa_end - 1); user.upages = NULL; pfn_reader_user_destroy(&user, pages); return rc; } /* * This uses the pfn_reader instead of taking a shortcut by using the mm. It can * do every scenario and is fully consistent with what an iommu_domain would * see. */ static int iopt_pages_rw_slow(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, unsigned long offset, void *data, unsigned long length, unsigned int flags) { struct pfn_reader pfns; int rc; mutex_lock(&pages->mutex); rc = pfn_reader_first(&pfns, pages, start_index, last_index); if (rc) goto out_unlock; while (!pfn_reader_done(&pfns)) { unsigned long done; done = batch_rw(&pfns.batch, data, offset, length, flags); data += done; length -= done; offset = 0; pfn_reader_unpin(&pfns); rc = pfn_reader_next(&pfns); if (rc) goto out_destroy; } if (WARN_ON(length != 0)) rc = -EINVAL; out_destroy: pfn_reader_destroy(&pfns); out_unlock: mutex_unlock(&pages->mutex); return rc; } /* * A medium speed path that still allows DMA inconsistencies, but doesn't do any * memory allocations or interval tree searches. */ static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index, unsigned long offset, void *data, unsigned long length, unsigned int flags) { struct page *page = NULL; int rc; if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(pages->type != IOPT_ADDRESS_USER)) return -EINVAL; if (!mmget_not_zero(pages->source_mm)) return iopt_pages_rw_slow(pages, index, index, offset, data, length, flags); if (iommufd_should_fail()) { rc = -EINVAL; goto out_mmput; } mmap_read_lock(pages->source_mm); rc = pin_user_pages_remote( pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE), 1, (flags & IOMMUFD_ACCESS_RW_WRITE) ? 
FOLL_WRITE : 0, &page, NULL); mmap_read_unlock(pages->source_mm); if (rc != 1) { if (WARN_ON(rc >= 0)) rc = -EINVAL; goto out_mmput; } copy_data_page(page, data, offset, length, flags); unpin_user_page(page); rc = 0; out_mmput: mmput(pages->source_mm); return rc; } /** * iopt_pages_rw_access - Copy to/from a linear slice of the pages * @pages: pages to act on * @start_byte: First byte of pages to copy to/from * @data: Kernel buffer to get/put the data * @length: Number of bytes to copy * @flags: IOMMUFD_ACCESS_RW_* flags * * This will find each page in the range, kmap it and then memcpy to/from * the given kernel buffer. */ int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte, void *data, unsigned long length, unsigned int flags) { unsigned long start_index = start_byte / PAGE_SIZE; unsigned long last_index = (start_byte + length - 1) / PAGE_SIZE; bool change_mm = current->mm != pages->source_mm; int rc = 0; if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && (flags & __IOMMUFD_ACCESS_RW_SLOW_PATH)) change_mm = true; if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable) return -EPERM; if (pages->type == IOPT_ADDRESS_FILE) return iopt_pages_rw_slow(pages, start_index, last_index, start_byte % PAGE_SIZE, data, length, flags); if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(pages->type != IOPT_ADDRESS_USER)) return -EINVAL; if (!(flags & IOMMUFD_ACCESS_RW_KTHREAD) && change_mm) { if (start_index == last_index) return iopt_pages_rw_page(pages, start_index, start_byte % PAGE_SIZE, data, length, flags); return iopt_pages_rw_slow(pages, start_index, last_index, start_byte % PAGE_SIZE, data, length, flags); } /* * Try to copy using copy_to_user(). We do this as a fast path and * ignore any pinning inconsistencies, unlike a real DMA path. */ if (change_mm) { if (!mmget_not_zero(pages->source_mm)) return iopt_pages_rw_slow(pages, start_index, last_index, start_byte % PAGE_SIZE, data, length, flags); kthread_use_mm(pages->source_mm); } if (flags & IOMMUFD_ACCESS_RW_WRITE) { if (copy_to_user(pages->uptr + start_byte, data, length)) rc = -EFAULT; } else { if (copy_from_user(data, pages->uptr + start_byte, length)) rc = -EFAULT; } if (change_mm) { kthread_unuse_mm(pages->source_mm); mmput(pages->source_mm); } return rc; } static struct iopt_pages_access * iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index, unsigned long last) { struct interval_tree_node *node; lockdep_assert_held(&pages->mutex); /* There can be overlapping ranges in this interval tree */ for (node = interval_tree_iter_first(&pages->access_itree, index, last); node; node = interval_tree_iter_next(node, index, last)) if (node->start == index && node->last == last) return container_of(node, struct iopt_pages_access, node); return NULL; } /** * iopt_area_add_access() - Record an in-kernel access for PFNs * @area: The source of PFNs * @start_index: First page index * @last_index: Inclusive last page index * @out_pages: Output list of struct page's representing the PFNs * @flags: IOMMUFD_ACCESS_RW_* flags * @lock_area: Fail userspace munmap on this area * * Record that an in-kernel access will be accessing the pages, ensure they are * pinned, and return the PFNs as a simple list of 'struct page *'.
* * This should be undone through a matching call to iopt_area_remove_access() */ int iopt_area_add_access(struct iopt_area *area, unsigned long start_index, unsigned long last_index, struct page **out_pages, unsigned int flags, bool lock_area) { struct iopt_pages *pages = area->pages; struct iopt_pages_access *access; int rc; if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable) return -EPERM; mutex_lock(&pages->mutex); access = iopt_pages_get_exact_access(pages, start_index, last_index); if (access) { area->num_accesses++; if (lock_area) area->num_locks++; access->users++; iopt_pages_fill_from_xarray(pages, start_index, last_index, out_pages); mutex_unlock(&pages->mutex); return 0; } access = kzalloc(sizeof(*access), GFP_KERNEL_ACCOUNT); if (!access) { rc = -ENOMEM; goto err_unlock; } rc = iopt_pages_fill_xarray(pages, start_index, last_index, out_pages); if (rc) goto err_free; access->node.start = start_index; access->node.last = last_index; access->users = 1; area->num_accesses++; if (lock_area) area->num_locks++; interval_tree_insert(&access->node, &pages->access_itree); mutex_unlock(&pages->mutex); return 0; err_free: kfree(access); err_unlock: mutex_unlock(&pages->mutex); return rc; } /** * iopt_area_remove_access() - Release an in-kernel access for PFNs * @area: The source of PFNs * @start_index: First page index * @last_index: Inclusive last page index * @unlock_area: Must match the matching iopt_area_add_access()'s lock_area * * Undo iopt_area_add_access() and unpin the pages if necessary. The caller * must stop using the PFNs before calling this. */ void iopt_area_remove_access(struct iopt_area *area, unsigned long start_index, unsigned long last_index, bool unlock_area) { struct iopt_pages *pages = area->pages; struct iopt_pages_access *access; mutex_lock(&pages->mutex); access = iopt_pages_get_exact_access(pages, start_index, last_index); if (WARN_ON(!access)) goto out_unlock; WARN_ON(area->num_accesses == 0 || access->users == 0); if (unlock_area) { WARN_ON(area->num_locks == 0); area->num_locks--; } area->num_accesses--; access->users--; if (access->users) goto out_unlock; interval_tree_remove(&access->node, &pages->access_itree); iopt_pages_unfill_xarray(pages, start_index, last_index); kfree(access); out_unlock: mutex_unlock(&pages->mutex); }
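/*
 * A minimal, self-contained user-space sketch of the "carry" scheme that the
 * comments in __iopt_area_unfill_domain() and iopt_area_unpin_domain() above
 * describe: an unmap may only cut where the PFN run is not contiguous, so
 * PFNs past the unpin limit are unmapped together with their run but stay
 * pinned and are carried into the next iteration. This is a toy model, not
 * kernel code; all names and values below are hypothetical.
 */
#include <stdio.h>

#define NPFNS 10

/* Read ahead to the index that ends the physically contiguous run. */
static int run_end(const unsigned long *pfns, int start, int n)
{
	int i = start;

	while (i + 1 < n && pfns[i + 1] == pfns[i] + 1)
		i++;
	return i;
}

int main(void)
{
	unsigned long pfns[NPFNS] = { 100, 101, 102, 103, 104,
				      200, 201, 202, 203, 204 };
	int last_index = 6;	/* unpinning must stop here */
	int i = 0;

	while (i < NPFNS) {
		int end = run_end(pfns, i, NPFNS);

		/* The unmap must cover the whole contiguous run. */
		printf("unmap indexes [%d..%d]\n", i, end);
		for (int j = i; j <= end && j <= last_index; j++)
			printf("  unpin pfn %lu\n", pfns[j]);
		if (end > last_index)
			printf("  carry indexes [%d..%d]: unmapped, still pinned\n",
			       last_index + 1, end);
		i = end + 1;
	}
	return 0;
}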
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/sch_cbs.c Credit Based Shaper * * Authors: Vinicius Costa Gomes <vinicius.gomes@intel.com> */ /* Credit Based Shaper (CBS) * ========================= * * This is a simple rate-limiting shaper aimed at TSN applications on * systems with known traffic workloads. * * Its algorithm is defined by the IEEE 802.1Q-2014 Specification, * Section 8.6.8.2, and explained in more detail in Annex L of the * same specification. * * There are four tunables to be considered: * * 'idleslope': Idleslope is the rate of credits that is * accumulated (in kilobits per second) when there is at least * one packet waiting for transmission. Packets are transmitted * when the current value of credits is equal to or greater than * zero. When there is no packet to be transmitted the amount of * credits is set to zero. This is the main tunable of the CBS * algorithm. * * 'sendslope': * Sendslope is the rate of credits that is depleted (it should be a * negative number of kilobits per second) when a transmission is * occurring. It can be calculated as follows (IEEE 802.1Q-2014 Section * 8.6.8.2, item g): * * sendslope = idleslope - port_transmit_rate * * 'hicredit': Hicredit defines the maximum amount of credits (in * bytes) that can be accumulated.
Hicredit depends on the * characteristics of interfering traffic, where * 'max_interference_size' is the maximum size of any burst of * traffic that can delay the transmission of a frame that is * available for transmission for this traffic class (IEEE * 802.1Q-2014 Annex L, Equation L-3): * * hicredit = max_interference_size * (idleslope / port_transmit_rate) * * 'locredit': Locredit is the minimum amount of credits that can * be reached. It is a function of the traffic flowing through * this qdisc (IEEE 802.1Q-2014 Annex L, Equation L-2): * * locredit = max_frame_size * (sendslope / port_transmit_rate) */ #include <linux/ethtool.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/units.h> #include <net/netevent.h> #include <net/netlink.h> #include <net/sch_generic.h> #include <net/pkt_sched.h> static LIST_HEAD(cbs_list); static DEFINE_SPINLOCK(cbs_list_lock); struct cbs_sched_data { bool offload; int queue; atomic64_t port_rate; /* in bytes/s */ s64 last; /* timestamp in ns */ s64 credits; /* in bytes */ s32 locredit; /* in bytes */ s32 hicredit; /* in bytes */ s64 sendslope; /* in bytes/s */ s64 idleslope; /* in bytes/s */ struct qdisc_watchdog watchdog; int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free); struct sk_buff *(*dequeue)(struct Qdisc *sch); struct Qdisc *qdisc; struct list_head cbs_list; }; static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct Qdisc *child, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); int err; err = child->ops->enqueue(skb, child, to_free); if (err != NET_XMIT_SUCCESS) return err; sch->qstats.backlog += len; sch->q.qlen++; return NET_XMIT_SUCCESS; } static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); struct Qdisc *qdisc = q->qdisc; return cbs_child_enqueue(skb, sch, qdisc, to_free); } static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); struct Qdisc *qdisc = q->qdisc; if (sch->q.qlen == 0 && q->credits > 0) { /* We need to stop accumulating credits when there are * no enqueued packets and q->credits is positive.
*/ q->credits = 0; q->last = ktime_get_ns(); } return cbs_child_enqueue(skb, sch, qdisc, to_free); } static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); return q->enqueue(skb, sch, to_free); } /* timediff is in ns, slope is in bytes/s */ static s64 timediff_to_credits(s64 timediff, s64 slope) { return div64_s64(timediff * slope, NSEC_PER_SEC); } static s64 delay_from_credits(s64 credits, s64 slope) { if (unlikely(slope == 0)) return S64_MAX; return div64_s64(-credits * NSEC_PER_SEC, slope); } static s64 credits_from_len(unsigned int len, s64 slope, s64 port_rate) { if (unlikely(port_rate == 0)) return S64_MAX; return div64_s64(len * slope, port_rate); } static struct sk_buff *cbs_child_dequeue(struct Qdisc *sch, struct Qdisc *child) { struct sk_buff *skb; skb = child->ops->dequeue(child); if (!skb) return NULL; qdisc_qstats_backlog_dec(sch, skb); qdisc_bstats_update(sch, skb); sch->q.qlen--; return skb; } static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch) { struct cbs_sched_data *q = qdisc_priv(sch); struct Qdisc *qdisc = q->qdisc; s64 now = ktime_get_ns(); struct sk_buff *skb; s64 credits; int len; /* The previous packet is still being sent */ if (now < q->last) { qdisc_watchdog_schedule_ns(&q->watchdog, q->last); return NULL; } if (q->credits < 0) { credits = timediff_to_credits(now - q->last, q->idleslope); credits = q->credits + credits; q->credits = min_t(s64, credits, q->hicredit); if (q->credits < 0) { s64 delay; delay = delay_from_credits(q->credits, q->idleslope); qdisc_watchdog_schedule_ns(&q->watchdog, now + delay); q->last = now; return NULL; } } skb = cbs_child_dequeue(sch, qdisc); if (!skb) return NULL; len = qdisc_pkt_len(skb); /* As sendslope is a negative number, this will decrease the * amount of q->credits. 
*/ credits = credits_from_len(len, q->sendslope, atomic64_read(&q->port_rate)); credits += q->credits; q->credits = max_t(s64, credits, q->locredit); /* Estimate of the transmission of the last byte of the packet in ns */ if (unlikely(atomic64_read(&q->port_rate) == 0)) q->last = now; else q->last = now + div64_s64(len * NSEC_PER_SEC, atomic64_read(&q->port_rate)); return skb; } static struct sk_buff *cbs_dequeue_offload(struct Qdisc *sch) { struct cbs_sched_data *q = qdisc_priv(sch); struct Qdisc *qdisc = q->qdisc; return cbs_child_dequeue(sch, qdisc); } static struct sk_buff *cbs_dequeue(struct Qdisc *sch) { struct cbs_sched_data *q = qdisc_priv(sch); return q->dequeue(sch); } static const struct nla_policy cbs_policy[TCA_CBS_MAX + 1] = { [TCA_CBS_PARMS] = { .len = sizeof(struct tc_cbs_qopt) }, }; static void cbs_disable_offload(struct net_device *dev, struct cbs_sched_data *q) { struct tc_cbs_qopt_offload cbs = { }; const struct net_device_ops *ops; int err; if (!q->offload) return; q->enqueue = cbs_enqueue_soft; q->dequeue = cbs_dequeue_soft; ops = dev->netdev_ops; if (!ops->ndo_setup_tc) return; cbs.queue = q->queue; cbs.enable = 0; err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs); if (err < 0) pr_warn("Couldn't disable CBS offload for queue %d\n", cbs.queue); } static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q, const struct tc_cbs_qopt *opt, struct netlink_ext_ack *extack) { const struct net_device_ops *ops = dev->netdev_ops; struct tc_cbs_qopt_offload cbs = { }; int err; if (!ops->ndo_setup_tc) { NL_SET_ERR_MSG(extack, "Specified device does not support cbs offload"); return -EOPNOTSUPP; } cbs.queue = q->queue; cbs.enable = 1; cbs.hicredit = opt->hicredit; cbs.locredit = opt->locredit; cbs.idleslope = opt->idleslope; cbs.sendslope = opt->sendslope; err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs); if (err < 0) { NL_SET_ERR_MSG(extack, "Specified device failed to setup cbs hardware offload"); return err; } q->enqueue = cbs_enqueue_offload; q->dequeue = cbs_dequeue_offload; return 0; } static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q) { struct ethtool_link_ksettings ecmd; int speed = SPEED_10; s64 port_rate; int err; err = __ethtool_get_link_ksettings(dev, &ecmd); if (err < 0) goto skip; if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN) speed = ecmd.base.speed; skip: port_rate = speed * 1000 * BYTES_PER_KBIT; atomic64_set(&q->port_rate, port_rate); netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n", dev->name, (long long)atomic64_read(&q->port_rate), ecmd.base.speed); } static int cbs_dev_notifier(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct cbs_sched_data *q; struct net_device *qdev; bool found = false; ASSERT_RTNL(); if (event != NETDEV_UP && event != NETDEV_CHANGE) return NOTIFY_DONE; spin_lock(&cbs_list_lock); list_for_each_entry(q, &cbs_list, cbs_list) { qdev = qdisc_dev(q->qdisc); if (qdev == dev) { found = true; break; } } spin_unlock(&cbs_list_lock); if (found) cbs_set_port_rate(dev, q); return NOTIFY_DONE; } static int cbs_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct cbs_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); struct nlattr *tb[TCA_CBS_MAX + 1]; struct tc_cbs_qopt *qopt; int err; err = nla_parse_nested_deprecated(tb, TCA_CBS_MAX, opt, cbs_policy, extack); if (err < 0) return err; if (!tb[TCA_CBS_PARMS]) { NL_SET_ERR_MSG(extack, 
"Missing CBS parameter which are mandatory"); return -EINVAL; } qopt = nla_data(tb[TCA_CBS_PARMS]); if (!qopt->offload) { cbs_set_port_rate(dev, q); cbs_disable_offload(dev, q); } else { err = cbs_enable_offload(dev, q, qopt, extack); if (err < 0) return err; } /* Everything went OK, save the parameters used. */ WRITE_ONCE(q->hicredit, qopt->hicredit); WRITE_ONCE(q->locredit, qopt->locredit); WRITE_ONCE(q->idleslope, qopt->idleslope * BYTES_PER_KBIT); WRITE_ONCE(q->sendslope, qopt->sendslope * BYTES_PER_KBIT); WRITE_ONCE(q->offload, qopt->offload); return 0; } static int cbs_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct cbs_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); if (!opt) { NL_SET_ERR_MSG(extack, "Missing CBS qdisc options which are mandatory"); return -EINVAL; } q->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle, extack); if (!q->qdisc) return -ENOMEM; spin_lock(&cbs_list_lock); list_add(&q->cbs_list, &cbs_list); spin_unlock(&cbs_list_lock); qdisc_hash_add(q->qdisc, false); q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0); q->enqueue = cbs_enqueue_soft; q->dequeue = cbs_dequeue_soft; qdisc_watchdog_init(&q->watchdog, sch); return cbs_change(sch, opt, extack); } static void cbs_destroy(struct Qdisc *sch) { struct cbs_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); /* Nothing to do if we couldn't create the underlying qdisc */ if (!q->qdisc) return; qdisc_watchdog_cancel(&q->watchdog); cbs_disable_offload(dev, q); spin_lock(&cbs_list_lock); list_del(&q->cbs_list); spin_unlock(&cbs_list_lock); qdisc_put(q->qdisc); } static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb) { struct cbs_sched_data *q = qdisc_priv(sch); struct tc_cbs_qopt opt = { }; struct nlattr *nest; nest = nla_nest_start_noflag(skb, TCA_OPTIONS); if (!nest) goto nla_put_failure; opt.hicredit = READ_ONCE(q->hicredit); opt.locredit = READ_ONCE(q->locredit); opt.sendslope = div64_s64(READ_ONCE(q->sendslope), BYTES_PER_KBIT); opt.idleslope = div64_s64(READ_ONCE(q->idleslope), BYTES_PER_KBIT); opt.offload = READ_ONCE(q->offload); if (nla_put(skb, TCA_CBS_PARMS, sizeof(opt), &opt)) goto nla_put_failure; return nla_nest_end(skb, nest); nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static int cbs_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { struct cbs_sched_data *q = qdisc_priv(sch); if (cl != 1 || !q->qdisc) /* only one class */ return -ENOENT; tcm->tcm_handle |= TC_H_MIN(1); tcm->tcm_info = q->qdisc->handle; return 0; } static int cbs_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old, struct netlink_ext_ack *extack) { struct cbs_sched_data *q = qdisc_priv(sch); if (!new) { new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle, NULL); if (!new) new = &noop_qdisc; } *old = qdisc_replace(sch, new, &q->qdisc); return 0; } static struct Qdisc *cbs_leaf(struct Qdisc *sch, unsigned long arg) { struct cbs_sched_data *q = qdisc_priv(sch); return q->qdisc; } static unsigned long cbs_find(struct Qdisc *sch, u32 classid) { return 1; } static void cbs_walk(struct Qdisc *sch, struct qdisc_walker *walker) { if (!walker->stop) { tc_qdisc_stats_dump(sch, 1, walker); } } static const struct Qdisc_class_ops cbs_class_ops = { .graft = cbs_graft, .leaf = cbs_leaf, .find = cbs_find, .walk = cbs_walk, .dump = cbs_dump_class, }; static struct Qdisc_ops cbs_qdisc_ops __read_mostly = { .id = "cbs", .cl_ops = 
&cbs_class_ops, .priv_size = sizeof(struct cbs_sched_data), .enqueue = cbs_enqueue, .dequeue = cbs_dequeue, .peek = qdisc_peek_dequeued, .init = cbs_init, .reset = qdisc_reset_queue, .destroy = cbs_destroy, .change = cbs_change, .dump = cbs_dump, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("cbs"); static struct notifier_block cbs_device_notifier = { .notifier_call = cbs_dev_notifier, }; static int __init cbs_module_init(void) { int err; err = register_netdevice_notifier(&cbs_device_notifier); if (err) return err; err = register_qdisc(&cbs_qdisc_ops); if (err) unregister_netdevice_notifier(&cbs_device_notifier); return err; } static void __exit cbs_module_exit(void) { unregister_qdisc(&cbs_qdisc_ops); unregister_netdevice_notifier(&cbs_device_notifier); } module_init(cbs_module_init) module_exit(cbs_module_exit) MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Credit Based shaper");
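/*
 * A worked example (not kernel code) of the tunable formulas quoted in the
 * header comment of this file. The link speed and frame sizes are assumptions
 * chosen for illustration. tc(8) takes idleslope/sendslope in kbit/s and
 * hicredit/locredit in bytes, so the computed values would be configured with
 * something like (hypothetical interface and handle):
 *
 *   tc qdisc replace dev eth0 parent 100:1 cbs \
 *      idleslope 20000 sendslope -980000 hicredit 30 locredit -1491 offload 1
 */
#include <stdio.h>

int main(void)
{
	long long port_transmit_rate = 1000000;	/* kbit/s, i.e. 1 Gbit/s */
	long long idleslope = 20000;		/* kbit/s */
	long long max_interference_size = 1522;	/* bytes, one full frame */
	long long max_frame_size = 1522;	/* bytes */

	/* sendslope = idleslope - port_transmit_rate (802.1Q-2014 8.6.8.2 g) */
	long long sendslope = idleslope - port_transmit_rate;

	/* hicredit = max_interference_size * (idleslope / port_transmit_rate)
	 * (802.1Q-2014 Annex L, Equation L-3) */
	long long hicredit = max_interference_size * idleslope /
			     port_transmit_rate;

	/* locredit = max_frame_size * (sendslope / port_transmit_rate)
	 * (802.1Q-2014 Annex L, Equation L-2) */
	long long locredit = max_frame_size * sendslope /
			     port_transmit_rate;

	printf("sendslope = %lld kbit/s\n", sendslope);	/* -980000 */
	printf("hicredit  = %lld bytes\n", hicredit);	/* 30 */
	printf("locredit  = %lld bytes\n", locredit);	/* -1491 */
	return 0;
}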
/* * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/in.h> #include <linux/if.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/if_arp.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include <net/addrconf.h> #include "rds_single_path.h" #include "rds.h" #include "ib.h" #include "ib_mr.h" static unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE; static unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE; unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT; static atomic_t rds_ib_unloading; module_param(rds_ib_mr_1m_pool_size, int, 0444); MODULE_PARM_DESC(rds_ib_mr_1m_pool_size, " Max number of 1M mr per HCA"); module_param(rds_ib_mr_8k_pool_size, int, 0444); MODULE_PARM_DESC(rds_ib_mr_8k_pool_size, " Max number of 8K mr per HCA"); module_param(rds_ib_retry_count, int, 0444); MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error"); /* * we have a clumsy combination of RCU and a rwsem protecting this list * because it is used both in the get_mr fast path and while blocking in * the FMR flushing path. */ DECLARE_RWSEM(rds_ib_devices_lock); struct list_head rds_ib_devices; /* NOTE: if also grabbing ibdev lock, grab this first */ DEFINE_SPINLOCK(ib_nodev_conns_lock); LIST_HEAD(ib_nodev_conns); static void rds_ib_nodev_connect(void) { struct rds_ib_connection *ic; spin_lock(&ib_nodev_conns_lock); list_for_each_entry(ic, &ib_nodev_conns, ib_node) rds_conn_connect_if_down(ic->conn); spin_unlock(&ib_nodev_conns_lock); } static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev) { struct rds_ib_connection *ic; unsigned long flags; spin_lock_irqsave(&rds_ibdev->spinlock, flags); list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node) rds_conn_path_drop(&ic->conn->c_path[0], true); spin_unlock_irqrestore(&rds_ibdev->spinlock, flags); } /* * rds_ib_destroy_mr_pool() blocks on a few things and mrs drop references * from interrupt context so we push freeing off into a work struct in krdsd.
*/ static void rds_ib_dev_free(struct work_struct *work) { struct rds_ib_ipaddr *i_ipaddr, *i_next; struct rds_ib_device *rds_ibdev = container_of(work, struct rds_ib_device, free_work); if (rds_ibdev->mr_8k_pool) rds_ib_destroy_mr_pool(rds_ibdev->mr_8k_pool); if (rds_ibdev->mr_1m_pool) rds_ib_destroy_mr_pool(rds_ibdev->mr_1m_pool); if (rds_ibdev->pd) ib_dealloc_pd(rds_ibdev->pd); list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) { list_del(&i_ipaddr->list); kfree(i_ipaddr); } kfree(rds_ibdev->vector_load); kfree(rds_ibdev); } void rds_ib_dev_put(struct rds_ib_device *rds_ibdev) { BUG_ON(refcount_read(&rds_ibdev->refcount) == 0); if (refcount_dec_and_test(&rds_ibdev->refcount)) queue_work(rds_wq, &rds_ibdev->free_work); } static int rds_ib_add_one(struct ib_device *device) { struct rds_ib_device *rds_ibdev; int ret; /* Only handle IB (no iWARP) devices */ if (device->node_type != RDMA_NODE_IB_CA) return -EOPNOTSUPP; /* Device must support FRWR */ if (!(device->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) return -EOPNOTSUPP; rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL, ibdev_to_node(device)); if (!rds_ibdev) return -ENOMEM; spin_lock_init(&rds_ibdev->spinlock); refcount_set(&rds_ibdev->refcount, 1); INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free); INIT_LIST_HEAD(&rds_ibdev->ipaddr_list); INIT_LIST_HEAD(&rds_ibdev->conn_list); rds_ibdev->max_wrs = device->attrs.max_qp_wr; rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE); rds_ibdev->odp_capable = !!(device->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING) && !!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps & IB_ODP_SUPPORT_WRITE) && !!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps & IB_ODP_SUPPORT_READ); rds_ibdev->max_1m_mrs = device->attrs.max_mr ? min_t(unsigned int, (device->attrs.max_mr / 2), rds_ib_mr_1m_pool_size) : rds_ib_mr_1m_pool_size; rds_ibdev->max_8k_mrs = device->attrs.max_mr ? 
min_t(unsigned int, ((device->attrs.max_mr / 2) * RDS_MR_8K_SCALE), rds_ib_mr_8k_pool_size) : rds_ib_mr_8k_pool_size; rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom; rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom; rds_ibdev->vector_load = kcalloc(device->num_comp_vectors, sizeof(int), GFP_KERNEL); if (!rds_ibdev->vector_load) { pr_err("RDS/IB: %s failed to allocate vector memory\n", __func__); ret = -ENOMEM; goto put_dev; } rds_ibdev->dev = device; rds_ibdev->pd = ib_alloc_pd(device, 0); if (IS_ERR(rds_ibdev->pd)) { ret = PTR_ERR(rds_ibdev->pd); rds_ibdev->pd = NULL; goto put_dev; } rds_ibdev->mr_1m_pool = rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL); if (IS_ERR(rds_ibdev->mr_1m_pool)) { ret = PTR_ERR(rds_ibdev->mr_1m_pool); rds_ibdev->mr_1m_pool = NULL; goto put_dev; } rds_ibdev->mr_8k_pool = rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_8K_POOL); if (IS_ERR(rds_ibdev->mr_8k_pool)) { ret = PTR_ERR(rds_ibdev->mr_8k_pool); rds_ibdev->mr_8k_pool = NULL; goto put_dev; } rdsdebug("RDS/IB: max_mr = %d, max_wrs = %d, max_sge = %d, max_1m_mrs = %d, max_8k_mrs = %d\n", device->attrs.max_mr, rds_ibdev->max_wrs, rds_ibdev->max_sge, rds_ibdev->max_1m_mrs, rds_ibdev->max_8k_mrs); pr_info("RDS/IB: %s: added\n", device->name); down_write(&rds_ib_devices_lock); list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices); up_write(&rds_ib_devices_lock); refcount_inc(&rds_ibdev->refcount); ib_set_client_data(device, &rds_ib_client, rds_ibdev); rds_ib_nodev_connect(); return 0; put_dev: rds_ib_dev_put(rds_ibdev); return ret; } /* * New connections use this to find the device to associate with the * connection. It's not in the fast path so we're not concerned about the * performance of the IB call. (As of this writing, it uses an interrupt * blocking spinlock to serialize walking a per-device list of all registered * clients.) * * RCU is used to handle incoming connections racing with device teardown. * Rather than use a lock to serialize removal from the client_data and * getting a new reference, we use an RCU grace period. The destruction * path removes the device from client_data and then waits for all RCU * readers to finish. * * A new connection can get NULL from this if it's arriving on a * device that is in the process of being removed. */ struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device) { struct rds_ib_device *rds_ibdev; rcu_read_lock(); rds_ibdev = ib_get_client_data(device, &rds_ib_client); if (rds_ibdev) refcount_inc(&rds_ibdev->refcount); rcu_read_unlock(); return rds_ibdev; } /* * The IB stack is letting us know that a device is going away. This can * happen if the underlying HCA driver is removed or if PCI hotplug is removing * the pci function, for example. * * This can be called at any time and can be racing with any other RDS path. */ static void rds_ib_remove_one(struct ib_device *device, void *client_data) { struct rds_ib_device *rds_ibdev = client_data; rds_ib_dev_shutdown(rds_ibdev); /* stop connection attempts from getting a reference to this device. */ ib_set_client_data(device, &rds_ib_client, NULL); down_write(&rds_ib_devices_lock); list_del_rcu(&rds_ibdev->list); up_write(&rds_ib_devices_lock); /* * This synchronize_rcu() is waiting for readers of both the ib * client data and the devices list to finish before we drop * both of those references.
*/ synchronize_rcu(); rds_ib_dev_put(rds_ibdev); rds_ib_dev_put(rds_ibdev); } struct ib_client rds_ib_client = { .name = "rds_ib", .add = rds_ib_add_one, .remove = rds_ib_remove_one }; static int rds_ib_conn_info_visitor(struct rds_connection *conn, void *buffer) { struct rds_info_rdma_connection *iinfo = buffer; struct rds_ib_connection *ic = conn->c_transport_data; /* We will only ever look at IB transports */ if (conn->c_trans != &rds_ib_transport) return 0; if (conn->c_isv6) return 0; iinfo->src_addr = conn->c_laddr.s6_addr32[3]; iinfo->dst_addr = conn->c_faddr.s6_addr32[3]; if (ic) { iinfo->tos = conn->c_tos; iinfo->sl = ic->i_sl; } memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid)); memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid)); if (rds_conn_state(conn) == RDS_CONN_UP) { struct rds_ib_device *rds_ibdev; rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid, (union ib_gid *)&iinfo->dst_gid); rds_ibdev = ic->rds_ibdev; iinfo->max_send_wr = ic->i_send_ring.w_nr; iinfo->max_recv_wr = ic->i_recv_ring.w_nr; iinfo->max_send_sge = rds_ibdev->max_sge; rds_ib_get_mr_info(rds_ibdev, iinfo); iinfo->cache_allocs = atomic_read(&ic->i_cache_allocs); } return 1; } #if IS_ENABLED(CONFIG_IPV6) /* IPv6 version of rds_ib_conn_info_visitor(). */ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, void *buffer) { struct rds6_info_rdma_connection *iinfo6 = buffer; struct rds_ib_connection *ic = conn->c_transport_data; /* We will only ever look at IB transports */ if (conn->c_trans != &rds_ib_transport) return 0; iinfo6->src_addr = conn->c_laddr; iinfo6->dst_addr = conn->c_faddr; if (ic) { iinfo6->tos = conn->c_tos; iinfo6->sl = ic->i_sl; } memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid)); memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid)); if (rds_conn_state(conn) == RDS_CONN_UP) { struct rds_ib_device *rds_ibdev; rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid, (union ib_gid *)&iinfo6->dst_gid); rds_ibdev = ic->rds_ibdev; iinfo6->max_send_wr = ic->i_send_ring.w_nr; iinfo6->max_recv_wr = ic->i_recv_ring.w_nr; iinfo6->max_send_sge = rds_ibdev->max_sge; rds6_ib_get_mr_info(rds_ibdev, iinfo6); iinfo6->cache_allocs = atomic_read(&ic->i_cache_allocs); } return 1; } #endif static void rds_ib_ic_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) { u64 buffer[(sizeof(struct rds_info_rdma_connection) + 7) / 8]; rds_for_each_conn_info(sock, len, iter, lens, rds_ib_conn_info_visitor, buffer, sizeof(struct rds_info_rdma_connection)); } #if IS_ENABLED(CONFIG_IPV6) /* IPv6 version of rds_ib_ic_info(). */ static void rds6_ib_ic_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) { u64 buffer[(sizeof(struct rds6_info_rdma_connection) + 7) / 8]; rds_for_each_conn_info(sock, len, iter, lens, rds6_ib_conn_info_visitor, buffer, sizeof(struct rds6_info_rdma_connection)); } #endif /* * Early RDS/IB was built to only bind to an address if there is an IPoIB * device with that address set. * * If it were me, I'd advocate for something more flexible. Sending and * receiving should be device-agnostic. Transports would try and maintain * connections between peers who have messages queued. Userspace would be * allowed to influence which paths have priority. We could call userspace * asserting this policy "routing". 
*/ static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr, __u32 scope_id) { int ret; struct rdma_cm_id *cm_id; #if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 sin6; #endif struct sockaddr_in sin; struct sockaddr *sa; bool isv4; isv4 = ipv6_addr_v4mapped(addr); /* Create a CMA ID and try to bind it. This catches both * IB and iWARP capable NICs. */ cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cm_id)) return PTR_ERR(cm_id); if (isv4) { memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; sin.sin_addr.s_addr = addr->s6_addr32[3]; sa = (struct sockaddr *)&sin; } else { #if IS_ENABLED(CONFIG_IPV6) memset(&sin6, 0, sizeof(sin6)); sin6.sin6_family = AF_INET6; sin6.sin6_addr = *addr; sin6.sin6_scope_id = scope_id; sa = (struct sockaddr *)&sin6; /* XXX Do a special IPv6 link-local address check here. The * reason is that rdma_bind_addr() always succeeds with an IPv6 * link-local address regardless of whether it is actually * configured in the system. */ if (ipv6_addr_type(addr) & IPV6_ADDR_LINKLOCAL) { struct net_device *dev; if (scope_id == 0) { ret = -EADDRNOTAVAIL; goto out; } /* Use init_net for now as RDS is not network * namespace aware. */ dev = dev_get_by_index(&init_net, scope_id); if (!dev) { ret = -EADDRNOTAVAIL; goto out; } if (!ipv6_chk_addr(&init_net, addr, dev, 1)) { dev_put(dev); ret = -EADDRNOTAVAIL; goto out; } dev_put(dev); } #else ret = -EADDRNOTAVAIL; goto out; #endif } /* rdma_bind_addr will only succeed for IB & iWARP devices */ ret = rdma_bind_addr(cm_id, sa); /* due to this, we will claim to support iWARP devices unless we check node_type. */ if (ret || !cm_id->device || cm_id->device->node_type != RDMA_NODE_IB_CA) ret = -EADDRNOTAVAIL; rdsdebug("addr %pI6c%%%u ret %d node type %d\n", addr, scope_id, ret, cm_id->device ? cm_id->device->node_type : -1); out: rdma_destroy_id(cm_id); return ret; } static void rds_ib_unregister_client(void) { ib_unregister_client(&rds_ib_client); /* wait for rds_ib_dev_free() to complete */ flush_workqueue(rds_wq); } static void rds_ib_set_unloading(void) { atomic_set(&rds_ib_unloading, 1); } static bool rds_ib_is_unloading(struct rds_connection *conn) { struct rds_conn_path *cp = &conn->c_path[0]; return (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags) || atomic_read(&rds_ib_unloading) != 0); } void rds_ib_exit(void) { rds_ib_set_unloading(); synchronize_rcu(); rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); #if IS_ENABLED(CONFIG_IPV6) rds_info_deregister_func(RDS6_INFO_IB_CONNECTIONS, rds6_ib_ic_info); #endif rds_ib_unregister_client(); rds_ib_destroy_nodev_conns(); rds_ib_sysctl_exit(); rds_ib_recv_exit(); rds_trans_unregister(&rds_ib_transport); rds_ib_mr_exit(); } static u8 rds_ib_get_tos_map(u8 tos) { /* 1:1 user to transport map for RDMA transport. * In the future, if a custom map is desired, a hook can export * a user-configurable map.
*/ return tos; } struct rds_transport rds_ib_transport = { .laddr_check = rds_ib_laddr_check, .xmit_path_complete = rds_ib_xmit_path_complete, .xmit = rds_ib_xmit, .xmit_rdma = rds_ib_xmit_rdma, .xmit_atomic = rds_ib_xmit_atomic, .recv_path = rds_ib_recv_path, .conn_alloc = rds_ib_conn_alloc, .conn_free = rds_ib_conn_free, .conn_path_connect = rds_ib_conn_path_connect, .conn_path_shutdown = rds_ib_conn_path_shutdown, .inc_copy_to_user = rds_ib_inc_copy_to_user, .inc_free = rds_ib_inc_free, .cm_initiate_connect = rds_ib_cm_initiate_connect, .cm_handle_connect = rds_ib_cm_handle_connect, .cm_connect_complete = rds_ib_cm_connect_complete, .stats_info_copy = rds_ib_stats_info_copy, .exit = rds_ib_exit, .get_mr = rds_ib_get_mr, .sync_mr = rds_ib_sync_mr, .free_mr = rds_ib_free_mr, .flush_mrs = rds_ib_flush_mrs, .get_tos_map = rds_ib_get_tos_map, .t_owner = THIS_MODULE, .t_name = "infiniband", .t_unloading = rds_ib_is_unloading, .t_type = RDS_TRANS_IB }; int rds_ib_init(void) { int ret; INIT_LIST_HEAD(&rds_ib_devices); ret = rds_ib_mr_init(); if (ret) goto out; ret = ib_register_client(&rds_ib_client); if (ret) goto out_mr_exit; ret = rds_ib_sysctl_init(); if (ret) goto out_ibreg; ret = rds_ib_recv_init(); if (ret) goto out_sysctl; rds_trans_register(&rds_ib_transport); rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); #if IS_ENABLED(CONFIG_IPV6) rds_info_register_func(RDS6_INFO_IB_CONNECTIONS, rds6_ib_ic_info); #endif goto out; out_sysctl: rds_ib_sysctl_exit(); out_ibreg: rds_ib_unregister_client(); out_mr_exit: rds_ib_mr_exit(); out: return ret; } MODULE_LICENSE("GPL");
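/*
 * A minimal user-space model (not kernel code) of the two-reference lifetime
 * used by rds_ib_add_one()/rds_ib_remove_one() above: the device starts with
 * one reference for the rds_ib_devices list and takes a second for the
 * ib_set_client_data() pointer, so removal must drop both before the free
 * work can run. C11 atomics stand in for the kernel's refcount_t; all names
 * below are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>

struct toy_dev {
	atomic_int refcount;
};

static void toy_dev_put(struct toy_dev *d)
{
	/* Mirrors rds_ib_dev_put(): the last put queues the free work. */
	if (atomic_fetch_sub(&d->refcount, 1) == 1)
		printf("refcount hit zero: queue free work\n");
	else
		printf("refcount now %d\n", atomic_load(&d->refcount));
}

int main(void)
{
	struct toy_dev d;

	atomic_init(&d.refcount, 1);		/* rds_ib_devices list reference */
	atomic_fetch_add(&d.refcount, 1);	/* client_data reference */

	/* Removal path: after synchronize_rcu() no reader can still take a
	 * new reference, which is why rds_ib_remove_one() above calls
	 * rds_ib_dev_put() twice. */
	toy_dev_put(&d);
	toy_dev_put(&d);
	return 0;
}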
2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/scs.h>
#include <linux/percpu-rwsem.h>
#include <linux/cpuset.h>
#include <linux/random.h>
#include <linux/cc_platform.h>
#include <linux/parser.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * struct cpuhp_cpu_state - Per cpu hotplug state storage
 * @state: The current cpu state
 * @target: The target state
 * @fail: Current CPU hotplug callback state
 * @thread: Pointer to the hotplug thread
 * @should_run: Thread should execute
 * @rollback: Perform a rollback
 * @single: Single callback invocation
 * @bringup: Single callback bringup or teardown selector
 * @node: Remote CPU node; for multi-instance, do a
 *        single entry callback for install/remove
 * @last: For multi-instance rollback, remember how far we got
 * @cb_state: The state for a single callback (install/uninstall)
 * @result: Result of the operation
 * @ap_sync_state: State for AP synchronization
 * @done_up: Signal completion to the issuer of the task for cpu-up
 * @done_down: Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	atomic_t		ap_sync_state;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#ifdef CONFIG_SMP
cpumask_t cpus_booted_once_mask;
#endif
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);

static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * struct cpuhp_step - Hotplug state machine step
 * @name: Name of the step
 * @startup: Startup function of the step
 * @teardown: Teardown function of the step
 * @cant_stop: Bringup/teardown can't be stopped at this step
 * @multi_instance: State has multiple instances which get added afterwards
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;

	/* private: */
	struct hlist_head	list;
	/* public: */
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}

static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
{
	return bringup ? !step->startup.single : !step->teardown.single;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu: The cpu for which the callback should be invoked
 * @state: The state to do callbacks for
 * @bringup: True if the bringup callback should be invoked
 * @node: For multi-instance, do a single entry callback for install/remove
 * @lastp: For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 *
 * Return: %0 on success or a negative errno code
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;
		return -EAGAIN;
	}

	if (cpuhp_step_empty(bringup, step)) {
		WARN_ON_ONCE(1);
		return 0;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;

		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail.
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}
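For multi-instance states, each registered instance hangs off the step's hlist and the callback above is invoked once per instance. A minimal, hypothetical sketch of how a driver would use that facility (the mydrv_* names are illustrative; cpuhp_setup_state_multi() and cpuhp_state_add_instance() are the registration entry points declared in <linux/cpuhotplug.h>):

struct mydrv_instance {
	struct hlist_node node;		/* must embed the hlist_node */
	int id;
};

static int mydrv_instance_online(unsigned int cpu, struct hlist_node *node)
{
	struct mydrv_instance *inst =
		container_of(node, struct mydrv_instance, node);

	pr_debug("instance %d sees CPU%u online\n", inst->id, cpu);
	return 0;
}

static enum cpuhp_state mydrv_state;

static int __init mydrv_register(struct mydrv_instance *inst)
{
	int ret;

	/* Allocate a dynamic multi-instance online state */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				      mydrv_instance_online, NULL);
	if (ret < 0)
		return ret;
	mydrv_state = ret;
	/* Attach one instance; its callback runs on every online CPU */
	return cpuhp_state_add_instance(mydrv_state, &inst->node);
}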
#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;

	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;

	complete(done);
}

/*
 * The former STARTING/DYING states run with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Synchronization state management */
enum cpuhp_sync_state {
	SYNC_STATE_DEAD,
	SYNC_STATE_KICKED,
	SYNC_STATE_SHOULD_DIE,
	SYNC_STATE_ALIVE,
	SYNC_STATE_SHOULD_ONLINE,
	SYNC_STATE_ONLINE,
};

#ifdef CONFIG_HOTPLUG_CORE_SYNC
/**
 * cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown
 * @state: The synchronization state to set
 *
 * No synchronization point. Just update of the synchronization state, but implies
 * a full barrier so that the AP changes are visible before the control CPU proceeds.
 */
static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
{
	atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);

	(void)atomic_xchg(st, state);
}

void __weak arch_cpuhp_sync_state_poll(void) { cpu_relax(); }

static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,
				      enum cpuhp_sync_state next_state)
{
	atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
	ktime_t now, end, start = ktime_get();
	int sync;

	end = start + 10ULL * NSEC_PER_SEC;

	sync = atomic_read(st);
	while (1) {
		if (sync == state) {
			if (!atomic_try_cmpxchg(st, &sync, next_state))
				continue;
			return true;
		}

		now = ktime_get();
		if (now > end) {
			/* Timeout. Leave the state unchanged */
			return false;
		} else if (now - start < NSEC_PER_MSEC) {
			/* Poll for one millisecond */
			arch_cpuhp_sync_state_poll();
		} else {
			usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
		}
		sync = atomic_read(st);
	}
	return true;
}
#else /* CONFIG_HOTPLUG_CORE_SYNC */
static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { }
#endif /* !CONFIG_HOTPLUG_CORE_SYNC */

#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
/**
 * cpuhp_ap_report_dead - Update synchronization state to DEAD
 *
 * No synchronization point. Just update of the synchronization state.
 */
void cpuhp_ap_report_dead(void)
{
	cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);
}

void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }

/*
 * Late CPU shutdown synchronization point. Cannot use cpuhp_state::done_down
 * because the AP cannot issue complete() at this stage.
 */
static void cpuhp_bp_sync_dead(unsigned int cpu)
{
	atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
	int sync = atomic_read(st);

	do {
		/* CPU can have reported dead already. Don't overwrite that! */
		if (sync == SYNC_STATE_DEAD)
			break;
	} while (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_SHOULD_DIE));

	if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) {
		/* CPU reached dead state. Invoke the cleanup function */
		arch_cpuhp_cleanup_dead_cpu(cpu);
		return;
	}

	/* No further action possible. Emit message and give up. */
	pr_err("CPU%u failed to report dead state\n", cpu);
}
#else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */
static inline void cpuhp_bp_sync_dead(unsigned int cpu) { }
#endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */

#ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL
/**
 * cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive
 *
 * Updates the AP synchronization state to SYNC_STATE_ALIVE and waits
 * for the BP to release it.
 */
void cpuhp_ap_sync_alive(void)
{
	atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);

	cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);

	/* Wait for the control CPU to release it. */
	while (atomic_read(st) != SYNC_STATE_SHOULD_ONLINE)
		cpu_relax();
}

static bool cpuhp_can_boot_ap(unsigned int cpu)
{
	atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
	int sync = atomic_read(st);

again:
	switch (sync) {
	case SYNC_STATE_DEAD:
		/* CPU is properly dead */
		break;
	case SYNC_STATE_KICKED:
		/* CPU did not come up in previous attempt */
		break;
	case SYNC_STATE_ALIVE:
		/* CPU is stuck in cpuhp_ap_sync_alive(). */
		break;
	default:
		/* CPU failed to report online or dead and is in limbo state. */
		return false;
	}

	/* Prepare for booting */
	if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED))
		goto again;

	return true;
}

void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { }

/*
 * Early CPU bringup synchronization point. Cannot use cpuhp_state::done_up
 * because the AP cannot issue complete() so early in the bringup.
 */
static int cpuhp_bp_sync_alive(unsigned int cpu)
{
	int ret = 0;

	if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SYNC_FULL))
		return 0;

	if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {
		pr_err("CPU%u failed to report alive state\n", cpu);
		ret = -EIO;
	}

	/* Let the architecture cleanup the kick alive mechanics. */
	arch_cpuhp_cleanup_kick_cpu(cpu);
	return ret;
}
#else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */
static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; }
static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; }
#endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

static bool cpu_hotplug_offline_disabled __ro_after_init;

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
	return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	/*
	 * We can't have hotplug operations before userspace starts running,
	 * and some init codepaths will knowingly not take the hotplug lock.
	 * This is all valid, so mute lockdep until it makes sense to report
	 * unheld locks.
	 */
	if (system_state < SYSTEM_RUNNING)
		return;

	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(lockdep_assert_cpus_held);

#ifdef CONFIG_LOCKDEP
int lockdep_is_cpus_held(void)
{
	return percpu_rwsem_is_held(&cpu_hotplug_lock);
}
#endif

static void lockdep_acquire_cpus_lock(void)
{
	rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
}

static void lockdep_release_cpus_lock(void)
{
	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}

/* Declare CPU offlining not supported */
void cpu_hotplug_disable_offlining(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_offline_disabled = true;
	cpu_maps_update_done();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);

#else

static void lockdep_acquire_cpus_lock(void) { }
static void lockdep_release_cpus_lock(void) { }

#endif	/* CONFIG_HOTPLUG_CPU */
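The cpus_read_lock()/cpus_read_unlock() pair above is the read side of the hotplug lock: code that walks or depends on cpu_online_mask can take it to hold off concurrent onlining and offlining. A minimal, hypothetical sketch of that pattern (the mydrv_* name is illustrative):

static void mydrv_visit_online_cpus(void)
{
	unsigned int cpu;

	cpus_read_lock();		/* block hotplug while walking the mask */
	for_each_online_cpu(cpu)
		pr_info("CPU%u is online\n", cpu);
	cpus_read_unlock();
}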
/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT

enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
static unsigned int cpu_smt_max_threads __ro_after_init;
unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX;

void __init cpu_smt_disable(bool force)
{
	if (!cpu_smt_possible())
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		pr_info("SMT: disabled\n");
		cpu_smt_control = CPU_SMT_DISABLED;
	}
	cpu_smt_num_threads = 1;
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_set_num_threads(unsigned int num_threads,
				    unsigned int max_threads)
{
	WARN_ON(!num_threads || (num_threads > max_threads));

	if (max_threads == 1)
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;

	cpu_smt_max_threads = max_threads;

	/*
	 * If SMT has been disabled via the kernel command line or SMT is
	 * not supported, set cpu_smt_num_threads to 1 for consistency.
	 * If enabled, take the architecture requested number of threads
	 * to bring up into account.
	 */
	if (cpu_smt_control != CPU_SMT_ENABLED)
		cpu_smt_num_threads = 1;
	else if (num_threads < cpu_smt_num_threads)
		cpu_smt_num_threads = num_threads;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);

/*
 * For architectures supporting partial SMT states, check whether the thread
 * is allowed. Otherwise this has already been checked through
 * cpu_smt_max_threads when setting the SMT level.
 */
static inline bool cpu_smt_thread_allowed(unsigned int cpu)
{
#ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
	return topology_smt_thread_allowed(cpu);
#else
	return true;
#endif
}

static inline bool cpu_bootable(unsigned int cpu)
{
	if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
		return true;

	/* All CPUs are bootable if controls are not configured */
	if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
		return true;

	/* All CPUs are bootable if CPU is not SMT capable */
	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return true;

	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
	 * core will shutdown the machine.
	 */
	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}

/* Returns true if SMT is supported and not forcefully (irreversibly) disabled */
bool cpu_smt_possible(void)
{
	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);

#else
static inline bool cpu_bootable(unsigned int cpu) { return true; }
#endif
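For reference, booting with "nosmt" on the kernel command line lands in smt_cmdline_disable() above and selects CPU_SMT_DISABLED, which can normally be undone later through sysfs, while "nosmt=force" selects CPU_SMT_FORCE_DISABLED, which is irreversible at runtime. A small, hypothetical sketch of how other code might consult the result (the mydrv_* name is illustrative):

static bool mydrv_wants_smt(void)
{
	/*
	 * cpu_smt_possible() is false if SMT was force-disabled
	 * (nosmt=force) or the hardware is not SMT capable.
	 */
	return cpu_smt_possible();
}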
static inline enum cpuhp_state
cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	bool bringup = st->state < target;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = bringup;
	if (cpu_dying(cpu) != !bringup)
		set_cpu_dying(cpu, !bringup);

	return prev_state;
}

static inline void
cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
		  enum cpuhp_state prev_state)
{
	bool bringup = !st->bringup;

	st->target = prev_state;

	/*
	 * Already rolling back. No need to invert the bringup value or to
	 * change the current state.
	 */
	if (st->rollback)
		return;

	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->bringup = bringup;
	if (cpu_dying(cpu) != !bringup)
		set_cpu_dying(cpu, !bringup);
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() in cpuhp_thread_fun().
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
			 enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(cpu, st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(cpu, st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}

static int bringup_wait_for_ap_online(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the hotplug thread of the target cpu */
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_bootable() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_bootable(cpu))
		return -ECANCELED;
	return 0;
}

#ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
static int cpuhp_kick_ap_alive(unsigned int cpu)
{
	if (!cpuhp_can_boot_ap(cpu))
		return -EAGAIN;

	return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu));
}

static int cpuhp_bringup_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	ret = cpuhp_bp_sync_alive(cpu);
	if (ret)
		goto out_unlock;

	ret = bringup_wait_for_ap_online(cpu);
	if (ret)
		goto out_unlock;

	irq_unlock_sparse();

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(cpu, st, st->target);

out_unlock:
	irq_unlock_sparse();
	return ret;
}
#else
static int bringup_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	if (!cpuhp_can_boot_ap(cpu))
		return -EAGAIN;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 *
	 * Prevent irq alloc/free across the bringup by acquiring the
	 * sparse irq lock. Hold it until the upcoming CPU completes the
	 * startup in cpuhp_online_idle(), which avoids intermediate
	 * synchronization points in the architecture code.
	 */
	irq_lock_sparse();

	ret = __cpu_up(cpu, idle);
	if (ret)
		goto out_unlock;

	ret = cpuhp_bp_sync_alive(cpu);
	if (ret)
		goto out_unlock;

	ret = bringup_wait_for_ap_online(cpu);
	if (ret)
		goto out_unlock;

	irq_unlock_sparse();

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(cpu, st, st->target);

out_unlock:
	irq_unlock_sparse();
	return ret;
}
#endif

static int finish_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	struct mm_struct *mm = idle->active_mm;

	/*
	 * sched_force_init_mm() ensured the use of &init_mm,
	 * drop that refcount now that the CPU has stopped.
	 */
	WARN_ON(mm != &init_mm);
	idle->active_mm = NULL;
	mmdrop_lazy_tlb(mm);

	return 0;
}

/*
 * Hotplug state machine related functions
 */

/*
 * Get the next state to run. Empty ones will be skipped. Returns true if a
 * state must be run.
 *
 * st->state will be modified ahead of time, to match state_to_run, as if it
 * had already run.
 */
static bool cpuhp_next_state(bool bringup,
			     enum cpuhp_state *state_to_run,
			     struct cpuhp_cpu_state *st,
			     enum cpuhp_state target)
{
	do {
		if (bringup) {
			if (st->state >= target)
				return false;

			*state_to_run = ++st->state;
		} else {
			if (st->state <= target)
				return false;

			*state_to_run = st->state--;
		}

		if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
			break;
	} while (true);

	return true;
}

static int __cpuhp_invoke_callback_range(bool bringup,
					 unsigned int cpu,
					 struct cpuhp_cpu_state *st,
					 enum cpuhp_state target,
					 bool nofail)
{
	enum cpuhp_state state;
	int ret = 0;

	while (cpuhp_next_state(bringup, &state, st, target)) {
		int err;

		err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
		if (!err)
			continue;

		if (nofail) {
			pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
				cpu, bringup ? "UP" : "DOWN",
				cpuhp_get_step(st->state)->name,
				st->state, err);
			ret = -1;
		} else {
			ret = err;
			break;
		}
	}

	return ret;
}

static inline int cpuhp_invoke_callback_range(bool bringup,
					      unsigned int cpu,
					      struct cpuhp_cpu_state *st,
					      enum cpuhp_state target)
{
	return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
}

static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
						      unsigned int cpu,
						      struct cpuhp_cpu_state *st,
						      enum cpuhp_state target)
{
	__cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return true;
	/*
	 * When CPU hotplug is disabled, taking the CPU down is not
	 * possible because takedown_cpu() and the architecture and
	 * subsystem specific mechanisms are not available. So the CPU
	 * which would be completely unplugged again needs to stay around
	 * in the current state.
	 */
	return st->state <= CPUHP_BRINGUP_CPU;
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
	if (ret) {
		pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
			 ret, cpu, cpuhp_get_step(st->state)->name,
			 st->state);

		cpuhp_reset_state(cpu, st, prev_state);
		if (can_rollback_cpu(st))
			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
							    prev_state));
	}
	return ret;
}
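The state walk above is easiest to see with concrete numbers. A self-contained, userspace-style illustration (plain C, not kernel code; the toy state values and names are made up) of how the cursor advances on bringup and retreats on teardown while skipping empty states:

#include <stdio.h>
#include <stdbool.h>

/* Toy model: states 0..5, where state 3 has no callback installed. */
static bool toy_step_empty(int state) { return state == 3; }

static bool toy_next_state(bool bringup, int *state_to_run, int *cursor,
			   int target)
{
	do {
		if (bringup) {
			if (*cursor >= target)
				return false;
			*state_to_run = ++(*cursor);	/* advance, then run */
		} else {
			if (*cursor <= target)
				return false;
			*state_to_run = (*cursor)--;	/* run, then retreat */
		}
	} while (toy_step_empty(*state_to_run));	/* skip empty states */
	return true;
}

int main(void)
{
	int cursor = 0, run;

	while (toy_next_state(true, &run, &cursor, 5))
		printf("bringup runs state %d\n", run);		/* 1 2 4 5 */
	while (toy_next_state(false, &run, &cursor, 0))
		printf("teardown runs state %d\n", run);	/* 5 4 2 1 */
	return 0;
}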
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	if (WARN_ON_ONCE(!st->should_run))
		return;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	/*
	 * The BP holds the hotplug lock, but we're now running on the AP,
	 * ensure that anybody asserting the lock is held, will actually find
	 * it so.
	 */
	lockdep_acquire_cpus_lock();
	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
		if (!st->should_run)
			goto end;
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

end:
	cpuhp_lock_release(bringup);
	lockdep_release_cpus_lock();

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(cpu, st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

static __init void cpuhp_init_state(void)
{
	struct cpuhp_cpu_state *st;
	int cpu;

	for_each_possible_cpu(cpu) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		init_completion(&st->done_up);
		init_completion(&st->done_down);
	}
}

void __init cpuhp_threads_init(void)
{
	cpuhp_init_state();
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
#endif

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		arch_clear_mm_cpumask_cpu(cpu, t->mm);
		task_unlock(t);
	}
	rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
	 * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
	 */
	WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));

	/*
	 * Invoke the former CPU_DYING callbacks. DYING must not fail!
	 */
	cpuhp_invoke_callback_range_nofail(false, cpu, st, target);

	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(st->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(st->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	cpuhp_bp_sync_dead(cpu);

	lockdep_cleanup_dead_cpu(cpu, idle_thread_get(cpu));

	/*
	 * Callbacks must be re-integrated right away to the RCU state machine.
	 * Otherwise an RCU callback could block a further teardown function
	 * waiting for its completion.
	 */
	rcutree_migrate_callbacks(cpu);

	return 0;
}
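take_cpu_down() runs in stop-machine context, so the dying CPU cannot race with anything else while it disables itself. A hedged, minimal sketch of the same primitive used standalone (quiesce_fn and run_quiesced are hypothetical; stop_machine_cpuslocked() must be called with the hotplug lock held, hence the cpus_read_lock() pair):

static int quiesce_fn(void *data)
{
	/* Runs with IRQs disabled while all other CPUs spin */
	return 0;
}

static int run_quiesced(unsigned int cpu)
{
	int err;

	cpus_read_lock();
	err = stop_machine_cpuslocked(quiesce_fn, NULL, cpumask_of(cpu));
	cpus_read_unlock();
	return err;
}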
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	tick_assert_timekeeping_handover();
	rcutree_report_cpu_dead();
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcutree_report_cpu_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
	if (ret) {
		pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
			 ret, cpu, cpuhp_get_step(st->state)->name,
			 st->state);

		cpuhp_reset_state(cpu, st, prev_state);

		if (st->state < prev_state)
			WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
							    prev_state));
	}

	return ret;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(cpu, st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state < prev_state) {
		if (st->state == CPUHP_TEARDOWN_CPU) {
			cpuhp_reset_state(cpu, st, prev_state);
			__cpuhp_kick_ap(st);
		} else {
			WARN(1, "DEAD callback error for CPU%d", cpu);
		}
	}

out:
	cpus_write_unlock();
	arch_smt_update();
	return ret;
}

struct cpu_down_work {
	unsigned int		cpu;
	enum cpuhp_state	target;
};

static long __cpu_down_maps_locked(void *arg)
{
	struct cpu_down_work *work = arg;

	return _cpu_down(work->cpu, 0, work->target);
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	struct cpu_down_work work = { .cpu = cpu, .target = target, };

	/*
	 * If the platform does not support hotplug, report it explicitly to
	 * differentiate it from a transient offlining failure.
	 */
	if (cpu_hotplug_offline_disabled)
		return -EOPNOTSUPP;
	if (cpu_hotplug_disabled)
		return -EBUSY;

	/*
	 * Ensure that the control task does not run on the to be offlined
	 * CPU to prevent a deadlock against cfs_b->period_timer.
	 * Also keep at least one housekeeping cpu onlined to avoid generating
	 * an empty sched_domain span.
	 */
	for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
		if (cpu != work.cpu)
			return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
	}
	return -EBUSY;
}

static int cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}
/**
 * cpu_device_down - Bring down a cpu device
 * @dev: Pointer to the cpu device to offline
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use remove_cpu() instead.
 *
 * Return: %0 on success or a negative errno code
 */
int cpu_device_down(struct device *dev)
{
	return cpu_down(dev->id, CPUHP_OFFLINE);
}

int remove_cpu(unsigned int cpu)
{
	int ret;

	lock_device_hotplug();
	ret = device_offline(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(remove_cpu);

void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
{
	unsigned int cpu;
	int error;

	cpu_maps_update_begin();

	/*
	 * Make certain the cpu I'm about to reboot on is online.
	 *
	 * This is in line with what migrate_to_reboot_cpu() already does.
	 */
	if (!cpu_online(primary_cpu))
		primary_cpu = cpumask_first(cpu_online_mask);

	for_each_online_cpu(cpu) {
		if (cpu == primary_cpu)
			continue;

		error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (error) {
			pr_err("Failed to offline CPU%d - error=%d",
				cpu, error);
			break;
		}
	}

	/*
	 * Ensure all but the reboot CPU are offline.
	 */
	BUG_ON(num_online_cpus() > 1);

	/*
	 * Make sure the CPUs won't be enabled by someone else after this
	 * point. Kexec will reboot to a new kernel shortly resetting
	 * everything along the way.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
}

#else
#define takedown_cpu		NULL
#endif /*CONFIG_HOTPLUG_CPU*/
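remove_cpu() and its counterpart add_cpu() (defined further below) are the interfaces other subsystems are expected to use; they go through the device core so that sysfs online state stays consistent. A hedged sketch of a round trip (recycle_cpu() is a hypothetical helper, not part of this file):

static int recycle_cpu(unsigned int cpu)
{
	int ret;

	ret = remove_cpu(cpu);		/* device_offline() -> _cpu_down() */
	if (ret)
		return ret;
	return add_cpu(cpu);		/* device_online() -> _cpu_up() */
}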
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	rcutree_report_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	cpumask_set_cpu(cpu, &cpus_booted_once_mask);

	/*
	 * STARTING must not fail!
	 */
	cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * hotplug thread of the upcoming CPU up and then delegates the rest of the
 * online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	cpuhp_ap_update_sync_state(SYNC_STATE_ONLINE);

	/*
	 * Unpark the stopper thread before we start the idle loop (and start
	 * scheduling); this ensures the stopper task is always available.
	 */
	stop_machine_unpark(smp_processor_id());

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of cpu_up() might have raced with another
	 * caller. Nothing to do.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}

		/*
		 * Reset stale stack state from the last time this CPU was online.
		 */
		scs_task_reset(idle);
		kasan_unpoison_task_stack(idle);
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(cpu, st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	arch_smt_update();
	return ret;
}

static int cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_bootable(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

/**
 * cpu_device_up - Bring up a cpu device
 * @dev: Pointer to the cpu device to online
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use add_cpu() instead.
 *
 * Return: %0 on success or a negative errno code
 */
int cpu_device_up(struct device *dev)
{
	return cpu_up(dev->id, CPUHP_ONLINE);
}

int add_cpu(unsigned int cpu)
{
	int ret;

	lock_device_hotplug();
	ret = device_online(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(add_cpu);

/**
 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
 * @sleep_cpu: The cpu we hibernated on and should be brought up.
 *
 * On some architectures like arm64, we can hibernate on any CPU, but on
 * wake up the CPU we hibernated on might be offline as a side effect of
 * using maxcpus= for example.
 *
 * Return: %0 on success or a negative errno code
 */
int bringup_hibernate_cpu(unsigned int sleep_cpu)
{
	int ret;

	if (!cpu_online(sleep_cpu)) {
		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
		ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
		if (ret) {
			pr_err("Failed to bring hibernate-CPU up!\n");
			return ret;
		}
	}
	return 0;
}

static void __init cpuhp_bringup_mask(const struct cpumask *mask,
				      unsigned int ncpus,
				      enum cpuhp_state target)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

		if (cpu_up(cpu, target) && can_rollback_cpu(st)) {
			/*
			 * If this failed then cpu_up() might have only
			 * rolled back to CPUHP_BP_KICK_AP for the final
			 * online. Clean it up. NOOP if already rolled back.
			 */
			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
							    CPUHP_OFFLINE));
		}

		if (!--ncpus)
			break;
	}
}

#ifdef CONFIG_HOTPLUG_PARALLEL
static bool __cpuhp_parallel_bringup __ro_after_init = true;

static int __init parallel_bringup_parse_param(char *arg)
{
	return kstrtobool(arg, &__cpuhp_parallel_bringup);
}
early_param("cpuhp.parallel", parallel_bringup_parse_param);

#ifdef CONFIG_HOTPLUG_SMT
static inline bool cpuhp_smt_aware(void)
{
	return cpu_smt_max_threads > 1;
}

static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
{
	return cpu_primary_thread_mask;
}
#else
static inline bool cpuhp_smt_aware(void)
{
	return false;
}
static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
{
	return cpu_none_mask;
}
#endif

bool __weak arch_cpuhp_init_parallel_bringup(void)
{
	return true;
}

/*
 * On architectures which have enabled parallel bringup this invokes all BP
 * prepare states for each of the to be onlined APs first. The last state
 * sends the startup IPI to the APs. The APs proceed through the low level
 * bringup code in parallel and then wait for the control CPU to release
 * them one by one for the final onlining procedure.
 *
 * This avoids waiting for each AP to respond to the startup IPI in
 * CPUHP_BRINGUP_CPU.
 */
static bool __init cpuhp_bringup_cpus_parallel(unsigned int ncpus)
{
	const struct cpumask *mask = cpu_present_mask;

	if (__cpuhp_parallel_bringup)
		__cpuhp_parallel_bringup = arch_cpuhp_init_parallel_bringup();
	if (!__cpuhp_parallel_bringup)
		return false;

	if (cpuhp_smt_aware()) {
		const struct cpumask *pmask = cpuhp_get_primary_thread_mask();
		static struct cpumask tmp_mask __initdata;

		/*
		 * X86 requires that SMT siblings are not stopped while the
		 * primary thread does a microcode update, for various
		 * reasons. Bring the primary threads up first.
		 */
		cpumask_and(&tmp_mask, mask, pmask);
		cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_BP_KICK_AP);
		cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_ONLINE);
		/* Account for the online CPUs */
		ncpus -= num_online_cpus();
		if (!ncpus)
			return true;
		/* Create the mask for secondary CPUs */
		cpumask_andnot(&tmp_mask, mask, pmask);
		mask = &tmp_mask;
	}

	/* Bring the not-yet started CPUs up */
	cpuhp_bringup_mask(mask, ncpus, CPUHP_BP_KICK_AP);
	cpuhp_bringup_mask(mask, ncpus, CPUHP_ONLINE);
	return true;
}
#else
static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; }
#endif /* CONFIG_HOTPLUG_PARALLEL */

void __init bringup_nonboot_cpus(unsigned int max_cpus)
{
	if (!max_cpus)
		return;

	/* Try parallel bringup optimization if enabled */
	if (cpuhp_bringup_cpus_parallel(max_cpus))
		return;

	/* Full per CPU serialized bringup */
	cpuhp_bringup_mask(cpu_present_mask, max_cpus, CPUHP_ONLINE);
}

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (primary == -1) {
		primary = cpumask_first(cpu_online_mask);
		if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
			primary = housekeeping_any_cpu(HK_TYPE_TIMER);
	} else {
		if (!cpu_online(primary))
			primary = cpumask_first(cpu_online_mask);
	}

	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for (cpu = nr_cpu_ids - 1; cpu >= 0; cpu--) {
		if (!cpu_online(cpu) || cpu == primary)
			continue;

		if (pm_wakeup_pending()) {
			pr_info("Wakeup pending. Abort CPU freeze\n");
			error = -EBUSY;
			break;
		}

		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all freeze_secondary_cpus() users are
	 * supposed to do thaw_secondary_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_thaw_secondary_cpus_begin(void) { }

void __weak arch_thaw_secondary_cpus_end(void) { }

void thaw_secondary_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_thaw_secondary_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_thaw_secondary_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */
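The table below covers the fixed states; between CPUHP_AP_ONLINE_DYN and its _END marker sits the dynamically allocated range handed out by cpuhp_reserve_state() further down. A hedged sketch of how a typical driver registers a dynamic online state (the mydrv_* names are hypothetical; cpuhp_setup_state() is the public wrapper declared in <linux/cpuhotplug.h>):

static enum cpuhp_state mydrv_online_state;

static int mydrv_cpu_online(unsigned int cpu)
{
	return 0;	/* set up per-CPU resources here */
}

static int mydrv_cpu_offline(unsigned int cpu)
{
	return 0;	/* tear them down again */
}

static int __init mydrv_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				mydrv_cpu_online, mydrv_cpu_offline);
	if (ret < 0)
		return ret;
	/* For the dynamic range, the allocated slot is returned */
	mydrv_online_state = ret;
	return 0;
}

The startup callback runs on every currently online CPU at registration time and on each CPU that comes online afterwards; cpuhp_remove_state() undoes the registration.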
	 */
	[CPUHP_TIMERS_PREPARE] = { .name = "timers:prepare", .startup.single = timers_prepare_cpu, .teardown.single = timers_dead_cpu, },

#ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
	/*
	 * Kicks the AP alive. The AP will wait in cpuhp_ap_sync_alive()
	 * until the next step releases it.
	 */
	[CPUHP_BP_KICK_AP] = { .name = "cpu:kick_ap", .startup.single = cpuhp_kick_ap_alive, },

	/*
	 * Waits for the AP to reach cpuhp_ap_sync_alive() and then
	 * releases it for the complete bringup.
	 */
	[CPUHP_BRINGUP_CPU] = { .name = "cpu:bringup", .startup.single = cpuhp_bringup_ap, .teardown.single = finish_cpu, .cant_stop = true, },
#else
	/* All-in-one CPU bringup state which includes the kick alive. */
	[CPUHP_BRINGUP_CPU] = { .name = "cpu:bringup", .startup.single = bringup_cpu, .teardown.single = finish_cpu, .cant_stop = true, },
#endif
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = { .name = "idle:dead", },
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = { .name = "ap:offline", .cant_stop = true, },
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = { .name = "sched:starting", .startup.single = sched_cpu_starting, .teardown.single = sched_cpu_dying, },
	[CPUHP_AP_RCUTREE_DYING] = { .name = "RCU/tree:dying", .startup.single = NULL, .teardown.single = rcutree_dying_cpu, },
	[CPUHP_AP_SMPCFD_DYING] = { .name = "smpcfd:dying", .startup.single = NULL, .teardown.single = smpcfd_dying_cpu, },
	[CPUHP_AP_HRTIMERS_DYING] = { .name = "hrtimers:dying", .startup.single = hrtimers_cpu_starting, .teardown.single = hrtimers_cpu_dying, },
	[CPUHP_AP_TICK_DYING] = { .name = "tick:dying", .startup.single = NULL, .teardown.single = tick_cpu_dying, },
	/*
	 * Entry state on starting. Interrupts enabled from here on.
	 * Transient state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = { .name = "ap:online", },
	/*
	 * Handled on the control processor until the plugged processor
	 * manages this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = { .name = "cpu:teardown", .startup.single = NULL, .teardown.single = takedown_cpu, .cant_stop = true, },
	[CPUHP_AP_SCHED_WAIT_EMPTY] = { .name = "sched:waitempty", .startup.single = NULL, .teardown.single = sched_cpu_wait_empty, },
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = { .name = "smpboot/threads:online", .startup.single = smpboot_unpark_threads, .teardown.single = smpboot_park_threads, },
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = { .name = "irq/affinity:online", .startup.single = irq_affinity_online_cpu, .teardown.single = NULL, },
	[CPUHP_AP_PERF_ONLINE] = { .name = "perf:online", .startup.single = perf_event_init_cpu, .teardown.single = perf_event_exit_cpu, },
	[CPUHP_AP_WATCHDOG_ONLINE] = { .name = "lockup_detector:online", .startup.single = lockup_detector_online_cpu, .teardown.single = lockup_detector_offline_cpu, },
	[CPUHP_AP_WORKQUEUE_ONLINE] = { .name = "workqueue:online", .startup.single = workqueue_online_cpu, .teardown.single = workqueue_offline_cpu, },
	[CPUHP_AP_RANDOM_ONLINE] = { .name = "random:online", .startup.single = random_online_cpu, .teardown.single = NULL, },
	[CPUHP_AP_RCUTREE_ONLINE] = { .name = "RCU/tree:online", .startup.single = rcutree_online_cpu, .teardown.single = rcutree_offline_cpu, },
#endif
	/* The dynamically registered state space starts here */
#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = { .name = "sched:active", .startup.single = sched_cpu_activate, .teardown.single = sched_cpu_deactivate, },
#endif
	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = { .name = "online", .startup.single = NULL, .teardown.single = NULL, },
};

/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

/*
 * Returns a free slot for dynamic state assignment. The states are
 * protected by the cpuhp_slot_states mutex and an empty slot is identified
 * by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to-be-cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if (cpuhp_step_empty(bringup, sp))
		return 0;
	/*
	 * The non-AP-bound callbacks can fail on bringup. On teardown,
	 * e.g. module removal, we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}

int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);

/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the step
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Return:
 *   On success:
 *     Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;
 *     0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,
	 * return the dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);

int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();
	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);

/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu.
	 * This function is not allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);

#ifdef CONFIG_HOTPLUG_SMT
static void cpuhp_offline_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = true;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}

static void cpuhp_online_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = false;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}

int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		if (topology_is_primary_thread(cpu))
			continue;
		/*
		 * Disable can be called with CPU_SMT_ENABLED when changing
		 * from a higher to a lower number of SMT threads per core.
		 */
		if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
			continue;
		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (ret)
			break;
		/*
		 * As this needs to hold the cpu maps lock, it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down(), which takes the cpu maps lock. The cpu maps
		 * lock needs to be held as this might race against in-kernel
		 * abusers of the hotplug machinery (thermal management).
		 *
		 * So nothing would update the device:offline state. That
		 * would leave the sysfs entry stale and prevent onlining
		 * after smt control has been changed to 'off' again. This
		 * is called under the sysfs hotplug lock, so it is properly
		 * serialized against the regular offline usage.
		 */
		cpuhp_offline_cpu_device(cpu);
	}
	if (!ret)
		cpu_smt_control = ctrlval;
	cpu_maps_update_done();
	return ret;
}

/* Check if the core a CPU belongs to is online */
#if !defined(topology_is_core_online)
static inline bool topology_is_core_online(unsigned int cpu)
{
	return true;
}
#endif

int cpuhp_smt_enable(void)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	cpu_smt_control = CPU_SMT_ENABLED;
	for_each_present_cpu(cpu) {
		/* Skip online CPUs and CPUs on offline nodes */
		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
			continue;
		if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu))
			continue;
		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
		if (ret)
			break;
		/* See comment in cpuhp_smt_disable() */
		cpuhp_online_cpu_device(cpu);
	}
	cpu_maps_update_done();
	return ret;
}
#endif

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t target_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = cpu_up(dev->id, target);
	else if (st->state > target)
		ret = cpu_down(dev->id, target);
	else if (WARN_ON(st->target != target))
		st->target = target;
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t target_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR_RW(target);

static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	if (fail == CPUHP_INVALID) {
		st->fail = fail;
		return count;
	}

	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
		return -EINVAL;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * DEAD callbacks cannot fail...
	 * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
	 * triggering STARTING callbacks, a failure in this state would
	 * hinder rollback.
	 */
	if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t fail_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}
static DEVICE_ATTR_RW(fail);

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};

static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR_RO(states);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};

#ifdef CONFIG_HOTPLUG_SMT

static bool cpu_smt_num_threads_valid(unsigned int threads)
{
	if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))
		return threads >= 1 && threads <= cpu_smt_max_threads;
	return threads == 1 || threads == cpu_smt_max_threads;
}

static ssize_t __store_smt_control(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int ctrlval, ret, num_threads, orig_threads;
	bool force_off;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
		return -EPERM;

	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return -ENODEV;

	if (sysfs_streq(buf, "on")) {
		ctrlval = CPU_SMT_ENABLED;
		num_threads = cpu_smt_max_threads;
	} else if (sysfs_streq(buf, "off")) {
		ctrlval = CPU_SMT_DISABLED;
		num_threads = 1;
	} else if (sysfs_streq(buf, "forceoff")) {
		ctrlval = CPU_SMT_FORCE_DISABLED;
		num_threads = 1;
	} else if (kstrtoint(buf, 10, &num_threads) == 0) {
		if (num_threads == 1)
			ctrlval = CPU_SMT_DISABLED;
		else if (cpu_smt_num_threads_valid(num_threads))
			ctrlval = CPU_SMT_ENABLED;
		else
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	orig_threads = cpu_smt_num_threads;
	cpu_smt_num_threads = num_threads;

	force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED;

	if (num_threads > orig_threads)
		ret = cpuhp_smt_enable();
	else if (num_threads < orig_threads || force_off)
		ret = cpuhp_smt_disable(ctrlval);

	unlock_device_hotplug();
	return ret ? ret : count;
}

#else /* !CONFIG_HOTPLUG_SMT */
static ssize_t __store_smt_control(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	return -ENODEV;
}
#endif /* CONFIG_HOTPLUG_SMT */

static const char *smt_states[] = {
	[CPU_SMT_ENABLED]		= "on",
	[CPU_SMT_DISABLED]		= "off",
	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
	[CPU_SMT_NOT_IMPLEMENTED]	= "notimplemented",
};

static ssize_t control_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const char *state = smt_states[cpu_smt_control];

#ifdef CONFIG_HOTPLUG_SMT
	/*
	 * If SMT is enabled but not all threads are enabled then show the
	 * number of threads. If all threads are enabled show "on". Otherwise
	 * show the state name.
	 */
	if (cpu_smt_control == CPU_SMT_ENABLED &&
	    cpu_smt_num_threads != cpu_smt_max_threads)
		return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);
#endif

	return sysfs_emit(buf, "%s\n", state);
}

static ssize_t control_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	return __store_smt_control(dev, attr, buf, count);
}
static DEVICE_ATTR_RW(control);

static ssize_t active_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", sched_smt_active());
}
static DEVICE_ATTR_RO(active);

static struct attribute *cpuhp_smt_attrs[] = {
	&dev_attr_control.attr,
	&dev_attr_active.attr,
	NULL
};

static const struct attribute_group cpuhp_smt_attr_group = {
	.attrs = cpuhp_smt_attrs,
	.name = "smt",
};

static int __init cpu_smt_sysfs_init(void)
{
	struct device *dev_root;
	int ret = -ENODEV;

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group);
		put_device(dev_root);
	}
	return ret;
}

static int __init cpuhp_sysfs_init(void)
{
	struct device *dev_root;
	int cpu, ret;

	ret = cpu_smt_sysfs_init();
	if (ret)
		return ret;

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		ret = sysfs_create_group(&dev_root->kobj,
					 &cpuhp_cpu_root_attr_group);
		put_device(dev_root);
		if (ret)
			return ret;
	}

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __ro_after_init = {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __ro_after_init;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_enabled_mask __read_mostly;
EXPORT_SYMBOL(__cpu_enabled_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

struct cpumask __cpu_dying_mask __read_mostly;
EXPORT_SYMBOL(__cpu_dying_mask);

atomic_t __num_online_cpus __read_mostly;
EXPORT_SYMBOL(__num_online_cpus);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void set_cpu_online(unsigned int cpu, bool online)
{
	/*
	 * atomic_inc/dec() is required to handle the horrid abuse of this
	 * function by the reboot and kexec code which invoke it from
	 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
	 * regular CPU hotplug is properly serialized.
	 *
	 * Note that the fact that __num_online_cpus is of type atomic_t
	 * does not protect readers which are not serialized against
	 * concurrent hotplug operations.
	 */
	if (online) {
		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
			atomic_inc(&__num_online_cpus);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
			atomic_dec(&__num_online_cpus);
	}
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
	atomic_set(this_cpu_ptr(&cpuhp_state.ap_sync_state), SYNC_STATE_ONLINE);
#endif
	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
	this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
}

#ifdef CONFIG_CPU_MITIGATIONS
/*
 * All except the cross-thread attack vector are mitigated by default.
 * Cross-thread mitigation often requires disabling SMT, which is expensive,
 * so cross-thread mitigations are only partially enabled by default.
 *
 * Guest-to-Host and Guest-to-Guest vectors are only needed if KVM support is
 * present.
 */
static bool attack_vectors[NR_CPU_ATTACK_VECTORS] __ro_after_init = {
	[CPU_MITIGATE_USER_KERNEL]	= true,
	[CPU_MITIGATE_USER_USER]	= true,
	[CPU_MITIGATE_GUEST_HOST]	= IS_ENABLED(CONFIG_KVM),
	[CPU_MITIGATE_GUEST_GUEST]	= IS_ENABLED(CONFIG_KVM),
};

bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v)
{
	if (v < NR_CPU_ATTACK_VECTORS)
		return attack_vectors[v];
	WARN_ONCE(1, "Invalid attack vector %d\n", v);
	return false;
}

/*
 * There are 3 global options, 'off', 'auto', 'auto,nosmt'. These may
 * optionally be combined with attack-vector disables which follow them.
 *
 * Examples:
 *   mitigations=auto,no_user_kernel,no_user_user,no_cross_thread
 *   mitigations=auto,nosmt,no_guest_host,no_guest_guest
 *
 * mitigations=off is equivalent to disabling all attack vectors.
 */
enum cpu_mitigations {
	CPU_MITIGATIONS_OFF,
	CPU_MITIGATIONS_AUTO,
	CPU_MITIGATIONS_AUTO_NOSMT,
};

enum {
	NO_USER_KERNEL,
	NO_USER_USER,
	NO_GUEST_HOST,
	NO_GUEST_GUEST,
	NO_CROSS_THREAD,
	NR_VECTOR_PARAMS,
};

enum smt_mitigations smt_mitigations __ro_after_init = SMT_MITIGATIONS_AUTO;
static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;

static const match_table_t global_mitigations = {
	{ CPU_MITIGATIONS_AUTO_NOSMT,	"auto,nosmt"	},
	{ CPU_MITIGATIONS_AUTO,		"auto"		},
	{ CPU_MITIGATIONS_OFF,		"off"		},
};

static const match_table_t vector_mitigations = {
	{ NO_USER_KERNEL,	"no_user_kernel"	},
	{ NO_USER_USER,		"no_user_user"		},
	{ NO_GUEST_HOST,	"no_guest_host"		},
	{ NO_GUEST_GUEST,	"no_guest_guest"	},
	{ NO_CROSS_THREAD,	"no_cross_thread"	},
	{ NR_VECTOR_PARAMS,	NULL			},
};

static int __init mitigations_parse_global_opt(char *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(global_mitigations); i++) {
		const char *pattern = global_mitigations[i].pattern;

		if (!strncmp(arg, pattern, strlen(pattern))) {
			cpu_mitigations = global_mitigations[i].token;
			return strlen(pattern);
		}
	}
	return 0;
}

static int __init mitigations_parse_cmdline(char *arg)
{
	char *s, *p;
	int len;

	len = mitigations_parse_global_opt(arg);

	if (cpu_mitigations_off()) {
		memset(attack_vectors, 0, sizeof(attack_vectors));
		smt_mitigations = SMT_MITIGATIONS_OFF;
	} else if (cpu_mitigations_auto_nosmt()) {
		smt_mitigations = SMT_MITIGATIONS_ON;
	}

	p = arg + len;
	if (!*p)
		return 0;

	/* Attack vector controls may come after the ',' */
	if (*p++ != ',' || !IS_ENABLED(CONFIG_ARCH_HAS_CPU_ATTACK_VECTORS)) {
		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
			arg);
		return 0;
	}

	while ((s = strsep(&p, ",")) != NULL) {
		switch (match_token(s, vector_mitigations, NULL)) {
		case NO_USER_KERNEL:
			attack_vectors[CPU_MITIGATE_USER_KERNEL] = false;
			break;
		case NO_USER_USER:
			attack_vectors[CPU_MITIGATE_USER_USER] = false;
			break;
		case NO_GUEST_HOST:
			attack_vectors[CPU_MITIGATE_GUEST_HOST] = false;
			break;
		case NO_GUEST_GUEST:
			attack_vectors[CPU_MITIGATE_GUEST_GUEST] = false;
			break;
		case NO_CROSS_THREAD:
			smt_mitigations = SMT_MITIGATIONS_OFF;
			break;
		default:
			pr_crit("Unsupported mitigations options %s\n", s);
			return 0;
		}
	}
	return 0;
}

/* mitigations=off */
bool cpu_mitigations_off(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_OFF;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_off);

/* mitigations=auto,nosmt */
bool cpu_mitigations_auto_nosmt(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
#else
static int __init mitigations_parse_cmdline(char *arg)
{
	pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n");
	return 0;
}
#endif
early_param("mitigations", mitigations_parse_cmdline);
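/*
 * Illustrative sketch, not part of this file: how a client subsystem
 * typically consumes the dynamic state range managed above. The names
 * foo_online()/foo_dead() and "subsys/foo:online" are hypothetical;
 * cpuhp_setup_state()/cpuhp_remove_state() are the convenience wrappers
 * around __cpuhp_setup_state()/__cpuhp_remove_state() declared in
 * <linux/cpuhotplug.h>. With CPUHP_AP_ONLINE_DYN the positive return
 * value is the dynamically allocated state, which must be kept so the
 * state can be removed again on module exit.
 */
#if 0	/* example only, not compiled */
#include <linux/cpuhotplug.h>

static enum cpuhp_state foo_hp_state;

static int foo_online(unsigned int cpu)
{
	/* Set up per-CPU resources; a failure here triggers rollback. */
	return 0;
}

static int foo_dead(unsigned int cpu)
{
	/* Tear down per-CPU resources; teardown must not fail. */
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
				foo_online, foo_dead);
	if (ret < 0)
		return ret;
	foo_hp_state = ret;	/* dynamic range returns the allocated state */
	return 0;
}

static void __exit foo_exit(void)
{
	cpuhp_remove_state(foo_hp_state);
}
#endif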
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPVS         An implementation of the IP virtual server support for the
 *              LINUX operating system. IPVS is now implemented as a module
 *              over the Netfilter framework. IPVS can be used to build a
 *              high-performance and highly available server based on a
 *              cluster of servers.
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Peter Kese <peter.kese@ijs.si>
 *
 * Changes:
 */

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <asm/string.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>

#include <net/ip_vs.h>

EXPORT_SYMBOL(ip_vs_scheduler_err);

/*
 * IPVS scheduler list
 */
static LIST_HEAD(ip_vs_schedulers);

/* lock for the scheduler list */
static DEFINE_MUTEX(ip_vs_sched_mutex);

/*
 * Bind a service to a scheduler
 */
int ip_vs_bind_scheduler(struct ip_vs_service *svc,
			 struct ip_vs_scheduler *scheduler)
{
	int ret;

	if (scheduler->init_service) {
		ret = scheduler->init_service(svc);
		if (ret) {
			pr_err("%s(): init error\n", __func__);
			return ret;
		}
	}
	rcu_assign_pointer(svc->scheduler, scheduler);
	return 0;
}

/*
 * Unbind a service from its scheduler
 */
void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
			    struct ip_vs_scheduler *sched)
{
	struct ip_vs_scheduler *cur_sched;

	cur_sched = rcu_dereference_protected(svc->scheduler, 1);
	/* This check proves that the old 'sched' was installed */
	if (!cur_sched)
		return;

	if (sched->done_service)
		sched->done_service(svc);
	/* svc->scheduler can be set to NULL only by the caller */
}

/*
 * Get a scheduler from the scheduler list by name
 */
static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
{
	struct ip_vs_scheduler *sched;

	IP_VS_DBG(2, "%s(): sched_name \"%s\"\n", __func__, sched_name);

	mutex_lock(&ip_vs_sched_mutex);

	list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
		/*
		 * Test and get the module atomically
		 */
		if (sched->module && !try_module_get(sched->module)) {
			/*
			 * This scheduler is just being deleted
			 */
			continue;
		}
		if (strcmp(sched_name, sched->name) == 0) {
			/* HIT */
			mutex_unlock(&ip_vs_sched_mutex);
			return sched;
		}
		module_put(sched->module);
	}

	mutex_unlock(&ip_vs_sched_mutex);
	return NULL;
}

/*
 * Lookup a scheduler and try to load it if it doesn't exist
 */
struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name)
{
	struct ip_vs_scheduler *sched;

	/*
	 * Search for the scheduler by sched_name
	 */
	sched = ip_vs_sched_getbyname(sched_name);

	/*
	 * If the scheduler is not found, load the module and search again
	 */
	if (sched == NULL) {
		request_module("ip_vs_%s",
			       sched_name);
		sched = ip_vs_sched_getbyname(sched_name);
	}

	return sched;
}

void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
{
	if (scheduler)
		module_put(scheduler->module);
}

/*
 * Common error output helper for schedulers
 */
void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
{
	struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
	char *sched_name = sched ? sched->name : "none";

	if (svc->fwmark) {
		IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
			     sched_name, svc->fwmark, svc->fwmark, msg);
#ifdef CONFIG_IP_VS_IPV6
	} else if (svc->af == AF_INET6) {
		IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
			     sched_name, ip_vs_proto_name(svc->protocol),
			     &svc->addr.in6, ntohs(svc->port), msg);
#endif
	} else {
		IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
			     sched_name, ip_vs_proto_name(svc->protocol),
			     &svc->addr.ip, ntohs(svc->port), msg);
	}
}

/*
 * Register a scheduler in the scheduler list
 */
int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
{
	struct ip_vs_scheduler *sched;

	if (!scheduler) {
		pr_err("%s(): NULL arg\n", __func__);
		return -EINVAL;
	}

	if (!scheduler->name) {
		pr_err("%s(): NULL scheduler_name\n", __func__);
		return -EINVAL;
	}

	/* increase the module use count */
	if (!ip_vs_use_count_inc())
		return -ENOENT;

	mutex_lock(&ip_vs_sched_mutex);

	if (!list_empty(&scheduler->n_list)) {
		mutex_unlock(&ip_vs_sched_mutex);
		ip_vs_use_count_dec();
		pr_err("%s(): [%s] scheduler already linked\n",
		       __func__, scheduler->name);
		return -EINVAL;
	}

	/*
	 * Make sure that a scheduler with this name doesn't already exist
	 * in the scheduler list.
	 */
	list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
		if (strcmp(scheduler->name, sched->name) == 0) {
			mutex_unlock(&ip_vs_sched_mutex);
			ip_vs_use_count_dec();
			pr_err("%s(): [%s] scheduler already existed "
			       "in the system\n", __func__, scheduler->name);
			return -EINVAL;
		}
	}
	/*
	 * Add it into the doubly-linked scheduler list
	 */
	list_add(&scheduler->n_list, &ip_vs_schedulers);
	mutex_unlock(&ip_vs_sched_mutex);

	pr_info("[%s] scheduler registered.\n", scheduler->name);

	return 0;
}

/*
 * Unregister a scheduler from the scheduler list
 */
int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
{
	if (!scheduler) {
		pr_err("%s(): NULL arg\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&ip_vs_sched_mutex);
	if (list_empty(&scheduler->n_list)) {
		mutex_unlock(&ip_vs_sched_mutex);
		pr_err("%s(): [%s] scheduler is not in the list. failed\n",
		       __func__, scheduler->name);
		return -EINVAL;
	}

	/*
	 * Remove it from the doubly-linked scheduler list
	 */
	list_del(&scheduler->n_list);
	mutex_unlock(&ip_vs_sched_mutex);

	/* decrease the module use count */
	ip_vs_use_count_dec();

	pr_info("[%s] scheduler unregistered.\n", scheduler->name);

	return 0;
}
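/*
 * Illustrative sketch, not part of this file: the minimal shape of a
 * scheduler module that registers through the API above, modelled on the
 * in-tree round-robin scheduler (ip_vs_rr.c). The "first" names are
 * hypothetical, and the .schedule callback signature is assumed to match
 * struct ip_vs_scheduler in <net/ip_vs.h>.
 */
#if 0	/* example only, not compiled */
static struct ip_vs_dest *
ip_vs_first_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
		     struct ip_vs_iphdr *iph)
{
	struct ip_vs_dest *dest;

	/* Pick the first usable destination; a real scheduler balances. */
	list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
		if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
		    atomic_read(&dest->weight) > 0)
			return dest;
	}
	ip_vs_scheduler_err(svc, "no destination available");
	return NULL;
}

static struct ip_vs_scheduler ip_vs_first_scheduler = {
	.name		= "first",
	.module		= THIS_MODULE,
	.n_list		= LIST_HEAD_INIT(ip_vs_first_scheduler.n_list),
	.schedule	= ip_vs_first_schedule,
};

static int __init ip_vs_first_init(void)
{
	return register_ip_vs_scheduler(&ip_vs_first_scheduler);
}

static void __exit ip_vs_first_cleanup(void)
{
	unregister_ip_vs_scheduler(&ip_vs_first_scheduler);
	synchronize_rcu();
}

module_init(ip_vs_first_init);
module_exit(ip_vs_first_cleanup);
MODULE_LICENSE("GPL");
#endif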
5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 
5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 
6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254 7255 7256 7257 7258 7259 7260 7261 7262 7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274 7275 7276 7277 7278 7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291 7292 7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 
7313 7314 7315 7316 7317 7318 7319 7320 7321 7322 7323 7324 7325 7326 7327 7328 7329 7330 7331 7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377 7378 7379 7380 7381 7382 7383 7384 7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408 7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429 7430 7431 7432 7433 7434 7435 7436 7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478 7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489 7490 7491 7492 7493 7494 7495 7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523 7524 7525 7526 7527 7528 7529 7530 7531 7532 7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546 7547 7548 7549 7550 7551 7552 7553 7554 7555 7556 7557 7558 7559 7560 7561 7562 7563 7564 7565 7566 7567 7568 7569 7570 7571 7572 7573 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602 7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614 7615 7616 7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649 7650 7651 7652 7653 7654 7655 7656 7657 7658 7659 7660 7661 7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673 7674 7675 7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798 7799 7800 7801 7802 7803 7804 7805 7806 7807 7808 7809 7810 7811 7812 7813 7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830 7831 7832 7833 7834 7835 7836 7837 7838 7839 7840 7841 7842 7843 7844 7845 7846 7847 7848 7849 7850 7851 7852 7853 7854 7855 7856 7857 7858 7859 7860 7861 7862 7863 7864 7865 7866 7867 7868 7869 7870 7871 7872 7873 7874 7875 7876 7877 7878 7879 7880 7881 7882 7883 7884 7885 7886 7887 7888 7889 7890 7891 7892 7893 7894 7895 7896 7897 7898 7899 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 7921 7922 7923 7924 7925 7926 7927 7928 7929 7930 7931 7932 7933 7934 7935 7936 7937 7938 7939 7940 7941 7942 7943 7944 7945 7946 7947 7948 7949 7950 7951 7952 7953 7954 7955 7956 7957 7958 7959 7960 7961 7962 7963 7964 7965 7966 7967 7968 7969 7970 7971 7972 7973 7974 7975 7976 7977 7978 7979 7980 7981 7982 7983 7984 7985 7986 7987 7988 7989 7990 7991 7992 7993 7994 7995 7996 7997 7998 7999 8000 8001 8002 8003 8004 8005 8006 8007 8008 8009 8010 8011 8012 8013 8014 8015 8016 8017 8018 8019 8020 8021 8022 8023 
8024 8025 8026 8027 8028 8029 8030 8031 8032 8033 8034 8035 8036 8037 8038 8039 8040 8041 8042 8043 8044 8045 8046 8047 8048 8049 8050 8051 8052 8053 8054 8055 8056 8057 8058 8059 8060 8061 8062 8063 8064 8065 8066 8067 8068 8069 8070 8071 8072 8073 8074 8075 8076 8077 8078 8079 8080 8081 8082 8083 8084 8085 8086 8087 8088 8089 8090 8091 8092 8093 8094 8095 8096 8097 8098 8099 8100 8101 8102 8103 8104 8105 8106 8107 8108 8109 8110 8111 8112 8113 8114 8115 8116 8117 8118 8119 8120 8121 8122 8123 8124 8125 8126 8127 8128 8129 8130 8131 8132 8133 8134 8135 8136 8137 8138 8139 8140 8141 8142 8143 8144 8145 8146 8147 8148 8149 8150 8151 8152 8153 8154 8155 8156 8157 8158 8159 8160 8161 8162 8163 8164 8165 8166 8167 8168 8169 8170 8171 8172 8173 8174 8175 8176 8177 8178 8179 8180 8181 8182 8183 8184 8185 8186 8187 8188 8189 8190 8191 8192 8193 8194 8195 8196 8197 8198 8199 8200 8201 8202 8203 8204 8205 8206 8207 8208 8209 8210 8211 8212 8213 8214 8215 8216 8217 8218 8219 8220 8221 8222 8223 8224 8225 8226 8227 8228 8229 8230 8231 8232 8233 8234 8235 8236 8237 8238 8239 8240 8241 8242 8243 8244 8245 8246 8247 8248 8249 8250 8251 8252 8253 8254 8255 8256 8257 8258 8259 8260 8261 8262 8263 8264 8265 8266 8267 8268 8269 8270 8271 8272 8273 8274 8275 8276 8277 8278 8279 8280 8281 8282 8283 8284 8285 8286 8287 8288 8289 8290 8291 8292 8293 8294 8295 8296 8297 8298 8299 8300 8301 8302 8303 8304 8305 8306 8307 8308 8309 8310 8311 8312 8313 8314 8315 8316 8317 8318 8319 8320 8321 8322 8323 8324 8325 8326 8327 8328 8329 8330 8331 8332 8333 8334 8335 8336 8337 8338 8339 8340 8341 8342 8343 8344 8345 8346 8347 8348 8349 8350 8351 8352 8353 8354 8355 8356 8357 8358 8359 8360 8361 8362 8363 8364 8365 8366 8367 8368 8369 8370 8371 8372 8373 8374 8375 8376 8377 8378 8379 8380 8381 8382 8383 8384 8385 8386 8387 8388 8389 8390 8391 8392 8393 8394 8395 8396 8397 8398 8399 8400 8401 8402 8403 8404 8405 8406 8407 8408 8409 8410 8411 8412 8413 8414 8415 8416 8417 8418 8419 8420 8421 8422 8423 8424 8425 8426 8427 8428 8429 8430 8431 8432 8433 8434 8435 8436 8437 8438 8439 8440 8441 8442 8443 8444 8445 8446 8447 8448 8449 8450 8451 8452 8453 8454 8455 8456 8457 8458 8459 8460 8461 8462 8463 8464 8465 8466 8467 8468 8469 8470 8471 8472 8473 8474 8475 8476 8477 8478 8479 8480 8481 8482 8483 8484 8485 8486 8487 8488 8489 8490 8491 8492 8493 8494 8495 8496 8497 8498 8499 8500 8501 8502 8503 8504 8505 8506 8507 8508 8509 8510 8511 8512 8513 8514 8515 8516 8517 8518 8519 8520 8521 8522 8523 8524 8525 8526 8527 8528 8529 8530 8531 8532 8533 8534 8535 8536 8537 8538 8539 8540 8541 8542 8543 8544 8545 8546 8547 8548 8549 8550 8551 8552 8553 8554 8555 8556 8557 8558 8559 8560 8561 8562 8563 8564 8565 8566 8567 8568 8569 8570 8571 8572 8573 8574 8575 8576 8577 8578 8579 8580 8581 8582 8583 8584 8585 8586 8587 8588 8589 8590 8591 8592 8593 8594 8595 8596 8597 8598 8599 8600 8601 8602 8603 8604 8605 8606 8607 8608 8609 8610 8611 8612 8613 8614 8615 8616 8617 8618 8619 8620 8621 8622 8623 8624 8625 8626 8627 8628 8629 8630 8631 8632 8633 8634 8635 8636 8637 8638 8639 8640 8641 8642 8643 8644 8645 8646 8647 8648 8649 8650 8651 8652 8653 8654 8655 8656 8657 8658 8659 8660 8661 8662 8663 8664 8665 8666 8667 8668 8669 8670 8671 8672 8673 8674 8675 8676 8677 8678 8679 8680 8681 8682 8683 8684 8685 8686 8687 8688 8689 8690 8691 8692 8693 8694 8695 8696 8697 8698 8699 8700 8701 8702 8703 8704 8705 8706 8707 8708 8709 8710 8711 8712 8713 8714 8715 8716 8717 8718 8719 8720 8721 8722 8723 8724 8725 8726 8727 8728 8729 8730 8731 8732 8733 8734 
8735 8736 8737 8738 8739 8740 8741 8742 8743 8744 8745 8746 8747 8748 8749 8750 8751 8752 8753 8754 8755 8756 8757 8758 8759 8760 8761 8762 8763 8764 8765 8766 8767 8768 8769 8770 8771 8772 8773 8774 8775 8776 8777 8778 8779 8780 8781 8782 8783 8784 8785 8786 8787 8788 8789 8790 8791 8792 8793 8794 8795 8796 8797 8798 8799 8800 8801 8802 8803 8804 8805 8806 8807 8808 8809 8810 8811 8812 8813 8814 8815 8816 8817 8818 8819 8820 8821 8822 8823 8824 8825 8826 8827 8828 8829 8830 8831 8832 8833 8834 8835 8836 8837 8838 8839 8840 8841 8842 8843 8844 8845 8846 8847 8848 8849 8850 8851 8852 8853 8854 8855 8856 8857 8858 8859 8860 8861 8862 8863 8864 8865 8866 8867 8868 8869 8870 8871 8872 8873 8874 8875 8876 8877 8878 8879 8880 8881 8882 8883 8884 8885 8886 8887 8888 8889 8890 8891 8892 8893 8894 8895 8896 8897 8898 8899 8900 8901 8902 8903 8904 8905 8906 8907 8908 8909 8910 8911 8912 8913 8914 8915 8916 8917 8918 8919 8920 8921 8922 8923 8924 8925 8926 8927 8928 8929 8930 8931 8932 8933 8934 8935 8936 8937 8938 8939 8940 8941 8942 8943 8944 8945 8946 8947 8948 8949 8950 8951 8952 8953 8954 8955 8956 8957 8958 8959 8960 8961 8962 8963 8964 8965 8966 8967 8968 8969 8970 8971 8972 8973 8974 8975 8976 8977 8978 8979 8980 8981 8982 8983 8984 8985 8986 8987 8988 8989 8990 8991 8992 8993 8994 8995 8996 8997 8998 8999 9000 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9018 9019 9020 9021 9022 9023 9024 9025 9026 9027 9028 9029 9030 9031 9032 9033 9034 9035 9036 9037 9038 9039 9040 9041 9042 9043 9044 9045 9046 9047 9048 9049 9050 9051 9052 9053 9054 9055 9056 9057 9058 9059 9060 9061 9062 9063 9064 9065 9066 9067 9068 9069 9070 9071 9072 9073 9074 9075 9076 9077 9078 9079 9080 9081 9082 9083 9084 9085 9086 9087 9088 9089 9090 9091 9092 9093 9094 9095 9096 9097 9098 9099 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 9110 9111 9112 9113 9114 9115 9116 9117 9118 9119 9120 9121 9122 9123 9124 9125 9126 9127 9128 9129 9130 9131 9132 9133 9134 9135 9136 9137 9138 9139 9140 9141 9142 9143 9144 9145 9146 9147 9148 9149 9150 9151 9152 9153 9154 9155 9156 9157 9158 9159 9160 9161 9162 9163 9164 9165 9166 9167 9168 9169 9170 9171 9172 9173 9174 9175 9176 9177 9178 9179 9180 9181 9182 9183 9184 9185 9186 9187 9188 9189 9190 9191 9192 9193 9194 9195 9196 9197 9198 9199 9200 9201 9202 9203 9204 9205 9206 9207 9208 9209 9210 9211 9212 9213 9214 9215 9216 9217 9218 9219 9220 9221 9222 9223 9224 9225 9226 9227 9228 9229 9230 9231 9232 9233 9234 9235 9236 9237 9238 9239 9240 9241 9242 9243 9244 9245 9246 9247 9248 9249 9250 9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261 9262 9263 9264 9265 9266 9267 9268 9269 9270 9271 9272 9273 9274 9275 9276 9277 9278 9279 9280 9281 9282 9283 9284 9285 9286 9287 9288 9289 9290 9291 9292 9293 9294 9295 9296 9297 9298 9299 9300 9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 9336 9337 9338 9339 9340 9341 9342 9343 9344 9345 9346 9347 9348 9349 9350 9351 9352 9353 9354 9355 9356 9357 9358 9359 9360 9361 9362 9363 9364 9365 9366 9367 9368 9369 9370 9371 9372 9373 9374 9375 9376 9377 9378 9379 9380 9381 9382 9383 9384 9385 9386 9387 9388 9389 9390 9391 9392 9393 9394 9395 9396 9397 9398 9399 9400 9401 9402 9403 9404 9405 9406 9407 9408 9409 9410 9411 9412 9413 9414 9415 9416 9417 9418 9419 9420 9421 9422 9423 9424 9425 9426 9427 9428 9429 9430 9431 9432 9433 9434 9435 9436 9437 9438 9439 9440 9441 9442 9443 9444 9445 
9446 9447 9448 9449 9450 9451 9452 9453 9454 9455 9456 9457 9458 9459 9460 9461 9462 9463 9464 9465 9466 9467 9468 9469 9470 9471 9472 9473 9474 9475 9476 9477 9478 9479 9480 9481 9482 9483 9484 9485 9486 9487 9488 9489 9490 9491 9492 9493 9494 9495 9496 9497 9498 9499 9500 9501 9502 9503 9504 9505 9506 9507 9508 9509 9510 9511 9512 9513 9514 9515 9516 9517 9518 9519 9520 9521 9522 9523 9524 9525 9526 9527 9528 9529 9530 9531 9532 9533 9534 9535 9536 9537 9538 9539 9540 9541 9542 9543 9544 9545 9546 9547 9548 9549 9550 9551 9552 9553 9554 9555 9556 9557 9558 9559 9560 9561 9562 9563 9564 9565 9566 9567 9568 9569 9570 9571 9572 9573 9574 9575 9576 9577 9578 9579 9580 9581 9582 9583 9584 9585 9586 9587 9588 9589 9590 9591 9592 9593 9594 9595 9596 9597 9598 9599 9600 9601 9602 9603 9604 9605 9606 9607 9608 9609 9610 9611 9612 9613 9614 9615 9616 9617 9618 9619 9620 9621 9622 9623 9624 9625 9626 9627 9628 9629 9630 9631 9632 9633 9634 9635 9636 9637 9638 9639 9640 9641 9642 9643 9644 9645 9646 9647 9648 9649 9650 9651 9652 9653 9654 9655 9656 9657 9658 9659 9660 9661 9662 9663 9664 9665 9666 9667 9668 9669 9670 9671 9672 9673 9674 9675 9676 9677 9678 9679 9680 9681 9682 9683 9684 9685 9686 9687 9688 9689 9690 9691 9692 9693 9694 9695 9696 9697 9698 9699 9700 9701 9702 9703 9704 9705 9706 9707 9708 9709 9710 9711 9712 9713 9714 9715 9716 9717 9718 9719 9720 9721 9722 9723 9724 9725 9726 9727 9728 9729 9730 9731 9732 9733 9734 9735 9736 9737 9738 9739 9740 9741 9742 9743 9744 9745 9746 9747 9748 9749 9750 9751 9752 9753 9754 9755 9756 9757 9758 9759 9760 9761 9762 9763 9764 9765 9766 9767 9768 9769 9770 9771 9772 9773 9774 9775 9776 9777 9778 9779 9780 9781 9782 9783 9784 9785 9786 9787 9788 9789 9790 9791 9792 9793 9794 9795 9796 9797 9798 9799 9800 9801 9802 9803 9804 9805 9806 9807 9808 9809 9810 9811 9812 9813 9814 9815 9816 9817 9818 9819 9820 9821 9822 9823 9824 9825 9826 9827 9828 9829 9830 9831 9832 9833 9834 9835 9836 9837 9838 9839 9840 9841 9842 9843 9844 9845 9846 9847 9848 9849 9850 9851 9852 9853 9854 9855 9856 9857 9858 9859 9860 9861 9862 9863 9864 9865 9866 9867 9868 9869 9870 9871 9872 9873 9874 9875 9876 9877 9878 9879 9880 9881 9882 9883 9884 9885 9886 9887 9888 9889 9890 9891 9892 9893 9894 9895 9896 9897 9898 9899 9900 9901 9902 9903 9904 9905 9906 9907 9908 9909 9910 9911 9912 9913 9914 9915 9916 9917 9918 9919 9920 9921 9922 9923 9924 9925 9926 9927 9928 9929 9930 9931 9932 9933 9934 9935 9936 9937 9938 9939 9940 9941 9942 9943 9944 9945 9946 9947 9948 9949 9950 9951 9952 9953 9954 9955 9956 9957 9958 9959 9960 9961 9962 9963 9964 9965 9966 9967 9968 9969 9970 9971 9972 9973 9974 9975 9976 9977 9978 9979 9980 9981 9982 9983 9984 9985 9986 9987 9988 9989 9990 9991 9992 9993 9994 9995 9996 9997 9998 9999 10000 10001 10002 10003 10004 10005 10006 10007 10008 10009 10010 10011 10012 10013 10014 10015 10016 10017 10018 10019 10020 10021 10022 10023 10024 10025 10026 10027 10028 10029 10030 10031 10032 10033 10034 10035 10036 10037 10038 10039 10040 10041 10042 10043 10044 10045 10046 10047 10048 10049 10050 10051 10052 10053 10054 10055 10056 10057 10058 10059 10060 10061 10062 10063 10064 10065 10066 10067 10068 10069 10070 10071 10072 10073 10074 10075 10076 10077 10078 10079 10080 10081 10082 10083 10084 10085 10086 10087 10088 10089 10090 10091 10092 10093 10094 10095 10096 10097 10098 10099 10100 10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __NET_CFG80211_H
#define __NET_CFG80211_H
/*
 * 802.11 device and configuration interface
 *
 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright 2015-2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2025 Intel Corporation
 */

#include <linux/ethtool.h>
#include <uapi/linux/rfkill.h>
#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/nl80211.h>
#include <linux/if_ether.h>
#include <linux/ieee80211.h>
#include <linux/net.h>
#include <linux/rfkill.h>
#include <net/regulatory.h>

/**
 * DOC: Introduction
 *
 * cfg80211 is the configuration API for 802.11 devices in Linux. It bridges
 * userspace and drivers, and offers some utility functionality associated
 * with 802.11. cfg80211 must, directly or indirectly via mac80211, be used
 * by all modern wireless drivers in Linux, so that they offer a consistent
 * API through nl80211. For backward compatibility, cfg80211 also offers
 * wireless extensions to userspace, but hides them from drivers completely.
 *
 * Additionally, cfg80211 contains code to help enforce regulatory spectrum
 * use restrictions.
 */

/**
 * DOC: Device registration
 *
 * In order for a driver to use cfg80211, it must register the hardware device
 * with cfg80211. This happens through a number of hardware capability structs
 * described below.
 *
 * The fundamental structure for each device is the 'wiphy', of which each
 * instance describes a physical wireless device connected to the system. Each
 * such wiphy can have zero, one, or many virtual interfaces associated with
 * it, which need to be identified as such by pointing the network interface's
 * @ieee80211_ptr pointer to a &struct wireless_dev which further describes
 * the wireless part of the interface. Normally this struct is embedded in the
 * network interface's private data area. Drivers can optionally allow creating
 * or destroying virtual interfaces on the fly, but without at least one
 * interface, or the ability to create some, the wireless device isn't useful.
 *
 * Each wiphy structure contains device capability information, and also has
 * a pointer to the various operations the driver offers. The definitions and
 * structures here describe these capabilities in detail.
 */

struct wiphy;

/*
 * wireless hardware capability structures
 */
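/*
 * Editor's illustrative sketch (not part of cfg80211): the DOC section
 * above says drivers normally embed &struct wireless_dev in the netdev
 * private area and point @ieee80211_ptr at it. The hypothetical
 * CFG80211_EXAMPLE_SNIPPETS guard keeps these sketches out of real
 * builds; the example_* names are made up, and the wireless_dev members
 * used here are only defined later in this header.
 */
#ifdef CFG80211_EXAMPLE_SNIPPETS
struct example_vif_priv {
        struct wireless_dev wdev;       /* must live as long as the netdev */
        /* ... driver-private state ... */
};

static void example_attach_wdev(struct net_device *dev, struct wiphy *wiphy)
{
        struct example_vif_priv *priv = netdev_priv(dev);

        priv->wdev.wiphy = wiphy;
        priv->wdev.iftype = NL80211_IFTYPE_STATION;
        priv->wdev.netdev = dev;
        /* mark the netdev as the wireless part of this interface */
        dev->ieee80211_ptr = &priv->wdev;
}
#endif /* CFG80211_EXAMPLE_SNIPPETS */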
/**
 * enum ieee80211_channel_flags - channel flags
 *
 * Channel flags set by the regulatory control code.
 *
 * @IEEE80211_CHAN_DISABLED: This channel is disabled.
 * @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes
 *      sending probe requests or beaconing.
 * @IEEE80211_CHAN_PSD: Power spectral density (in dBm) is set for this
 *      channel.
 * @IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
 * @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel
 *      is not permitted.
 * @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
 *      is not permitted.
 * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel.
 * @IEEE80211_CHAN_NO_80MHZ: If the driver supports 80 MHz on the band,
 *      this flag indicates that an 80 MHz channel cannot use this
 *      channel as the control or any of the secondary channels.
 *      This may be due to the driver or due to regulatory bandwidth
 *      restrictions.
 * @IEEE80211_CHAN_NO_160MHZ: If the driver supports 160 MHz on the band,
 *      this flag indicates that a 160 MHz channel cannot use this
 *      channel as the control or any of the secondary channels.
 *      This may be due to the driver or due to regulatory bandwidth
 *      restrictions.
 * @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY
 * @IEEE80211_CHAN_IR_CONCURRENT: see %NL80211_FREQUENCY_ATTR_IR_CONCURRENT
 * @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted
 *      on this channel.
 * @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
 *      on this channel.
 * @IEEE80211_CHAN_NO_HE: HE operation is not permitted on this channel.
 * @IEEE80211_CHAN_NO_320MHZ: If the driver supports 320 MHz on the band,
 *      this flag indicates that a 320 MHz channel cannot use this
 *      channel as the control or any of the secondary channels.
 *      This may be due to the driver or due to regulatory bandwidth
 *      restrictions.
 * @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel.
 * @IEEE80211_CHAN_DFS_CONCURRENT: See %NL80211_RRF_DFS_CONCURRENT
 * @IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT: Client connection with VLP AP
 *      not permitted using this channel
 * @IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT: Client connection with AFC AP
 *      not permitted using this channel
 * @IEEE80211_CHAN_CAN_MONITOR: This channel can be used for monitor
 *      mode even in the presence of other (regulatory) restrictions,
 *      even if it is otherwise disabled.
 * @IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP: Allow using this channel for AP operation
 *      with very low power (VLP), even if otherwise set to NO_IR.
 * @IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY: Allow activity on a 20 MHz channel,
 *      even if otherwise set to NO_IR.
 * @IEEE80211_CHAN_S1G_NO_PRIMARY: Prevents the channel from being used as an
 *      S1G primary channel. Does not prevent the wider operating channel
 *      described by the chandef from being used. In order for a 2 MHz primary
 *      to be used, both 1 MHz subchannels shall not contain this flag.
 * @IEEE80211_CHAN_NO_4MHZ: 4 MHz bandwidth is not permitted on this channel.
 * @IEEE80211_CHAN_NO_8MHZ: 8 MHz bandwidth is not permitted on this channel.
 * @IEEE80211_CHAN_NO_16MHZ: 16 MHz bandwidth is not permitted on this channel.
 */
enum ieee80211_channel_flags {
        IEEE80211_CHAN_DISABLED         = BIT(0),
        IEEE80211_CHAN_NO_IR            = BIT(1),
        IEEE80211_CHAN_PSD              = BIT(2),
        IEEE80211_CHAN_RADAR            = BIT(3),
        IEEE80211_CHAN_NO_HT40PLUS      = BIT(4),
        IEEE80211_CHAN_NO_HT40MINUS     = BIT(5),
        IEEE80211_CHAN_NO_OFDM          = BIT(6),
        IEEE80211_CHAN_NO_80MHZ         = BIT(7),
        IEEE80211_CHAN_NO_160MHZ        = BIT(8),
        IEEE80211_CHAN_INDOOR_ONLY      = BIT(9),
        IEEE80211_CHAN_IR_CONCURRENT    = BIT(10),
        IEEE80211_CHAN_NO_20MHZ         = BIT(11),
        IEEE80211_CHAN_NO_10MHZ         = BIT(12),
        IEEE80211_CHAN_NO_HE            = BIT(13),
        /* can use free bits here */
        IEEE80211_CHAN_NO_320MHZ        = BIT(19),
        IEEE80211_CHAN_NO_EHT           = BIT(20),
        IEEE80211_CHAN_DFS_CONCURRENT   = BIT(21),
        IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = BIT(22),
        IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = BIT(23),
        IEEE80211_CHAN_CAN_MONITOR      = BIT(24),
        IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP = BIT(25),
        IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY = BIT(26),
        IEEE80211_CHAN_S1G_NO_PRIMARY   = BIT(27),
        IEEE80211_CHAN_NO_4MHZ          = BIT(28),
        IEEE80211_CHAN_NO_8MHZ          = BIT(29),
        IEEE80211_CHAN_NO_16MHZ         = BIT(30),
};

#define IEEE80211_CHAN_NO_HT40 \
        (IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)

#define IEEE80211_DFS_MIN_CAC_TIME_MS   60000
#define IEEE80211_DFS_MIN_NOP_TIME_MS   (30 * 60 * 1000)

/**
 * struct ieee80211_channel - channel definition
 *
 * This structure describes a single channel for use
 * with cfg80211.
 *
 * @center_freq: center frequency in MHz
 * @freq_offset: offset from @center_freq, in KHz
 * @hw_value: hardware-specific value for the channel
 * @flags: channel flags from &enum ieee80211_channel_flags.
 * @orig_flags: channel flags at registration time, used by regulatory
 *      code to support devices with additional restrictions
 * @band: band this channel belongs to.
 * @max_antenna_gain: maximum antenna gain in dBi
 * @max_power: maximum transmission power (in dBm)
 * @max_reg_power: maximum regulatory transmission power (in dBm)
 * @beacon_found: helper to regulatory code to indicate when a beacon
 *      has been found on this channel. Use regulatory_hint_found_beacon()
 *      to enable this, this is useful only on 5 GHz band.
 * @orig_mag: internal use
 * @orig_mpwr: internal use
 * @dfs_state: current state of this channel. Only relevant if radar is
 *      required on this channel.
 * @dfs_state_entered: timestamp (jiffies) when the dfs state was entered.
 * @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels.
 * @psd: power spectral density (in dBm)
 */
struct ieee80211_channel {
        enum nl80211_band band;
        u32 center_freq;
        u16 freq_offset;
        u16 hw_value;
        u32 flags;
        int max_antenna_gain;
        int max_power;
        int max_reg_power;
        bool beacon_found;
        u32 orig_flags;
        int orig_mag, orig_mpwr;
        enum nl80211_dfs_state dfs_state;
        unsigned long dfs_state_entered;
        unsigned int dfs_cac_ms;
        s8 psd;
};
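/*
 * Editor's illustrative sketch: combining the channel flags above. A
 * 40 MHz (HT40) channel needs at least one permitted extension side,
 * so a channel with both NO_HT40PLUS and NO_HT40MINUS set (i.e. the
 * full IEEE80211_CHAN_NO_HT40 mask) cannot carry HT40 at all. The
 * helper name is made up and guarded out of real builds.
 */
#ifdef CFG80211_EXAMPLE_SNIPPETS
static bool example_chan_allows_ht40(const struct ieee80211_channel *chan)
{
        if (chan->flags & IEEE80211_CHAN_DISABLED)
                return false;

        /* true unless both extension-channel sides are prohibited */
        return (chan->flags & IEEE80211_CHAN_NO_HT40) !=
               IEEE80211_CHAN_NO_HT40;
}
#endif /* CFG80211_EXAMPLE_SNIPPETS */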
/**
 * enum ieee80211_rate_flags - rate flags
 *
 * Hardware/specification flags for rates. These are structured
 * in a way that allows using the same bitrate structure for
 * different bands/PHY modes.
 *
 * @IEEE80211_RATE_SHORT_PREAMBLE: Hardware can send with short
 *      preamble on this bitrate; only relevant in 2.4GHz band and
 *      with CCK rates.
 * @IEEE80211_RATE_MANDATORY_A: This bitrate is a mandatory rate
 *      when used with 802.11a (on the 5 GHz band); filled by the
 *      core code when registering the wiphy.
 * @IEEE80211_RATE_MANDATORY_B: This bitrate is a mandatory rate
 *      when used with 802.11b (on the 2.4 GHz band); filled by the
 *      core code when registering the wiphy.
 * @IEEE80211_RATE_MANDATORY_G: This bitrate is a mandatory rate
 *      when used with 802.11g (on the 2.4 GHz band); filled by the
 *      core code when registering the wiphy.
 * @IEEE80211_RATE_ERP_G: This is an ERP rate in 802.11g mode.
 * @IEEE80211_RATE_SUPPORTS_5MHZ: Rate can be used in 5 MHz mode
 * @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode
 */
enum ieee80211_rate_flags {
        IEEE80211_RATE_SHORT_PREAMBLE   = BIT(0),
        IEEE80211_RATE_MANDATORY_A      = BIT(1),
        IEEE80211_RATE_MANDATORY_B      = BIT(2),
        IEEE80211_RATE_MANDATORY_G      = BIT(3),
        IEEE80211_RATE_ERP_G            = BIT(4),
        IEEE80211_RATE_SUPPORTS_5MHZ    = BIT(5),
        IEEE80211_RATE_SUPPORTS_10MHZ   = BIT(6),
};

/**
 * enum ieee80211_bss_type - BSS type filter
 *
 * @IEEE80211_BSS_TYPE_ESS: Infrastructure BSS
 * @IEEE80211_BSS_TYPE_PBSS: Personal BSS
 * @IEEE80211_BSS_TYPE_IBSS: Independent BSS
 * @IEEE80211_BSS_TYPE_MBSS: Mesh BSS
 * @IEEE80211_BSS_TYPE_ANY: Wildcard value for matching any BSS type
 */
enum ieee80211_bss_type {
        IEEE80211_BSS_TYPE_ESS,
        IEEE80211_BSS_TYPE_PBSS,
        IEEE80211_BSS_TYPE_IBSS,
        IEEE80211_BSS_TYPE_MBSS,
        IEEE80211_BSS_TYPE_ANY
};

/**
 * enum ieee80211_privacy - BSS privacy filter
 *
 * @IEEE80211_PRIVACY_ON: privacy bit set
 * @IEEE80211_PRIVACY_OFF: privacy bit clear
 * @IEEE80211_PRIVACY_ANY: Wildcard value for matching any privacy setting
 */
enum ieee80211_privacy {
        IEEE80211_PRIVACY_ON,
        IEEE80211_PRIVACY_OFF,
        IEEE80211_PRIVACY_ANY
};

#define IEEE80211_PRIVACY(x)    \
        ((x) ? IEEE80211_PRIVACY_ON : IEEE80211_PRIVACY_OFF)

/**
 * struct ieee80211_rate - bitrate definition
 *
 * This structure describes a bitrate that an 802.11 PHY can
 * operate with. The two values @hw_value and @hw_value_short
 * are only for driver use when pointers to this structure are
 * passed around.
 *
 * @flags: rate-specific flags from &enum ieee80211_rate_flags
 * @bitrate: bitrate in units of 100 Kbps
 * @hw_value: driver/hardware value for this rate
 * @hw_value_short: driver/hardware value for this rate when
 *      short preamble is used
 */
struct ieee80211_rate {
        u32 flags;
        u16 bitrate;
        u16 hw_value, hw_value_short;
};

/**
 * struct ieee80211_he_obss_pd - AP settings for spatial reuse
 *
 * @enable: is the feature enabled.
 * @sr_ctrl: The SR Control field of SRP element.
 * @non_srg_max_offset: non-SRG maximum tx power offset
 * @min_offset: minimal tx power offset an associated station shall use
 * @max_offset: maximum tx power offset an associated station shall use
 * @bss_color_bitmap: bitmap that indicates the BSS color values used by
 *      members of the SRG
 * @partial_bssid_bitmap: bitmap that indicates the partial BSSID values
 *      used by members of the SRG
 */
struct ieee80211_he_obss_pd {
        bool enable;
        u8 sr_ctrl;
        u8 non_srg_max_offset;
        u8 min_offset;
        u8 max_offset;
        u8 bss_color_bitmap[8];
        u8 partial_bssid_bitmap[8];
};

/**
 * struct cfg80211_he_bss_color - AP settings for BSS coloring
 *
 * @color: the current color.
 * @enabled: HE BSS color is used
 * @partial: define the AID equation.
 */
struct cfg80211_he_bss_color {
        u8 color;
        bool enabled;
        bool partial;
};
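/*
 * Editor's illustrative sketch: @bitrate in &struct ieee80211_rate
 * above is in units of 100 Kbps, so e.g. a value of 540 encodes
 * 54 Mbps. Hypothetical helper, guarded out of real builds.
 */
#ifdef CFG80211_EXAMPLE_SNIPPETS
static unsigned int example_rate_to_kbps(const struct ieee80211_rate *rate)
{
        return rate->bitrate * 100;     /* 100 Kbps units -> Kbps */
}
#endif /* CFG80211_EXAMPLE_SNIPPETS */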
/**
 * struct ieee80211_sta_ht_cap - STA's HT capabilities
 *
 * This structure describes most essential parameters needed
 * to describe 802.11n HT capabilities for an STA.
 *
 * @ht_supported: is HT supported by the STA
 * @cap: HT capabilities map as described in 802.11n spec
 * @ampdu_factor: Maximum A-MPDU length factor
 * @ampdu_density: Minimum A-MPDU spacing
 * @mcs: Supported MCS rates
 */
struct ieee80211_sta_ht_cap {
        u16 cap; /* use IEEE80211_HT_CAP_ */
        bool ht_supported;
        u8 ampdu_factor;
        u8 ampdu_density;
        struct ieee80211_mcs_info mcs;
};

/**
 * struct ieee80211_sta_vht_cap - STA's VHT capabilities
 *
 * This structure describes most essential parameters needed
 * to describe 802.11ac VHT capabilities for an STA.
 *
 * @vht_supported: is VHT supported by the STA
 * @cap: VHT capabilities map as described in 802.11ac spec
 * @vht_mcs: Supported VHT MCS rates
 */
struct ieee80211_sta_vht_cap {
        bool vht_supported;
        u32 cap; /* use IEEE80211_VHT_CAP_ */
        struct ieee80211_vht_mcs_info vht_mcs;
};

#define IEEE80211_HE_PPE_THRES_MAX_LEN  25

/**
 * struct ieee80211_sta_he_cap - STA's HE capabilities
 *
 * This structure describes most essential parameters needed
 * to describe 802.11ax HE capabilities for a STA.
 *
 * @has_he: true iff HE data is valid.
 * @he_cap_elem: Fixed portion of the HE capabilities element.
 * @he_mcs_nss_supp: The supported NSS/MCS combinations.
 * @ppe_thres: Holds the PPE Thresholds data.
 */
struct ieee80211_sta_he_cap {
        bool has_he;
        struct ieee80211_he_cap_elem he_cap_elem;
        struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp;
        u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN];
};

/**
 * struct ieee80211_eht_mcs_nss_supp - EHT max supported NSS per MCS
 *
 * See P802.11be_D1.3 Table 9-401k - "Subfields of the Supported EHT-MCS
 * and NSS Set field"
 *
 * @only_20mhz: MCS/NSS support for 20 MHz-only STA.
 * @bw: MCS/NSS support for 80, 160 and 320 MHz
 * @bw._80: MCS/NSS support for BW <= 80 MHz
 * @bw._160: MCS/NSS support for BW = 160 MHz
 * @bw._320: MCS/NSS support for BW = 320 MHz
 */
struct ieee80211_eht_mcs_nss_supp {
        union {
                struct ieee80211_eht_mcs_nss_supp_20mhz_only only_20mhz;
                struct {
                        struct ieee80211_eht_mcs_nss_supp_bw _80;
                        struct ieee80211_eht_mcs_nss_supp_bw _160;
                        struct ieee80211_eht_mcs_nss_supp_bw _320;
                } __packed bw;
        } __packed;
} __packed;

#define IEEE80211_EHT_PPE_THRES_MAX_LEN 32

/**
 * struct ieee80211_sta_eht_cap - STA's EHT capabilities
 *
 * This structure describes most essential parameters needed
 * to describe 802.11be EHT capabilities for a STA.
 *
 * @has_eht: true iff EHT data is valid.
 * @eht_cap_elem: Fixed portion of the eht capabilities element.
 * @eht_mcs_nss_supp: The supported NSS/MCS combinations.
 * @eht_ppe_thres: Holds the PPE Thresholds data.
 */
struct ieee80211_sta_eht_cap {
        bool has_eht;
        struct ieee80211_eht_cap_elem_fixed eht_cap_elem;
        struct ieee80211_eht_mcs_nss_supp eht_mcs_nss_supp;
        u8 eht_ppe_thres[IEEE80211_EHT_PPE_THRES_MAX_LEN];
};

/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
/*
 * This is used to mark the sband->iftype_data pointer which is supposed
 * to be an array with special access semantics (per iftype), but a lot
 * of code got it wrong in the past, so with this marking sparse will be
 * noisy when the pointer is used directly.
 */
# define __iftd __attribute__((noderef, address_space(__iftype_data)))
#else
# define __iftd
#endif /* __CHECKER__ */
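/*
 * Editor's illustrative sketch: @ampdu_factor above is the 802.11n
 * "maximum A-MPDU length exponent", which encodes a maximum A-MPDU
 * size of 2^(13 + factor) - 1 octets (an assumption from the 802.11n
 * spec, not from this header). Hypothetical helper, guarded out of
 * real builds.
 */
#ifdef CFG80211_EXAMPLE_SNIPPETS
static u32 example_max_ampdu_len(const struct ieee80211_sta_ht_cap *ht_cap)
{
        if (!ht_cap->ht_supported)
                return 0;

        return (1U << (13 + ht_cap->ampdu_factor)) - 1;
}
#endif /* CFG80211_EXAMPLE_SNIPPETS */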
/**
 * struct ieee80211_sband_iftype_data - sband data per interface type
 *
 * This structure encapsulates sband data that is relevant for the
 * interface types defined in @types_mask. Each type in the
 * @types_mask must be unique across all instances of iftype_data.
 *
 * @types_mask: interface types mask
 * @he_cap: holds the HE capabilities
 * @he_6ghz_capa: HE 6 GHz capabilities, must be filled in for a
 *      6 GHz band channel (and 0 may be a valid value).
 * @eht_cap: STA's EHT capabilities
 * @vendor_elems: vendor element(s) to advertise
 * @vendor_elems.data: vendor element(s) data
 * @vendor_elems.len: vendor element(s) length
 */
struct ieee80211_sband_iftype_data {
        u16 types_mask;
        struct ieee80211_sta_he_cap he_cap;
        struct ieee80211_he_6ghz_capa he_6ghz_capa;
        struct ieee80211_sta_eht_cap eht_cap;
        struct {
                const u8 *data;
                unsigned int len;
        } vendor_elems;
};

/**
 * enum ieee80211_edmg_bw_config - allowed channel bandwidth configurations
 *
 * @IEEE80211_EDMG_BW_CONFIG_4: 2.16GHz
 * @IEEE80211_EDMG_BW_CONFIG_5: 2.16GHz and 4.32GHz
 * @IEEE80211_EDMG_BW_CONFIG_6: 2.16GHz, 4.32GHz and 6.48GHz
 * @IEEE80211_EDMG_BW_CONFIG_7: 2.16GHz, 4.32GHz, 6.48GHz and 8.64GHz
 * @IEEE80211_EDMG_BW_CONFIG_8: 2.16GHz and 2.16GHz + 2.16GHz
 * @IEEE80211_EDMG_BW_CONFIG_9: 2.16GHz, 4.32GHz and 2.16GHz + 2.16GHz
 * @IEEE80211_EDMG_BW_CONFIG_10: 2.16GHz, 4.32GHz, 6.48GHz and 2.16GHz+2.16GHz
 * @IEEE80211_EDMG_BW_CONFIG_11: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz and
 *      2.16GHz+2.16GHz
 * @IEEE80211_EDMG_BW_CONFIG_12: 2.16GHz, 2.16GHz + 2.16GHz and
 *      4.32GHz + 4.32GHz
 * @IEEE80211_EDMG_BW_CONFIG_13: 2.16GHz, 4.32GHz, 2.16GHz + 2.16GHz and
 *      4.32GHz + 4.32GHz
 * @IEEE80211_EDMG_BW_CONFIG_14: 2.16GHz, 4.32GHz, 6.48GHz, 2.16GHz + 2.16GHz
 *      and 4.32GHz + 4.32GHz
 * @IEEE80211_EDMG_BW_CONFIG_15: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz,
 *      2.16GHz + 2.16GHz and 4.32GHz + 4.32GHz
 */
enum ieee80211_edmg_bw_config {
        IEEE80211_EDMG_BW_CONFIG_4      = 4,
        IEEE80211_EDMG_BW_CONFIG_5      = 5,
        IEEE80211_EDMG_BW_CONFIG_6      = 6,
        IEEE80211_EDMG_BW_CONFIG_7      = 7,
        IEEE80211_EDMG_BW_CONFIG_8      = 8,
        IEEE80211_EDMG_BW_CONFIG_9      = 9,
        IEEE80211_EDMG_BW_CONFIG_10     = 10,
        IEEE80211_EDMG_BW_CONFIG_11     = 11,
        IEEE80211_EDMG_BW_CONFIG_12     = 12,
        IEEE80211_EDMG_BW_CONFIG_13     = 13,
        IEEE80211_EDMG_BW_CONFIG_14     = 14,
        IEEE80211_EDMG_BW_CONFIG_15     = 15,
};

/**
 * struct ieee80211_edmg - EDMG configuration
 *
 * This structure describes most essential parameters needed
 * to describe 802.11ay EDMG configuration
 *
 * @channels: bitmap that indicates the 2.16 GHz channel(s)
 *      that are allowed to be used for transmissions.
 *      Bit 0 indicates channel 1, bit 1 indicates channel 2, etc.
 *      Set to 0 to indicate EDMG is not supported.
 * @bw_config: Channel BW Configuration subfield encodes
 *      the allowed channel bandwidth configurations
 */
struct ieee80211_edmg {
        u8 channels;
        enum ieee80211_edmg_bw_config bw_config;
};

/**
 * struct ieee80211_sta_s1g_cap - STA's S1G capabilities
 *
 * This structure describes most essential parameters needed
 * to describe 802.11ah S1G capabilities for a STA.
 *
 * @s1g: is STA an S1G STA
 * @cap: S1G capabilities information
 * @nss_mcs: Supported NSS MCS set
 */
struct ieee80211_sta_s1g_cap {
        bool s1g;
        u8 cap[10]; /* use S1G_CAPAB_ */
        u8 nss_mcs[5];
};
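/*
 * Editor's illustrative sketch: per the @channels documentation above,
 * a zero channel bitmap means EDMG is not supported. Hypothetical
 * helper, guarded out of real builds.
 */
#ifdef CFG80211_EXAMPLE_SNIPPETS
static bool example_edmg_supported(const struct ieee80211_edmg *edmg)
{
        return edmg->channels != 0;
}
#endif /* CFG80211_EXAMPLE_SNIPPETS */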
/**
 * struct ieee80211_supported_band - frequency band definition
 *
 * This structure describes a frequency band a wiphy
 * is able to operate in.
 *
 * @channels: Array of channels the hardware can operate with
 *      in this band.
 * @band: the band this structure represents
 * @n_channels: Number of channels in @channels
 * @bitrates: Array of bitrates the hardware can operate with
 *      in this band. Must be sorted to give a valid "supported
 *      rates" IE, i.e. CCK rates first, then OFDM.
 * @n_bitrates: Number of bitrates in @bitrates
 * @ht_cap: HT capabilities in this band
 * @vht_cap: VHT capabilities in this band
 * @edmg_cap: EDMG capabilities in this band
 * @s1g_cap: S1G capabilities in this band (S1G band only, of course)
 * @n_iftype_data: number of iftype data entries
 * @iftype_data: interface type data entries. Note that the bits in
 *      @types_mask inside this structure cannot overlap (i.e. only
 *      one occurrence of each type is allowed across all instances of
 *      iftype_data).
 */
struct ieee80211_supported_band {
        struct ieee80211_channel *channels;
        struct ieee80211_rate *bitrates;
        enum nl80211_band band;
        int n_channels;
        int n_bitrates;
        struct ieee80211_sta_ht_cap ht_cap;
        struct ieee80211_sta_vht_cap vht_cap;
        struct ieee80211_sta_s1g_cap s1g_cap;
        struct ieee80211_edmg edmg_cap;
        u16 n_iftype_data;
        const struct ieee80211_sband_iftype_data __iftd *iftype_data;
};

/**
 * _ieee80211_set_sband_iftype_data - set sband iftype data array
 * @sband: the sband to initialize
 * @iftd: the iftype data array pointer
 * @n_iftd: the length of the iftype data array
 *
 * Set the sband iftype data array; use this where the length cannot
 * be derived from the ARRAY_SIZE() of the argument, but prefer
 * ieee80211_set_sband_iftype_data() where it can be used.
 */
static inline void
_ieee80211_set_sband_iftype_data(struct ieee80211_supported_band *sband,
                                 const struct ieee80211_sband_iftype_data *iftd,
                                 u16 n_iftd)
{
        sband->iftype_data = (const void __iftd __force *)iftd;
        sband->n_iftype_data = n_iftd;
}

/**
 * ieee80211_set_sband_iftype_data - set sband iftype data array
 * @sband: the sband to initialize
 * @iftd: the iftype data array
 */
#define ieee80211_set_sband_iftype_data(sband, iftd)    \
        _ieee80211_set_sband_iftype_data(sband, iftd, ARRAY_SIZE(iftd))

/**
 * for_each_sband_iftype_data - iterate sband iftype data entries
 * @sband: the sband whose iftype_data array to iterate
 * @i: iterator counter
 * @iftd: iftype data pointer to set
 */
#define for_each_sband_iftype_data(sband, i, iftd)                          \
        for (i = 0, iftd = (const void __force *)&(sband)->iftype_data[i];  \
             i < (sband)->n_iftype_data;                                    \
             i++, iftd = (const void __force *)&(sband)->iftype_data[i])

/**
 * ieee80211_get_sband_iftype_data - return sband data for a given iftype
 * @sband: the sband to search for the STA on
 * @iftype: enum nl80211_iftype
 *
 * Return: pointer to struct ieee80211_sband_iftype_data, or NULL if none found
 */
static inline const struct ieee80211_sband_iftype_data *
ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
                                u8 iftype)
{
        const struct ieee80211_sband_iftype_data *data;
        int i;

        if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
                return NULL;

        if (iftype == NL80211_IFTYPE_AP_VLAN)
                iftype = NL80211_IFTYPE_AP;

        for_each_sband_iftype_data(sband, i, data) {
                if (data->types_mask & BIT(iftype))
                        return data;
        }

        return NULL;
}

/**
 * ieee80211_get_he_iftype_cap - return HE capabilities for an sband's iftype
 * @sband: the sband to search for the iftype on
 * @iftype: enum nl80211_iftype
 *
 * Return: pointer to the struct ieee80211_sta_he_cap, or NULL if none found
 */
static inline const struct ieee80211_sta_he_cap *
ieee80211_get_he_iftype_cap(const struct ieee80211_supported_band *sband,
                            u8 iftype)
{
        const struct ieee80211_sband_iftype_data *data =
                ieee80211_get_sband_iftype_data(sband, iftype);

        if (data && data->he_cap.has_he)
                return &data->he_cap;

        return NULL;
}
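/*
 * Editor's illustrative sketch: defining a (made-up) per-driver iftype
 * capability table, attaching it with ieee80211_set_sband_iftype_data()
 * so the length comes from ARRAY_SIZE(), and querying it back via
 * ieee80211_get_he_iftype_cap(). All example_* names are hypothetical;
 * guarded out of real builds.
 */
#ifdef CFG80211_EXAMPLE_SNIPPETS
static const struct ieee80211_sband_iftype_data example_iftype_data[] = {
        {
                .types_mask = BIT(NL80211_IFTYPE_STATION) |
                              BIT(NL80211_IFTYPE_AP),
                /* .he_cap / .he_6ghz_capa / .eht_cap filled in here */
        },
};

static void example_init_sband(struct ieee80211_supported_band *sband)
{
        ieee80211_set_sband_iftype_data(sband, example_iftype_data);
}

static bool example_sband_has_he(const struct ieee80211_supported_band *sband)
{
        return ieee80211_get_he_iftype_cap(sband,
                                           NL80211_IFTYPE_STATION) != NULL;
}
#endif /* CFG80211_EXAMPLE_SNIPPETS */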
/**
 * ieee80211_get_he_6ghz_capa - return HE 6 GHz capabilities
 * @sband: the sband to search for the STA on
 * @iftype: the iftype to search for
 *
 * Return: the 6GHz capabilities
 */
static inline __le16
ieee80211_get_he_6ghz_capa(const struct ieee80211_supported_band *sband,
                           enum nl80211_iftype iftype)
{
        const struct ieee80211_sband_iftype_data *data =
                ieee80211_get_sband_iftype_data(sband, iftype);

        if (WARN_ON(!data || !data->he_cap.has_he))
                return 0;

        return data->he_6ghz_capa.capa;
}

/**
 * ieee80211_get_eht_iftype_cap - return EHT capabilities for an sband's iftype
 * @sband: the sband to search for the iftype on
 * @iftype: enum nl80211_iftype
 *
 * Return: pointer to the struct ieee80211_sta_eht_cap, or NULL if none found
 */
static inline const struct ieee80211_sta_eht_cap *
ieee80211_get_eht_iftype_cap(const struct ieee80211_supported_band *sband,
                             enum nl80211_iftype iftype)
{
        const struct ieee80211_sband_iftype_data *data =
                ieee80211_get_sband_iftype_data(sband, iftype);

        if (data && data->eht_cap.has_eht)
                return &data->eht_cap;

        return NULL;
}

/**
 * wiphy_read_of_freq_limits - read frequency limits from device tree
 *
 * @wiphy: the wireless device to get extra limits for
 *
 * Some devices may have extra limitations specified in DT. This may be useful
 * for chipsets that normally support more bands but are limited due to board
 * design (e.g. by antennas or external power amplifier).
 *
 * This function reads info from DT and uses it to *modify* channels (disable
 * unavailable ones). It's usually a *bad* idea to use it in drivers with
 * shared channel data as DT limitations are device specific. You should make
 * sure to call it only if channels in wiphy are copied and can be modified
 * without affecting other devices.
 *
 * As this function accesses the device node, it has to be called after
 * set_wiphy_dev. It also modifies channels, so they have to be set first.
 * If using this helper, call it before wiphy_register().
 */
#ifdef CONFIG_OF
void wiphy_read_of_freq_limits(struct wiphy *wiphy);
#else /* CONFIG_OF */
static inline void wiphy_read_of_freq_limits(struct wiphy *wiphy)
{
}
#endif /* !CONFIG_OF */

/*
 * Wireless hardware/device configuration structures and methods
 */

/**
 * DOC: Actions and configuration
 *
 * Each wireless device and each virtual interface offer a set of configuration
 * operations and other actions that are invoked by userspace. Each of these
 * actions is described in the operations structure, and the parameters these
 * operations use are described separately.
 *
 * Additionally, some operations are asynchronous and expect to get status
 * information via some functions that drivers need to call.
 *
 * Scanning and BSS list handling with its associated functionality is
 * described in a separate chapter.
 */

#define VHT_MUMIMO_GROUPS_DATA_LEN (WLAN_MEMBERSHIP_LEN +\
                                    WLAN_USER_POSITION_LEN)
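/*
 * Editor's illustrative sketch of the ordering constraints documented
 * for wiphy_read_of_freq_limits() above: set the device first, have
 * the (privately owned) channels registered, apply the DT limits, and
 * only then register the wiphy. set_wiphy_dev() and wiphy_register()
 * are declared later in this header; the function name is made up and
 * the snippet is guarded out of real builds.
 */
#ifdef CFG80211_EXAMPLE_SNIPPETS
static int example_register_wiphy(struct wiphy *wiphy, struct device *dev)
{
        set_wiphy_dev(wiphy, dev);
        /* channels must already be set up before applying DT limits */
        wiphy_read_of_freq_limits(wiphy);
        return wiphy_register(wiphy);
}
#endif /* CFG80211_EXAMPLE_SNIPPETS */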
/**
 * struct vif_params - describes virtual interface parameters
 * @flags: monitor interface flags, unchanged if 0, otherwise
 *      %MONITOR_FLAG_CHANGED will be set
 * @use_4addr: use 4-address frames
 * @macaddr: address to use for this virtual interface.
 *      If this parameter is set to zero address the driver may
 *      determine the address as needed.
 *      This feature is only fully supported by drivers that enable the
 *      %NL80211_FEATURE_MAC_ON_CREATE flag. Others may support creating
 *      only p2p devices with specified MAC.
 * @vht_mumimo_groups: MU-MIMO groupID, used for monitoring MU-MIMO packets
 *      belonging to that MU-MIMO groupID; %NULL if not changed
 * @vht_mumimo_follow_addr: MU-MIMO follow address, used for monitoring
 *      MU-MIMO packets going to the specified station; %NULL if not changed
 */
struct vif_params {
        u32 flags;
        int use_4addr;
        u8 macaddr[ETH_ALEN];
        const u8 *vht_mumimo_groups;
        const u8 *vht_mumimo_follow_addr;
};

/**
 * struct key_params - key information
 *
 * Information about a key
 *
 * @key: key material
 * @key_len: length of key material
 * @cipher: cipher suite selector
 * @seq: sequence counter (IV/PN) for TKIP and CCMP keys, only used
 *      with the get_key() callback, must be in little endian,
 *      length given by @seq_len.
 * @seq_len: length of @seq.
 * @vlan_id: vlan_id for VLAN group key (if nonzero)
 * @mode: key install mode (RX_TX, NO_TX or SET_TX)
 */
struct key_params {
        const u8 *key;
        const u8 *seq;
        int key_len;
        int seq_len;
        u16 vlan_id;
        u32 cipher;
        enum nl80211_key_mode mode;
};

/**
 * struct cfg80211_chan_def - channel definition
 * @chan: the (control) channel
 * @width: channel width
 * @center_freq1: center frequency of first segment
 * @center_freq2: center frequency of second segment
 *      (only with 80+80 MHz)
 * @edmg: define the EDMG channels configuration.
 *      If edmg is requested (i.e. the .channels member is non-zero),
 *      chan will define the primary channel and all other
 *      parameters are ignored.
 * @freq1_offset: offset from @center_freq1, in KHz
 * @punctured: mask of the punctured 20 MHz subchannels, with
 *      bits turned on being disabled (punctured); numbered
 *      from lower to higher frequency (like in the spec)
 * @s1g_primary_2mhz: Indicates if the control channel pointed to
 *      by 'chan' exists as a 1 MHz primary subchannel within an
 *      S1G 2 MHz primary channel.
 */
struct cfg80211_chan_def {
        struct ieee80211_channel *chan;
        enum nl80211_chan_width width;
        u32 center_freq1;
        u32 center_freq2;
        struct ieee80211_edmg edmg;
        u16 freq1_offset;
        u16 punctured;
        bool s1g_primary_2mhz;
};

/*
 * cfg80211_bitrate_mask - masks for bitrate control
 */
struct cfg80211_bitrate_mask {
        struct {
                u32 legacy;
                u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
                u16 vht_mcs[NL80211_VHT_NSS_MAX];
                u16 he_mcs[NL80211_HE_NSS_MAX];
                u16 eht_mcs[NL80211_EHT_NSS_MAX];
                enum nl80211_txrate_gi gi;
                enum nl80211_he_gi he_gi;
                enum nl80211_eht_gi eht_gi;
                enum nl80211_he_ltf he_ltf;
                enum nl80211_eht_ltf eht_ltf;
        } control[NUM_NL80211_BANDS];
};
* @noack: noack configuration value for the TID * @retry_long: long retry count value * @retry_short: short retry count value * @ampdu: Enable/Disable MPDU aggregation * @rtscts: Enable/Disable RTS/CTS * @amsdu: Enable/Disable MSDU aggregation * @txrate_type: Tx bitrate mask type * @txrate_mask: Tx bitrate to be applied for the TID */ struct cfg80211_tid_cfg { bool config_override; u8 tids; u64 mask; enum nl80211_tid_config noack; u8 retry_long, retry_short; enum nl80211_tid_config ampdu; enum nl80211_tid_config rtscts; enum nl80211_tid_config amsdu; enum nl80211_tx_rate_setting txrate_type; struct cfg80211_bitrate_mask txrate_mask; }; /** * struct cfg80211_tid_config - TID configuration * @peer: Station's MAC address * @n_tid_conf: Number of TID specific configurations to be applied * @tid_conf: Configuration change info */ struct cfg80211_tid_config { const u8 *peer; u32 n_tid_conf; struct cfg80211_tid_cfg tid_conf[] __counted_by(n_tid_conf); }; /** * struct cfg80211_fils_aad - FILS AAD data * @macaddr: STA MAC address * @kek: FILS KEK * @kek_len: FILS KEK length * @snonce: STA Nonce * @anonce: AP Nonce */ struct cfg80211_fils_aad { const u8 *macaddr; const u8 *kek; u8 kek_len; const u8 *snonce; const u8 *anonce; }; /** * struct cfg80211_set_hw_timestamp - enable/disable HW timestamping * @macaddr: peer MAC address. NULL to enable/disable HW timestamping for all * addresses. * @enable: if set, enable HW timestamping for the specified MAC address. * Otherwise disable HW timestamping for the specified MAC address. */ struct cfg80211_set_hw_timestamp { const u8 *macaddr; bool enable; }; /** * cfg80211_get_chandef_type - return old channel type from chandef * @chandef: the channel definition * * Return: The old channel type (NOHT, HT20, HT40+/-) from a given * chandef, which must have a bandwidth allowing this conversion. */ static inline enum nl80211_channel_type cfg80211_get_chandef_type(const struct cfg80211_chan_def *chandef) { switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: return NL80211_CHAN_NO_HT; case NL80211_CHAN_WIDTH_20: return NL80211_CHAN_HT20; case NL80211_CHAN_WIDTH_40: if (chandef->center_freq1 > chandef->chan->center_freq) return NL80211_CHAN_HT40PLUS; return NL80211_CHAN_HT40MINUS; default: WARN_ON(1); return NL80211_CHAN_NO_HT; } } /** * cfg80211_chandef_create - create channel definition using channel type * @chandef: the channel definition struct to fill * @channel: the control channel * @chantype: the channel type * * Given a channel type, create a channel definition. */ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef, struct ieee80211_channel *channel, enum nl80211_channel_type chantype); /** * cfg80211_chandef_identical - check if two channel definitions are identical * @chandef1: first channel definition * @chandef2: second channel definition * * Return: %true if the channels defined by the channel definitions are * identical, %false otherwise. */ static inline bool cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1, const struct cfg80211_chan_def *chandef2) { return (chandef1->chan == chandef2->chan && chandef1->width == chandef2->width && chandef1->center_freq1 == chandef2->center_freq1 && chandef1->freq1_offset == chandef2->freq1_offset && chandef1->center_freq2 == chandef2->center_freq2 && chandef1->punctured == chandef2->punctured); } /** * cfg80211_chandef_is_edmg - check if chandef represents an EDMG channel * * @chandef: the channel definition * * Return: %true if EDMG defined, %false otherwise.
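 *
 * Example (an illustrative sketch; my_setup_edmg() is a hypothetical
 * driver helper, not part of this API)::
 *
 *	if (cfg80211_chandef_is_edmg(chandef))
 *		my_setup_edmg(chandef->edmg.channels,
 *			      chandef->edmg.bw_config);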
*/ static inline bool cfg80211_chandef_is_edmg(const struct cfg80211_chan_def *chandef) { return chandef->edmg.channels || chandef->edmg.bw_config; } /** * cfg80211_chandef_is_s1g - check if chandef represents an S1G channel * @chandef: the channel definition * * Return: %true if S1G. */ static inline bool cfg80211_chandef_is_s1g(const struct cfg80211_chan_def *chandef) { return chandef->chan->band == NL80211_BAND_S1GHZ; } /** * cfg80211_chandef_compatible - check if two channel definitions are compatible * @chandef1: first channel definition * @chandef2: second channel definition * * Return: %NULL if the given channel definitions are incompatible, * chandef1 or chandef2 otherwise. */ const struct cfg80211_chan_def * cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1, const struct cfg80211_chan_def *chandef2); /** * nl80211_chan_width_to_mhz - get the channel width in MHz * @chan_width: the channel width from &enum nl80211_chan_width * * Return: channel width in MHz if the chan_width from &enum nl80211_chan_width * is valid. -1 otherwise. */ int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width); /** * cfg80211_chandef_get_width - return chandef width in MHz * @c: chandef to return bandwidth for * Return: channel width in MHz for the given chandef; note that it returns * 80 for 80+80 configurations */ static inline int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c) { return nl80211_chan_width_to_mhz(c->width); } /** * cfg80211_chandef_valid - check if a channel definition is valid * @chandef: the channel definition to check * Return: %true if the channel definition is valid. %false otherwise. */ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef); /** * cfg80211_chandef_usable - check if secondary channels can be used * @wiphy: the wiphy to validate against * @chandef: the channel definition to check * @prohibited_flags: the regulatory channel flags that must not be set * Return: %true if secondary channels are usable. %false otherwise. 
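 *
 * Example (an illustrative sketch of validating a chandef before use;
 * the choice of prohibited flags is an assumption, drivers pick what
 * fits their use case)::
 *
 *	if (!cfg80211_chandef_valid(chandef) ||
 *	    !cfg80211_chandef_usable(wiphy, chandef,
 *				     IEEE80211_CHAN_NO_IR))
 *		return -EINVAL;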
*/ bool cfg80211_chandef_usable(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, u32 prohibited_flags); /** * cfg80211_chandef_dfs_required - checks if radar detection is required * @wiphy: the wiphy to validate against * @chandef: the channel definition to check * @iftype: the interface type as specified in &enum nl80211_iftype * Returns: * 1 if radar detection is required, 0 if it is not, < 0 on error */ int cfg80211_chandef_dfs_required(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype); /** * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable and we * can/need to start CAC on such a channel * @wiphy: the wiphy to validate against * @chandef: the channel definition to check * * Return: true if all channels available and at least * one channel requires CAC (NL80211_DFS_USABLE) */ bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef); /** * cfg80211_chandef_dfs_cac_time - get the DFS CAC time (in ms) for given * channel definition * @wiphy: the wiphy to validate against * @chandef: the channel definition to check * * Returns: DFS CAC time (in ms) which applies for this channel definition */ unsigned int cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef); /** * cfg80211_chandef_primary - calculate primary 40/80/160 MHz freq * @chandef: chandef to calculate for * @primary_chan_width: primary channel width to calculate center for * @punctured: punctured sub-channel bitmap, will be recalculated * according to the new bandwidth, can be %NULL * * Returns: the primary 40/80/160 MHz channel center frequency, or -1 * for errors, updating the punctured bitmap */ int cfg80211_chandef_primary(const struct cfg80211_chan_def *chandef, enum nl80211_chan_width primary_chan_width, u16 *punctured); /** * nl80211_send_chandef - sends the channel definition. * @msg: the msg to send the channel definition in * @chandef: the channel definition to send * * Returns: 0 if the channel definition was sent to @msg, < 0 on error **/ int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *chandef); /** * ieee80211_chandef_max_power - maximum transmission power for the chandef * * In some regulations, the transmit power may depend on the configured channel * bandwidth which may be defined as dBm/MHz. This function returns the actual * max_power for non-standard (narrower than 20 MHz) channels.
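 *
 * For example, on a 10 MHz channel whose regulatory limit
 * (max_reg_power) is 20 dBm and whose hardware limit (max_power) is
 * 18 dBm, this returns min(20 - 3, 18) = 17 dBm.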
* * @chandef: channel definition for the channel * * Returns: maximum allowed transmission power in dBm for the chandef */ static inline int ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef) { switch (chandef->width) { case NL80211_CHAN_WIDTH_5: return min(chandef->chan->max_reg_power - 6, chandef->chan->max_power); case NL80211_CHAN_WIDTH_10: return min(chandef->chan->max_reg_power - 3, chandef->chan->max_power); default: break; } return chandef->chan->max_power; } /** * cfg80211_any_usable_channels - check for usable channels * @wiphy: the wiphy to check for * @band_mask: which bands to check on * @prohibited_flags: which channels to not consider usable, * %IEEE80211_CHAN_DISABLED is always taken into account * * Return: %true if usable channels found, %false otherwise */ bool cfg80211_any_usable_channels(struct wiphy *wiphy, unsigned long band_mask, u32 prohibited_flags); /** * enum survey_info_flags - survey information flags * * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in * @SURVEY_INFO_IN_USE: channel is currently being used * @SURVEY_INFO_TIME: active time (in ms) was filled in * @SURVEY_INFO_TIME_BUSY: busy time was filled in * @SURVEY_INFO_TIME_EXT_BUSY: extension channel busy time was filled in * @SURVEY_INFO_TIME_RX: receive time was filled in * @SURVEY_INFO_TIME_TX: transmit time was filled in * @SURVEY_INFO_TIME_SCAN: scan time was filled in * @SURVEY_INFO_TIME_BSS_RX: local BSS receive time was filled in * * Used by the driver to indicate which info in &struct survey_info * it has filled in during the get_survey(). */ enum survey_info_flags { SURVEY_INFO_NOISE_DBM = BIT(0), SURVEY_INFO_IN_USE = BIT(1), SURVEY_INFO_TIME = BIT(2), SURVEY_INFO_TIME_BUSY = BIT(3), SURVEY_INFO_TIME_EXT_BUSY = BIT(4), SURVEY_INFO_TIME_RX = BIT(5), SURVEY_INFO_TIME_TX = BIT(6), SURVEY_INFO_TIME_SCAN = BIT(7), SURVEY_INFO_TIME_BSS_RX = BIT(8), }; /** * struct survey_info - channel survey response * * @channel: the channel this survey record reports, may be %NULL for a single * record to report global statistics * @filled: bitflag of flags from &enum survey_info_flags * @noise: channel noise in dBm. This and all following fields are * optional * @time: amount of time in ms the radio was turned on (on the channel) * @time_busy: amount of time the primary channel was sensed busy * @time_ext_busy: amount of time the extension channel was sensed busy * @time_rx: amount of time the radio spent receiving data * @time_tx: amount of time the radio spent transmitting data * @time_scan: amount of time the radio spent for scanning * @time_bss_rx: amount of time the radio spent receiving data on a local BSS * * Used by dump_survey() to report back per-channel survey information. * * This structure can later be expanded with things like * channel duty cycle etc. */ struct survey_info { struct ieee80211_channel *channel; u64 time; u64 time_busy; u64 time_ext_busy; u64 time_rx; u64 time_tx; u64 time_scan; u64 time_bss_rx; u32 filled; s8 noise; }; #define CFG80211_MAX_NUM_AKM_SUITES 10 /** * struct cfg80211_crypto_settings - Crypto settings * @wpa_versions: indicates which, if any, WPA versions are enabled * (from enum nl80211_wpa_versions) * @cipher_group: group key cipher suite (or 0 if unset) * @n_ciphers_pairwise: number of AP supported unicast ciphers * @ciphers_pairwise: unicast key cipher suites * @n_akm_suites: number of AKM suites * @akm_suites: AKM suites * @control_port: Whether user space controls IEEE 802.1X port, i.e., * sets/clears %NL80211_STA_FLAG_AUTHORIZED.
If true, the driver is * required to assume that the port is unauthorized until authorized by * user space. Otherwise, port is marked authorized by default. * @control_port_ethertype: the control port protocol that should be * allowed through even on unauthorized ports * @control_port_no_encrypt: TRUE to prevent encryption of control port * protocol frames. * @control_port_over_nl80211: TRUE if userspace expects to exchange control * port frames over NL80211 instead of the network interface. * @control_port_no_preauth: disables pre-auth rx over the nl80211 control * port for mac80211 * @psk: PSK (for devices supporting 4-way-handshake offload) * @sae_pwd: password for SAE authentication (for devices supporting SAE * offload) * @sae_pwd_len: length of SAE password (for devices supporting SAE offload) * @sae_pwe: The mechanisms allowed for SAE PWE derivation: * * NL80211_SAE_PWE_UNSPECIFIED * Not-specified, used to indicate userspace did not specify any * preference. The driver should follow its internal policy in * such a scenario. * * NL80211_SAE_PWE_HUNT_AND_PECK * Allow hunting-and-pecking loop only * * NL80211_SAE_PWE_HASH_TO_ELEMENT * Allow hash-to-element only * * NL80211_SAE_PWE_BOTH * Allow either hunting-and-pecking loop or hash-to-element */ struct cfg80211_crypto_settings { u32 wpa_versions; u32 cipher_group; int n_ciphers_pairwise; u32 ciphers_pairwise[NL80211_MAX_NR_CIPHER_SUITES]; int n_akm_suites; u32 akm_suites[CFG80211_MAX_NUM_AKM_SUITES]; bool control_port; __be16 control_port_ethertype; bool control_port_no_encrypt; bool control_port_over_nl80211; bool control_port_no_preauth; const u8 *psk; const u8 *sae_pwd; u8 sae_pwd_len; enum nl80211_sae_pwe_mechanism sae_pwe; }; /** * struct cfg80211_mbssid_config - AP settings for multiple BSSID * * @tx_wdev: pointer to the transmitted interface in the MBSSID set * @tx_link_id: link ID of the transmitted profile in an MLD. * @index: index of this AP in the multiple BSSID group. * @ema: set to true if the beacons should be sent out in EMA mode. */ struct cfg80211_mbssid_config { struct wireless_dev *tx_wdev; u8 tx_link_id; u8 index; bool ema; }; /** * struct cfg80211_mbssid_elems - Multiple BSSID elements * * @cnt: Number of elements in array %elem. * * @elem: Array of multiple BSSID element(s) to be added into Beacon frames. * @elem.data: Data for multiple BSSID elements. * @elem.len: Length of data. */ struct cfg80211_mbssid_elems { u8 cnt; struct { const u8 *data; size_t len; } elem[] __counted_by(cnt); }; /** * struct cfg80211_rnr_elems - Reduced neighbor report (RNR) elements * * @cnt: Number of elements in array %elem. * * @elem: Array of RNR element(s) to be added into Beacon frames. * @elem.data: Data for RNR elements. * @elem.len: Length of data.
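 *
 * Example (an illustrative sketch of allocating room for a number of
 * entries; the variable n and the error handling style are
 * assumptions)::
 *
 *	struct cfg80211_rnr_elems *rnr;
 *
 *	rnr = kzalloc(struct_size(rnr, elem, n), GFP_KERNEL);
 *	if (!rnr)
 *		return -ENOMEM;
 *	rnr->cnt = n;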
*/ struct cfg80211_rnr_elems { u8 cnt; struct { const u8 *data; size_t len; } elem[] __counted_by(cnt); }; /** * struct cfg80211_beacon_data - beacon data * @link_id: the link ID for the AP MLD link sending this beacon * @head: head portion of beacon (before TIM IE) * or %NULL if not changed * @tail: tail portion of beacon (after TIM IE) * or %NULL if not changed * @head_len: length of @head * @tail_len: length of @tail * @beacon_ies: extra information element(s) to add into Beacon frames or %NULL * @beacon_ies_len: length of beacon_ies in octets * @proberesp_ies: extra information element(s) to add into Probe Response * frames or %NULL * @proberesp_ies_len: length of proberesp_ies in octets * @assocresp_ies: extra information element(s) to add into (Re)Association * Response frames or %NULL * @assocresp_ies_len: length of assocresp_ies in octets * @probe_resp_len: length of probe response template (@probe_resp) * @probe_resp: probe response template (AP mode only) * @mbssid_ies: multiple BSSID elements * @rnr_ies: reduced neighbor report elements * @ftm_responder: enable FTM responder functionality; -1 for no change * (which also implies no change in LCI/civic location data) * @lci: Measurement Report element content, starting with Measurement Token * (measurement type 8) * @civicloc: Measurement Report element content, starting with Measurement * Token (measurement type 11) * @lci_len: LCI data length * @civicloc_len: Civic location data length * @he_bss_color: BSS Color settings * @he_bss_color_valid: indicates whether bss color * attribute is present in beacon data or not. */ struct cfg80211_beacon_data { unsigned int link_id; const u8 *head, *tail; const u8 *beacon_ies; const u8 *proberesp_ies; const u8 *assocresp_ies; const u8 *probe_resp; const u8 *lci; const u8 *civicloc; struct cfg80211_mbssid_elems *mbssid_ies; struct cfg80211_rnr_elems *rnr_ies; s8 ftm_responder; size_t head_len, tail_len; size_t beacon_ies_len; size_t proberesp_ies_len; size_t assocresp_ies_len; size_t probe_resp_len; size_t lci_len; size_t civicloc_len; struct cfg80211_he_bss_color he_bss_color; bool he_bss_color_valid; }; struct mac_address { u8 addr[ETH_ALEN]; }; /** * struct cfg80211_acl_data - Access control list data * * @acl_policy: ACL policy to be applied on the station's * entry specified by mac_addr * @n_acl_entries: Number of MAC address entries passed * @mac_addrs: List of MAC addresses of stations to be used for ACL */ struct cfg80211_acl_data { enum nl80211_acl_policy acl_policy; int n_acl_entries; /* Keep it last */ struct mac_address mac_addrs[] __counted_by(n_acl_entries); }; /** * struct cfg80211_fils_discovery - FILS discovery parameters from * IEEE Std 802.11ai-2016, Annex C.3 MIB detail. * * @update: Set to true if the feature configuration should be updated. * @min_interval: Minimum packet interval in TUs (0 - 10000) * @max_interval: Maximum packet interval in TUs (0 - 10000) * @tmpl_len: Template length * @tmpl: Template data for FILS discovery frame including the action * frame headers. */ struct cfg80211_fils_discovery { bool update; u32 min_interval; u32 max_interval; size_t tmpl_len; const u8 *tmpl; }; /** * struct cfg80211_unsol_bcast_probe_resp - Unsolicited broadcast probe * response parameters in 6GHz. * * @update: Set to true if the feature configuration should be updated. * @interval: Packet interval in TUs. 
Maximum allowed is 20 TU, as mentioned * in IEEE P802.11ax/D6.0 26.17.2.3.2 - AP behavior for fast passive * scanning * @tmpl_len: Template length * @tmpl: Template data for probe response */ struct cfg80211_unsol_bcast_probe_resp { bool update; u32 interval; size_t tmpl_len; const u8 *tmpl; }; /** * struct cfg80211_s1g_short_beacon - S1G short beacon data. * * @update: Set to true if the feature configuration should be updated. * @short_head: Short beacon head. * @short_tail: Short beacon tail. * @short_head_len: Short beacon head length. * @short_tail_len: Short beacon tail length. */ struct cfg80211_s1g_short_beacon { bool update; const u8 *short_head; const u8 *short_tail; size_t short_head_len; size_t short_tail_len; }; /** * struct cfg80211_ap_settings - AP configuration * * Used to configure an AP interface. * * @chandef: defines the channel to use * @beacon: beacon data * @beacon_interval: beacon interval * @dtim_period: DTIM period * @ssid: SSID to be used in the BSS (note: may be %NULL if not provided from * user space) * @ssid_len: length of @ssid * @hidden_ssid: whether to hide the SSID in Beacon/Probe Response frames * @crypto: crypto settings * @privacy: the BSS uses privacy * @auth_type: Authentication type (algorithm) * @inactivity_timeout: time in seconds to determine station's inactivity. * @p2p_ctwindow: P2P CT Window * @p2p_opp_ps: P2P opportunistic PS * @acl: ACL configuration used by drivers that support * MAC address based access control * @pbss: If set, start as a PCP instead of AP. Relevant for DMG * networks. * @beacon_rate: bitrate to be used for beacons * @ht_cap: HT capabilities (or %NULL if HT isn't enabled) * @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled) * @he_cap: HE capabilities (or %NULL if HE isn't enabled) * @eht_cap: EHT capabilities (or %NULL if EHT isn't enabled) * @eht_oper: EHT operation IE (or %NULL if EHT isn't enabled) * @ht_required: stations must support HT * @vht_required: stations must support VHT * @twt_responder: Enable Target Wake Time (TWT) responder * @he_required: stations must support HE * @sae_h2e_required: stations must support direct H2E technique in SAE * @flags: flags, as defined in &enum nl80211_ap_settings_flags * @he_obss_pd: OBSS Packet Detection settings * @he_oper: HE operation IE (or %NULL if HE isn't enabled) * @fils_discovery: FILS discovery transmission parameters * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters * @mbssid_config: AP settings for multiple BSSID * @s1g_long_beacon_period: S1G long beacon period * @s1g_short_beacon: S1G short beacon data */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; struct cfg80211_beacon_data beacon; int beacon_interval, dtim_period; const u8 *ssid; size_t ssid_len; enum nl80211_hidden_ssid hidden_ssid; struct cfg80211_crypto_settings crypto; bool privacy; enum nl80211_auth_type auth_type; int inactivity_timeout; u8 p2p_ctwindow; bool p2p_opp_ps; const struct cfg80211_acl_data *acl; bool pbss; struct cfg80211_bitrate_mask beacon_rate; const struct ieee80211_ht_cap *ht_cap; const struct ieee80211_vht_cap *vht_cap; const struct ieee80211_he_cap_elem *he_cap; const struct ieee80211_he_operation *he_oper; const struct ieee80211_eht_cap_elem *eht_cap; const struct ieee80211_eht_operation *eht_oper; bool ht_required, vht_required, he_required, sae_h2e_required; bool twt_responder; u32 flags; struct ieee80211_he_obss_pd he_obss_pd; struct cfg80211_fils_discovery fils_discovery; struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp; struct
cfg80211_mbssid_config mbssid_config; u8 s1g_long_beacon_period; struct cfg80211_s1g_short_beacon s1g_short_beacon; }; /** * struct cfg80211_ap_update - AP configuration update * * Subset of &struct cfg80211_ap_settings, for updating a running AP. * * @beacon: beacon data * @fils_discovery: FILS discovery transmission parameters * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters * @s1g_short_beacon: S1G short beacon data */ struct cfg80211_ap_update { struct cfg80211_beacon_data beacon; struct cfg80211_fils_discovery fils_discovery; struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp; struct cfg80211_s1g_short_beacon s1g_short_beacon; }; /** * struct cfg80211_csa_settings - channel switch settings * * Used for channel switch * * @chandef: defines the channel to use after the switch * @beacon_csa: beacon data while performing the switch * @counter_offsets_beacon: offsets of the counters within the beacon (tail) * @counter_offsets_presp: offsets of the counters within the probe response * @n_counter_offsets_beacon: number of CSA counters in the beacon (tail) * @n_counter_offsets_presp: number of CSA counters in the probe response * @beacon_after: beacon data to be used on the new channel * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters * @radar_required: whether radar detection is required on the new channel * @block_tx: whether transmissions should be blocked while changing * @count: number of beacons until switch * @link_id: defines the link on which channel switch is expected during * MLO. 0 in case of non-MLO. */ struct cfg80211_csa_settings { struct cfg80211_chan_def chandef; struct cfg80211_beacon_data beacon_csa; const u16 *counter_offsets_beacon; const u16 *counter_offsets_presp; unsigned int n_counter_offsets_beacon; unsigned int n_counter_offsets_presp; struct cfg80211_beacon_data beacon_after; struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp; bool radar_required; bool block_tx; u8 count; u8 link_id; }; /** * struct cfg80211_color_change_settings - color change settings * * Used for BSS color change * * @beacon_color_change: beacon data while performing the color countdown * @counter_offset_beacon: offset of the counter within the beacon (tail) * @counter_offset_presp: offset of the counter within the probe response * @beacon_next: beacon data to be used after the color change * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters * @count: number of beacons until the color change * @color: the color used after the change * @link_id: defines the link on which color change is expected during MLO. * 0 in case of non-MLO. */ struct cfg80211_color_change_settings { struct cfg80211_beacon_data beacon_color_change; u16 counter_offset_beacon; u16 counter_offset_presp; struct cfg80211_beacon_data beacon_next; struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp; u8 count; u8 color; u8 link_id; }; /** * struct iface_combination_params - input parameters for interface combinations * * Used to pass interface combination parameters * * @radio_idx: wiphy radio index or -1 for global * @num_different_channels: the number of different channels we want * to use for verification * @radar_detect: a bitmap where each bit corresponds to a channel * width where radar detection is needed, as in the definition of * &struct ieee80211_iface_combination.@radar_detect_widths * @iftype_num: array with the number of interfaces of each interface * type.
The index is the interface type as specified in &enum * nl80211_iftype. * @new_beacon_int: set this to the beacon interval of a new interface * that's not operating yet, if such is to be checked as part of * the verification */ struct iface_combination_params { int radio_idx; int num_different_channels; u8 radar_detect; int iftype_num[NUM_NL80211_IFTYPES]; u32 new_beacon_int; }; /** * enum station_parameters_apply_mask - station parameter values to apply * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp) * @STATION_PARAM_APPLY_CAPABILITY: apply new capability * @STATION_PARAM_APPLY_PLINK_STATE: apply new plink state * * Not all station parameters have in-band "no change" signalling, * for those that don't, these flags are used. */ enum station_parameters_apply_mask { STATION_PARAM_APPLY_UAPSD = BIT(0), STATION_PARAM_APPLY_CAPABILITY = BIT(1), STATION_PARAM_APPLY_PLINK_STATE = BIT(2), }; /** * struct sta_txpwr - station txpower configuration * * Used to configure txpower for station. * * @power: tx power (in dBm) to be used for sending data traffic. If tx power * is not provided, the default per-interface tx power setting applies * instead. The driver should pick the lowest tx power, either the * per-interface or the per-station one. * @type: In particular, if the TPC %type is NL80211_TX_POWER_LIMITED then tx power * will be less than or equal to specified from userspace, whereas if TPC * %type is NL80211_TX_POWER_AUTOMATIC then it indicates default tx power. * NL80211_TX_POWER_FIXED is not a valid configuration option for * per peer TPC. */ struct sta_txpwr { s16 power; enum nl80211_tx_power_setting type; }; /** * struct link_station_parameters - link station parameters * * Used to change and create a new link station. * * @mld_mac: MAC address of the station * @link_id: the link id (-1 for non-MLD station) * @link_mac: MAC address of the link * @supported_rates: supported rates in IEEE 802.11 format * (or NULL for no change) * @supported_rates_len: number of supported rates * @ht_capa: HT capabilities of station * @vht_capa: VHT capabilities of station * @opmode_notif: operating mode field from Operating Mode Notification * @opmode_notif_used: whether the operating mode field is used * @he_capa: HE capabilities of station * @he_capa_len: the length of the HE capabilities * @txpwr: transmit power for an associated station * @txpwr_set: txpwr field is set * @he_6ghz_capa: HE 6 GHz Band capabilities of station * @eht_capa: EHT capabilities of station * @eht_capa_len: the length of the EHT capabilities * @s1g_capa: S1G capabilities of station */ struct link_station_parameters { const u8 *mld_mac; int link_id; const u8 *link_mac; const u8 *supported_rates; u8 supported_rates_len; const struct ieee80211_ht_cap *ht_capa; const struct ieee80211_vht_cap *vht_capa; u8 opmode_notif; bool opmode_notif_used; const struct ieee80211_he_cap_elem *he_capa; u8 he_capa_len; struct sta_txpwr txpwr; bool txpwr_set; const struct ieee80211_he_6ghz_capa *he_6ghz_capa; const struct ieee80211_eht_cap_elem *eht_capa; u8 eht_capa_len; const struct ieee80211_s1g_cap *s1g_capa; }; /** * struct link_station_del_parameters - link station deletion parameters * * Used to delete a link station entry (or all stations). * * @mld_mac: MAC address of the station * @link_id: the link id */ struct link_station_del_parameters { const u8 *mld_mac; u32 link_id; }; /** * struct cfg80211_ttlm_params: TID to link mapping parameters * * Used for setting a TID to link mapping.
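 *
 * Example (an illustrative sketch; maps all eight TIDs to link 0 in
 * both directions)::
 *
 *	struct cfg80211_ttlm_params ttlm;
 *	int tid;
 *
 *	for (tid = 0; tid < 8; tid++) {
 *		ttlm.dlink[tid] = BIT(0);
 *		ttlm.ulink[tid] = BIT(0);
 *	}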
* * @dlink: Downlink TID to link mapping, as defined in section 9.4.2.314 * (TID-To-Link Mapping element) in Draft P802.11be_D4.0. * @ulink: Uplink TID to link mapping, as defined in section 9.4.2.314 * (TID-To-Link Mapping element) in Draft P802.11be_D4.0. */ struct cfg80211_ttlm_params { u16 dlink[8]; u16 ulink[8]; }; /** * struct station_parameters - station parameters * * Used to change and create a new station. * * @vlan: vlan interface station should belong to * @sta_flags_mask: station flags that changed * (bitmask of BIT(%NL80211_STA_FLAG_...)) * @sta_flags_set: station flags values * (bitmask of BIT(%NL80211_STA_FLAG_...)) * @listen_interval: listen interval or -1 for no change * @aid: AID or zero for no change * @vlan_id: VLAN ID for station (if nonzero) * @peer_aid: mesh peer AID or zero for no change * @plink_action: plink action to take * @plink_state: set the peer link state for a station * @uapsd_queues: bitmap of queues configured for uapsd. same format * as the AC bitmap in the QoS info field * @max_sp: max Service Period. same format as the MAX_SP in the * QoS info field (but already shifted down) * @sta_modify_mask: bitmap indicating which parameters changed * (for those that don't have a natural "no change" value), * see &enum station_parameters_apply_mask * @local_pm: local link-specific mesh power save mode (no change when set * to unknown) * @capability: station capability * @ext_capab: extended capabilities of the station * @ext_capab_len: number of extended capabilities * @supported_channels: supported channels in IEEE 802.11 format * @supported_channels_len: number of supported channels * @supported_oper_classes: supported oper classes in IEEE 802.11 format * @supported_oper_classes_len: number of supported operating classes * @support_p2p_ps: information if station supports P2P PS mechanism * @airtime_weight: airtime scheduler weight for this station * @eml_cap_present: Specifies if EML capabilities field (@eml_cap) is * present/updated * @eml_cap: EML capabilities of this station * @link_sta_params: link related params. */ struct station_parameters { struct net_device *vlan; u32 sta_flags_mask, sta_flags_set; u32 sta_modify_mask; int listen_interval; u16 aid; u16 vlan_id; u16 peer_aid; u8 plink_action; u8 plink_state; u8 uapsd_queues; u8 max_sp; enum nl80211_mesh_power_mode local_pm; u16 capability; const u8 *ext_capab; u8 ext_capab_len; const u8 *supported_channels; u8 supported_channels_len; const u8 *supported_oper_classes; u8 supported_oper_classes_len; int support_p2p_ps; u16 airtime_weight; bool eml_cap_present; u16 eml_cap; struct link_station_parameters link_sta_params; }; /** * struct station_del_parameters - station deletion parameters * * Used to delete a station entry (or all stations). * * @mac: MAC address of the station to remove or NULL to remove all stations * @subtype: Management frame subtype to use for indicating removal * (10 = Disassociation, 12 = Deauthentication) * @reason_code: Reason code for the Disassociation/Deauthentication frame * @link_id: Link ID indicating a link that stations to be flushed must be * using; valid only for MLO, but can also be -1 for MLO to really * remove all stations. 
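 *
 * Example (an illustrative sketch; %NULL mac and subtype 12 flush all
 * stations with a Deauthentication frame)::
 *
 *	struct station_del_parameters params = {
 *		.mac = NULL,
 *		.subtype = 12,
 *		.reason_code = WLAN_REASON_DEAUTH_LEAVING,
 *		.link_id = -1,
 *	};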
*/ struct station_del_parameters { const u8 *mac; u8 subtype; u16 reason_code; int link_id; }; /** * enum cfg80211_station_type - the type of station being modified * @CFG80211_STA_AP_CLIENT: client of an AP interface * @CFG80211_STA_AP_CLIENT_UNASSOC: client of an AP interface that is still * unassociated (updating properties for this type of client is permitted) * @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has * the AP MLME in the device * @CFG80211_STA_AP_STA: AP station on managed interface * @CFG80211_STA_IBSS: IBSS station * @CFG80211_STA_TDLS_PEER_SETUP: TDLS peer on managed interface (dummy entry * while TDLS setup is in progress, it moves out of this state when * being marked authorized; use this only if TDLS with external setup is * supported/used) * @CFG80211_STA_TDLS_PEER_ACTIVE: TDLS peer on managed interface (active * entry that is operating, has been marked authorized by userspace) * @CFG80211_STA_MESH_PEER_KERNEL: peer on mesh interface (kernel managed) * @CFG80211_STA_MESH_PEER_USER: peer on mesh interface (user managed) */ enum cfg80211_station_type { CFG80211_STA_AP_CLIENT, CFG80211_STA_AP_CLIENT_UNASSOC, CFG80211_STA_AP_MLME_CLIENT, CFG80211_STA_AP_STA, CFG80211_STA_IBSS, CFG80211_STA_TDLS_PEER_SETUP, CFG80211_STA_TDLS_PEER_ACTIVE, CFG80211_STA_MESH_PEER_KERNEL, CFG80211_STA_MESH_PEER_USER, }; /** * cfg80211_check_station_change - validate parameter changes * @wiphy: the wiphy this operates on * @params: the new parameters for a station * @statype: the type of station being modified * * Utility function for the @change_station driver method. Call this function * with the appropriate station type after looking up the station (and checking * that it exists). It will verify whether the station change is acceptable. * * Return: 0 if the change is acceptable, otherwise an error code. Note that * it may modify the parameters for backward compatibility reasons, so don't * use them before calling this. */ int cfg80211_check_station_change(struct wiphy *wiphy, struct station_parameters *params, enum cfg80211_station_type statype); /** * enum rate_info_flags - bitrate info flags * * Used by the driver to indicate the specific rate transmission * type for 802.11n and newer transmissions. * * @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval * @RATE_INFO_FLAGS_DMG: 60GHz MCS * @RATE_INFO_FLAGS_HE_MCS: HE MCS information * @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode * @RATE_INFO_FLAGS_EXTENDED_SC_DMG: 60GHz extended SC MCS * @RATE_INFO_FLAGS_EHT_MCS: EHT MCS information * @RATE_INFO_FLAGS_S1G_MCS: MCS field filled with S1G MCS */ enum rate_info_flags { RATE_INFO_FLAGS_MCS = BIT(0), RATE_INFO_FLAGS_VHT_MCS = BIT(1), RATE_INFO_FLAGS_SHORT_GI = BIT(2), RATE_INFO_FLAGS_DMG = BIT(3), RATE_INFO_FLAGS_HE_MCS = BIT(4), RATE_INFO_FLAGS_EDMG = BIT(5), RATE_INFO_FLAGS_EXTENDED_SC_DMG = BIT(6), RATE_INFO_FLAGS_EHT_MCS = BIT(7), RATE_INFO_FLAGS_S1G_MCS = BIT(8), }; /** * enum rate_info_bw - rate bandwidth information * * Used by the driver to indicate the rate bandwidth.
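 *
 * Example (an illustrative sketch; fills a 2x2 HE rate in &struct
 * rate_info, defined below)::
 *
 *	struct rate_info ri = {
 *		.flags = RATE_INFO_FLAGS_HE_MCS,
 *		.mcs = 7,
 *		.nss = 2,
 *		.bw = RATE_INFO_BW_80,
 *	};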
* * @RATE_INFO_BW_5: 5 MHz bandwidth * @RATE_INFO_BW_10: 10 MHz bandwidth * @RATE_INFO_BW_20: 20 MHz bandwidth * @RATE_INFO_BW_40: 40 MHz bandwidth * @RATE_INFO_BW_80: 80 MHz bandwidth * @RATE_INFO_BW_160: 160 MHz bandwidth * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation * @RATE_INFO_BW_320: 320 MHz bandwidth * @RATE_INFO_BW_EHT_RU: bandwidth determined by EHT RU allocation * @RATE_INFO_BW_1: 1 MHz bandwidth * @RATE_INFO_BW_2: 2 MHz bandwidth * @RATE_INFO_BW_4: 4 MHz bandwidth * @RATE_INFO_BW_8: 8 MHz bandwidth * @RATE_INFO_BW_16: 16 MHz bandwidth */ enum rate_info_bw { RATE_INFO_BW_20 = 0, RATE_INFO_BW_5, RATE_INFO_BW_10, RATE_INFO_BW_40, RATE_INFO_BW_80, RATE_INFO_BW_160, RATE_INFO_BW_HE_RU, RATE_INFO_BW_320, RATE_INFO_BW_EHT_RU, RATE_INFO_BW_1, RATE_INFO_BW_2, RATE_INFO_BW_4, RATE_INFO_BW_8, RATE_INFO_BW_16, }; /** * struct rate_info - bitrate information * * Information about a receiving or transmitting bitrate * * @flags: bitflag of flags from &enum rate_info_flags * @legacy: bitrate in 100kbit/s for 802.11abg * @mcs: mcs index if struct describes an HT/VHT/HE/EHT/S1G rate * @nss: number of streams (VHT & HE only) * @bw: bandwidth (from &enum rate_info_bw) * @he_gi: HE guard interval (from &enum nl80211_he_gi) * @he_dcm: HE DCM value * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc, * only valid if bw is %RATE_INFO_BW_HE_RU) * @n_bonded_ch: In case of EDMG the number of bonded channels (1-4) * @eht_gi: EHT guard interval (from &enum nl80211_eht_gi) * @eht_ru_alloc: EHT RU allocation (from &enum nl80211_eht_ru_alloc, * only valid if bw is %RATE_INFO_BW_EHT_RU) */ struct rate_info { u16 flags; u16 legacy; u8 mcs; u8 nss; u8 bw; u8 he_gi; u8 he_dcm; u8 he_ru_alloc; u8 n_bonded_ch; u8 eht_gi; u8 eht_ru_alloc; }; /** * enum bss_param_flags - BSS parameter flags * * Used by the driver in &struct sta_bss_parameters to indicate which * BSS parameters are enabled.
* * @BSS_PARAM_FLAGS_CTS_PROT: whether CTS protection is enabled * @BSS_PARAM_FLAGS_SHORT_PREAMBLE: whether short preamble is enabled * @BSS_PARAM_FLAGS_SHORT_SLOT_TIME: whether short slot time is enabled */ enum bss_param_flags { BSS_PARAM_FLAGS_CTS_PROT = BIT(0), BSS_PARAM_FLAGS_SHORT_PREAMBLE = BIT(1), BSS_PARAM_FLAGS_SHORT_SLOT_TIME = BIT(2), }; /** * struct sta_bss_parameters - BSS parameters for the attached station * * Information about the currently associated BSS * * @flags: bitflag of flags from &enum bss_param_flags * @dtim_period: DTIM period for the BSS * @beacon_interval: beacon interval */ struct sta_bss_parameters { u8 flags; u8 dtim_period; u16 beacon_interval; }; /** * struct cfg80211_txq_stats - TXQ statistics for this TID * @filled: bitmap of flags using the bits of &enum nl80211_txq_stats to * indicate the relevant values in this struct are filled * @backlog_bytes: total number of bytes currently backlogged * @backlog_packets: total number of packets currently backlogged * @flows: number of new flows seen * @drops: total number of packets dropped * @ecn_marks: total number of packets marked with ECN CE * @overlimit: number of drops due to queue space overflow * @overmemory: number of drops due to memory limit overflow * @collisions: number of hash collisions * @tx_bytes: total number of bytes dequeued * @tx_packets: total number of packets dequeued * @max_flows: maximum number of flows supported */ struct cfg80211_txq_stats { u32 filled; u32 backlog_bytes; u32 backlog_packets; u32 flows; u32 drops; u32 ecn_marks; u32 overlimit; u32 overmemory; u32 collisions; u32 tx_bytes; u32 tx_packets; u32 max_flows; }; /** * struct cfg80211_tid_stats - per-TID statistics * @filled: bitmap of flags using the bits of &enum nl80211_tid_stats to * indicate the relevant values in this struct are filled * @rx_msdu: number of received MSDUs * @tx_msdu: number of (attempted) transmitted MSDUs * @tx_msdu_retries: number of retries (not counting the first) for * transmitted MSDUs * @tx_msdu_failed: number of failed transmitted MSDUs * @txq_stats: TXQ statistics */ struct cfg80211_tid_stats { u32 filled; u64 rx_msdu; u64 tx_msdu; u64 tx_msdu_retries; u64 tx_msdu_failed; struct cfg80211_txq_stats txq_stats; }; #define IEEE80211_MAX_CHAINS 4 /** * struct link_station_info - link station information * * Link station information filled by driver for get_station() and * dump_station(). * @filled: bit flag of flags using the bits of &enum nl80211_sta_info to * indicate the relevant values in this struct for them * @connected_time: time (in secs) since the station's link was last connected * @inactive_time: time since last activity of the link station (tx/rx), * in milliseconds * @assoc_at: boottime (ns) of the last association of the station's link * @rx_bytes: bytes (size of MPDUs) received from this link of station * @tx_bytes: bytes (size of MPDUs) transmitted to this link of station * @signal: The signal strength, type depends on the wiphy's signal_type. * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. * @signal_avg: Average signal strength, type depends on the wiphy's * signal_type.
For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. * @chains: bitmask for filled values in @chain_signal, @chain_signal_avg * @chain_signal: per-chain signal strength of last received packet in dBm * @chain_signal_avg: per-chain signal strength average in dBm * @txrate: current unicast bitrate from this link of station * @rxrate: current unicast bitrate to this link of station * @rx_packets: packets (MSDUs & MMPDUs) received from this link of station * @tx_packets: packets (MSDUs & MMPDUs) transmitted to this link of station * @tx_retries: cumulative retry counts (MPDUs) for this link of station * @tx_failed: number of failed transmissions (MPDUs) (retries exceeded, no ACK) * @rx_dropped_misc: Dropped for unspecified reason. * @bss_param: current BSS parameters * @beacon_loss_count: Number of times beacon loss event has triggered. * @expected_throughput: expected throughput in kbps (including 802.11 headers) * towards this station. * @rx_beacon: number of beacons received from this peer * @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received * from this peer * @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer * @tx_duration: aggregate PPDU duration(usecs) for all the frames to a peer * @airtime_weight: current airtime scheduling weight * @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last * (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs. * Note that this doesn't use the @filled bit, but is used if non-NULL. * @ack_signal: signal strength (in dBm) of the last ACK frame. * @avg_ack_signal: average RSSI value of the ACK frames for the MSDUs that * have been sent. * @rx_mpdu_count: number of MPDUs received from this station * @fcs_err_count: number of packets (MPDUs) received from this station with * an FCS error. This counter should be incremented only when TA of the * received packet with an FCS error matches the peer MAC address. * @addr: For MLO STA connection, filled with the address of the station's link. */ struct link_station_info { u64 filled; u32 connected_time; u32 inactive_time; u64 assoc_at; u64 rx_bytes; u64 tx_bytes; s8 signal; s8 signal_avg; u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; s8 chain_signal_avg[IEEE80211_MAX_CHAINS]; struct rate_info txrate; struct rate_info rxrate; u32 rx_packets; u32 tx_packets; u32 tx_retries; u32 tx_failed; u32 rx_dropped_misc; struct sta_bss_parameters bss_param; u32 beacon_loss_count; u32 expected_throughput; u64 tx_duration; u64 rx_duration; u64 rx_beacon; u8 rx_beacon_signal_avg; u16 airtime_weight; s8 ack_signal; s8 avg_ack_signal; struct cfg80211_tid_stats *pertid; u32 rx_mpdu_count; u32 fcs_err_count; u8 addr[ETH_ALEN] __aligned(2); }; /** * struct station_info - station information * * Station information filled by driver for get_station() and dump_station(). * * @filled: bitflag of flags using the bits of &enum nl80211_sta_info to * indicate the relevant values in this struct for them * @connected_time: time (in secs) since the station last connected * @inactive_time: time since last station activity (tx/rx) in milliseconds * @assoc_at: boottime (ns) of the last association * @rx_bytes: bytes (size of MPDUs) received from this station * @tx_bytes: bytes (size of MPDUs) transmitted to this station * @signal: The signal strength, type depends on the wiphy's signal_type. * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. * @signal_avg: Average signal strength, type depends on the wiphy's signal_type.
* For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. * @chains: bitmask for filled values in @chain_signal, @chain_signal_avg * @chain_signal: per-chain signal strength of last received packet in dBm * @chain_signal_avg: per-chain signal strength average in dBm * @txrate: current unicast bitrate from this station * @rxrate: current unicast bitrate to this station * @rx_packets: packets (MSDUs & MMPDUs) received from this station * @tx_packets: packets (MSDUs & MMPDUs) transmitted to this station * @tx_retries: cumulative retry counts (MPDUs) * @tx_failed: number of failed transmissions (MPDUs) (retries exceeded, no ACK) * @rx_dropped_misc: Dropped for unspecified reason. * @bss_param: current BSS parameters * @generation: generation number for nl80211 dumps. * This number should increase every time the list of stations * changes, i.e. when a station is added or removed, so that * userspace can tell whether it got a consistent snapshot. * @beacon_loss_count: Number of times beacon loss event has triggered. * @assoc_req_ies: IEs from (Re)Association Request. * This is used only when in AP mode with drivers that do not use * user space MLME/SME implementation. The information is provided for * the cfg80211_new_sta() calls to notify user space of the IEs. * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets. * @sta_flags: station flags mask & values * @t_offset: Time offset of the station relative to this host. * @llid: mesh local link id * @plid: mesh peer link id * @plink_state: mesh peer link state * @connected_to_gate: true if mesh STA has a path to mesh gate * @connected_to_as: true if mesh STA has a path to authentication server * @airtime_link_metric: mesh airtime link metric. * @local_pm: local mesh STA power save mode * @peer_pm: peer mesh STA power save mode * @nonpeer_pm: non-peer mesh STA power save mode * @expected_throughput: expected throughput in kbps (including 802.11 headers) * towards this station. * @rx_beacon: number of beacons received from this peer * @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received * from this peer * @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer * @tx_duration: aggregate PPDU duration(usecs) for all the frames to a peer * @airtime_weight: current airtime scheduling weight * @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last * (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs. * Note that this doesn't use the @filled bit, but is used if non-NULL. * @ack_signal: signal strength (in dBm) of the last ACK frame. * @avg_ack_signal: average RSSI value of the ACK frames for the MSDUs that * have been sent. * @rx_mpdu_count: number of MPDUs received from this station * @fcs_err_count: number of packets (MPDUs) received from this station with * an FCS error. This counter should be incremented only when TA of the * received packet with an FCS error matches the peer MAC address. * @mlo_params_valid: Indicates @assoc_link_id and @mld_addr fields are filled * by driver. Drivers use this only in cfg80211_new_sta() calls when AP * MLD's MLME/SME is offloaded to the driver. Drivers won't fill this * information in cfg80211_del_sta_sinfo(), get_station() and * dump_station() callbacks. * @assoc_link_id: Indicates MLO link ID of the AP, with which the station * completed (re)association. This information is filled for both MLO * and non-MLO STA connections when the AP is affiliated with an MLD. * @mld_addr: For MLO STA connection, filled with MLD address of the station.
* For non-MLO STA connection, filled with all zeros. * @assoc_resp_ies: IEs from (Re)Association Response. * This is used only when in AP mode with drivers that do not use user * space MLME/SME implementation. The information is provided only for the * cfg80211_new_sta() calls to notify user space of the IEs. Drivers won't * fill this information in cfg80211_del_sta_sinfo(), get_station() and * dump_station() callbacks. User space needs this information to determine * the accepted and rejected affiliated links of the connected station. * @assoc_resp_ies_len: Length of @assoc_resp_ies buffer in octets. * @valid_links: bitmap of valid links, or 0 for non-MLO. Drivers fill this * information in cfg80211_new_sta(), cfg80211_del_sta_sinfo(), * get_station() and dump_station() callbacks. * @links: reference to link sta entries for MLO STA, all link specific * information is accessed through links[link_id]. */ struct station_info { u64 filled; u32 connected_time; u32 inactive_time; u64 assoc_at; u64 rx_bytes; u64 tx_bytes; s8 signal; s8 signal_avg; u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; s8 chain_signal_avg[IEEE80211_MAX_CHAINS]; struct rate_info txrate; struct rate_info rxrate; u32 rx_packets; u32 tx_packets; u32 tx_retries; u32 tx_failed; u32 rx_dropped_misc; struct sta_bss_parameters bss_param; struct nl80211_sta_flag_update sta_flags; int generation; u32 beacon_loss_count; const u8 *assoc_req_ies; size_t assoc_req_ies_len; s64 t_offset; u16 llid; u16 plid; u8 plink_state; u8 connected_to_gate; u8 connected_to_as; u32 airtime_link_metric; enum nl80211_mesh_power_mode local_pm; enum nl80211_mesh_power_mode peer_pm; enum nl80211_mesh_power_mode nonpeer_pm; u32 expected_throughput; u16 airtime_weight; s8 ack_signal; s8 avg_ack_signal; struct cfg80211_tid_stats *pertid; u64 tx_duration; u64 rx_duration; u64 rx_beacon; u8 rx_beacon_signal_avg; u32 rx_mpdu_count; u32 fcs_err_count; bool mlo_params_valid; u8 assoc_link_id; u8 mld_addr[ETH_ALEN] __aligned(2); const u8 *assoc_resp_ies; size_t assoc_resp_ies_len; u16 valid_links; struct link_station_info *links[IEEE80211_MLD_MAX_NUM_LINKS]; }; /** * struct cfg80211_sar_sub_specs - sub specs limit * @power: power limitation in 0.25 dBm units * @freq_range_index: index the power limitation applies to */ struct cfg80211_sar_sub_specs { s32 power; u32 freq_range_index; }; /** * struct cfg80211_sar_specs - SAR limit specs * @type: the limit type, e.g. power in 0.25 dBm units * @num_sub_specs: number of SAR sub specs * @sub_specs: memory to hold the SAR sub specs */ struct cfg80211_sar_specs { enum nl80211_sar_type type; u32 num_sub_specs; struct cfg80211_sar_sub_specs sub_specs[] __counted_by(num_sub_specs); }; /** * struct cfg80211_sar_freq_ranges - SAR frequency ranges * @start_freq: start range edge frequency * @end_freq: end range edge frequency */ struct cfg80211_sar_freq_ranges { u32 start_freq; u32 end_freq; }; /** * struct cfg80211_sar_capa - SAR limit capability * @type: the limit type, e.g. power in 0.25 dBm units * @num_freq_ranges: number of frequency ranges * @freq_ranges: memory to hold the freq ranges. * * Note: WLAN driver may append new ranges or split an existing * range into smaller ones and then append them.
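 *
 * Example (an illustrative sketch of a driver-defined capability with
 * two ranges; the names and frequency values are made up)::
 *
 *	static const struct cfg80211_sar_freq_ranges my_ranges[] = {
 *		{ .start_freq = 2402, .end_freq = 2494 },
 *		{ .start_freq = 5170, .end_freq = 5875 },
 *	};
 *
 *	static const struct cfg80211_sar_capa my_sar_capa = {
 *		.type = NL80211_SAR_TYPE_POWER,
 *		.num_freq_ranges = ARRAY_SIZE(my_ranges),
 *		.freq_ranges = my_ranges,
 *	};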
*/ struct cfg80211_sar_capa { enum nl80211_sar_type type; u32 num_freq_ranges; const struct cfg80211_sar_freq_ranges *freq_ranges; }; #if IS_ENABLED(CONFIG_CFG80211) /** * cfg80211_get_station - retrieve information about a given station * @dev: the device where the station is supposed to be connected to * @mac_addr: the mac address of the station of interest * @sinfo: pointer to the structure to fill with the information * * Return: 0 on success and sinfo is filled with the available information; * otherwise returns a negative error code and the content of sinfo has to be * considered undefined. */ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo); #else static inline int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo) { return -ENOENT; } #endif /** * enum monitor_flags - monitor flags * * Monitor interface configuration flags. Note that these must be the bits * according to the nl80211 flags. * * @MONITOR_FLAG_CHANGED: set if the flags were changed * @MONITOR_FLAG_FCSFAIL: pass frames with bad FCS * @MONITOR_FLAG_PLCPFAIL: pass frames with bad PLCP * @MONITOR_FLAG_CONTROL: pass control frames * @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering * @MONITOR_FLAG_COOK_FRAMES: deprecated, will unconditionally be refused * @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address * @MONITOR_FLAG_SKIP_TX: do not pass locally transmitted frames */ enum monitor_flags { MONITOR_FLAG_CHANGED = BIT(__NL80211_MNTR_FLAG_INVALID), MONITOR_FLAG_FCSFAIL = BIT(NL80211_MNTR_FLAG_FCSFAIL), MONITOR_FLAG_PLCPFAIL = BIT(NL80211_MNTR_FLAG_PLCPFAIL), MONITOR_FLAG_CONTROL = BIT(NL80211_MNTR_FLAG_CONTROL), MONITOR_FLAG_OTHER_BSS = BIT(NL80211_MNTR_FLAG_OTHER_BSS), MONITOR_FLAG_COOK_FRAMES = BIT(NL80211_MNTR_FLAG_COOK_FRAMES), MONITOR_FLAG_ACTIVE = BIT(NL80211_MNTR_FLAG_ACTIVE), MONITOR_FLAG_SKIP_TX = BIT(NL80211_MNTR_FLAG_SKIP_TX), }; /** * enum mpath_info_flags - mesh path information flags * * Used by the driver to indicate which info in &struct mpath_info it has filled * in during get_mpath() or dump_mpath(). * * @MPATH_INFO_FRAME_QLEN: @frame_qlen filled * @MPATH_INFO_SN: @sn filled * @MPATH_INFO_METRIC: @metric filled * @MPATH_INFO_EXPTIME: @exptime filled * @MPATH_INFO_DISCOVERY_TIMEOUT: @discovery_timeout filled * @MPATH_INFO_DISCOVERY_RETRIES: @discovery_retries filled * @MPATH_INFO_FLAGS: @flags filled * @MPATH_INFO_HOP_COUNT: @hop_count filled * @MPATH_INFO_PATH_CHANGE: @path_change_count filled */ enum mpath_info_flags { MPATH_INFO_FRAME_QLEN = BIT(0), MPATH_INFO_SN = BIT(1), MPATH_INFO_METRIC = BIT(2), MPATH_INFO_EXPTIME = BIT(3), MPATH_INFO_DISCOVERY_TIMEOUT = BIT(4), MPATH_INFO_DISCOVERY_RETRIES = BIT(5), MPATH_INFO_FLAGS = BIT(6), MPATH_INFO_HOP_COUNT = BIT(7), MPATH_INFO_PATH_CHANGE = BIT(8), }; /** * struct mpath_info - mesh path information * * Mesh path information filled by driver for get_mpath() and dump_mpath(). * * @filled: bitfield of flags from &enum mpath_info_flags * @frame_qlen: number of queued frames for this destination * @sn: target sequence number * @metric: metric (cost) of this mesh path * @exptime: expiration time for the mesh path from now, in msecs * @flags: mesh path flags from &enum mesh_path_flags * @discovery_timeout: total mesh path discovery timeout, in msecs * @discovery_retries: mesh path discovery retries * @generation: generation number for nl80211 dumps. * This number should increase every time the list of mesh paths * changes, i.e.
when a station is added or removed, so that * userspace can tell whether it got a consistent snapshot. * @hop_count: hops to destination * @path_change_count: total number of path changes to destination */ struct mpath_info { u32 filled; u32 frame_qlen; u32 sn; u32 metric; u32 exptime; u32 discovery_timeout; u8 discovery_retries; u8 flags; u8 hop_count; u32 path_change_count; int generation; }; /** * enum wiphy_bss_param_flags - bit positions for supported bss parameters. * * @WIPHY_BSS_PARAM_CTS_PROT: support changing CTS protection. * @WIPHY_BSS_PARAM_SHORT_PREAMBLE: support changing short preamble usage. * @WIPHY_BSS_PARAM_SHORT_SLOT_TIME: support changing short slot time usage. * @WIPHY_BSS_PARAM_BASIC_RATES: support reconfiguring basic rates. * @WIPHY_BSS_PARAM_AP_ISOLATE: support changing AP isolation. * @WIPHY_BSS_PARAM_HT_OPMODE: support changing HT operating mode. * @WIPHY_BSS_PARAM_P2P_CTWINDOW: support reconfiguring ctwindow. * @WIPHY_BSS_PARAM_P2P_OPPPS: support changing P2P opportunistic power-save. */ enum wiphy_bss_param_flags { WIPHY_BSS_PARAM_CTS_PROT = BIT(0), WIPHY_BSS_PARAM_SHORT_PREAMBLE = BIT(1), WIPHY_BSS_PARAM_SHORT_SLOT_TIME = BIT(2), WIPHY_BSS_PARAM_BASIC_RATES = BIT(3), WIPHY_BSS_PARAM_AP_ISOLATE = BIT(4), WIPHY_BSS_PARAM_HT_OPMODE = BIT(5), WIPHY_BSS_PARAM_P2P_CTWINDOW = BIT(6), WIPHY_BSS_PARAM_P2P_OPPPS = BIT(7), }; /** * struct bss_parameters - BSS parameters * * Used to change BSS parameters (mainly for AP mode). * * @link_id: link_id or -1 for non-MLD * @use_cts_prot: Whether to use CTS protection * (0 = no, 1 = yes, -1 = do not change) * @use_short_preamble: Whether the use of short preambles is allowed * (0 = no, 1 = yes, -1 = do not change) * @use_short_slot_time: Whether the use of short slot time is allowed * (0 = no, 1 = yes, -1 = do not change) * @basic_rates: basic rates in IEEE 802.11 format * (or NULL for no change) * @basic_rates_len: number of basic rates * @ap_isolate: do not forward packets between connected stations * (0 = no, 1 = yes, -1 = do not change) * @ht_opmode: HT Operation mode * (u16 = opmode, -1 = do not change) * @p2p_ctwindow: P2P CT Window (-1 = no change) * @p2p_opp_ps: P2P opportunistic PS (-1 = no change) */ struct bss_parameters { int link_id; int use_cts_prot; int use_short_preamble; int use_short_slot_time; const u8 *basic_rates; u8 basic_rates_len; int ap_isolate; int ht_opmode; s8 p2p_ctwindow, p2p_opp_ps; }; /** * struct mesh_config - 802.11s mesh configuration * * These parameters can be changed while the mesh is active. 
* * @dot11MeshRetryTimeout: the initial retry timeout in millisecond units used * by the Mesh Peering Open message * @dot11MeshConfirmTimeout: the initial confirm timeout in millisecond units * used while waiting for a Mesh Peering Confirm message * @dot11MeshHoldingTimeout: the confirm timeout in millisecond units used by * the mesh peering management to close a mesh peering * @dot11MeshMaxPeerLinks: the maximum number of peer links allowed on this * mesh interface * @dot11MeshMaxRetries: the maximum number of peer link open retries that can * be sent to establish a new peer link instance in a mesh * @dot11MeshTTL: the value of TTL field set at a source mesh STA * @element_ttl: the value of TTL field set at a mesh STA for path selection * elements * @auto_open_plinks: whether we should automatically open peer links when we * detect compatible mesh peers * @dot11MeshNbrOffsetMaxNeighbor: the maximum number of neighbors to * synchronize to for the 11s default synchronization method * @dot11MeshHWMPmaxPREQretries: the number of action frames containing a PREQ * that an originator mesh STA can send to a particular path target * @path_refresh_time: how frequently to refresh mesh paths in milliseconds * @min_discovery_timeout: the minimum length of time to wait until giving up on * a path discovery in milliseconds * @dot11MeshHWMPactivePathTimeout: the time (in TUs) for which mesh STAs * receiving a PREQ shall consider the forwarding information from the * root to be valid. (TU = time unit) * @dot11MeshHWMPpreqMinInterval: the minimum interval of time (in TUs) during * which a mesh STA can send only one action frame containing a PREQ * element * @dot11MeshHWMPperrMinInterval: the minimum interval of time (in TUs) during * which a mesh STA can send only one Action frame containing a PERR * element * @dot11MeshHWMPnetDiameterTraversalTime: the interval of time (in TUs) that * it takes for an HWMP information element to propagate across the mesh * @dot11MeshHWMPRootMode: the configuration of a mesh STA as root mesh STA * @dot11MeshHWMPRannInterval: the interval of time (in TUs) at which root * announcements are transmitted * @dot11MeshGateAnnouncementProtocol: whether to advertise that this mesh * station has access to a broader network beyond the MBSS. (This is * misnamed in draft 12.0: dot11MeshGateAnnouncementProtocol set to true * only means that the station will announce to others that it's a mesh * gate, but not necessarily using the gate announcement protocol. Still * keeping the same nomenclature to be in sync with the spec) * @dot11MeshForwarding: whether the Mesh STA is a forwarding or non-forwarding * entity (default is TRUE - forwarding entity) * @rssi_threshold: the threshold for average signal strength of candidate * station to establish a peer link * @ht_opmode: mesh HT protection mode * * @dot11MeshHWMPactivePathToRootTimeout: The time (in TUs) for which mesh STAs * receiving a proactive PREQ shall consider the forwarding information to * the root mesh STA to be valid. * * @dot11MeshHWMProotInterval: The interval of time (in TUs) at which proactive * PREQs are transmitted. * @dot11MeshHWMPconfirmationInterval: The minimum interval of time (in TUs) * during which a mesh STA can send only one Action frame containing * a PREQ element for root path confirmation. * @power_mode: The default mesh power save mode which will be the initial * setting for new peer links. * @dot11MeshAwakeWindowDuration: The duration in TUs the STA will remain awake * after transmitting its beacon.
 * @plink_timeout: If no tx activity is seen from a STA we've established
 *	peering with for longer than this time (in seconds), then remove it
 *	from the STA's list of peers. Default is 30 minutes.
 * @dot11MeshConnectedToAuthServer: if set to true then this mesh STA
 *	will advertise that it is connected to an authentication server
 *	in the mesh formation field.
 * @dot11MeshConnectedToMeshGate: if set to true, advertise that this STA is
 *	connected to a mesh gate in mesh formation info. If false, the
 *	value in mesh formation is determined by the presence of root paths
 *	in the mesh path table
 * @dot11MeshNolearn: Try to avoid multi-hop path discovery (e.g. PREQ/PREP
 *	for HWMP) if the destination is a direct neighbor. Note that this might
 *	not be the optimal decision as a multi-hop route might be better. So
 *	if using this setting you will likely also want to disable
 *	dot11MeshForwarding and use another mesh routing protocol on top.
 */
struct mesh_config {
	u16 dot11MeshRetryTimeout;
	u16 dot11MeshConfirmTimeout;
	u16 dot11MeshHoldingTimeout;
	u16 dot11MeshMaxPeerLinks;
	u8 dot11MeshMaxRetries;
	u8 dot11MeshTTL;
	u8 element_ttl;
	bool auto_open_plinks;
	u32 dot11MeshNbrOffsetMaxNeighbor;
	u8 dot11MeshHWMPmaxPREQretries;
	u32 path_refresh_time;
	u16 min_discovery_timeout;
	u32 dot11MeshHWMPactivePathTimeout;
	u16 dot11MeshHWMPpreqMinInterval;
	u16 dot11MeshHWMPperrMinInterval;
	u16 dot11MeshHWMPnetDiameterTraversalTime;
	u8 dot11MeshHWMPRootMode;
	bool dot11MeshConnectedToMeshGate;
	bool dot11MeshConnectedToAuthServer;
	u16 dot11MeshHWMPRannInterval;
	bool dot11MeshGateAnnouncementProtocol;
	bool dot11MeshForwarding;
	s32 rssi_threshold;
	u16 ht_opmode;
	u32 dot11MeshHWMPactivePathToRootTimeout;
	u16 dot11MeshHWMProotInterval;
	u16 dot11MeshHWMPconfirmationInterval;
	enum nl80211_mesh_power_mode power_mode;
	u16 dot11MeshAwakeWindowDuration;
	u32 plink_timeout;
	bool dot11MeshNolearn;
};

/**
 * struct mesh_setup - 802.11s mesh setup configuration
 * @chandef: defines the channel to use
 * @mesh_id: the mesh ID
 * @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes
 * @sync_method: which synchronization method to use
 * @path_sel_proto: which path selection protocol to use
 * @path_metric: which metric to use
 * @auth_id: which authentication method this mesh is using
 * @ie: vendor information elements (optional)
 * @ie_len: length of vendor information elements
 * @is_authenticated: this mesh requires authentication
 * @is_secure: this mesh uses security
 * @user_mpm: userspace handles all MPM functions
 * @dtim_period: DTIM period to use
 * @beacon_interval: beacon interval to use
 * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
 * @basic_rates: basic rates to use when creating the mesh
 * @beacon_rate: bitrate to be used for beacons
 * @userspace_handles_dfs: whether user space controls DFS operation, i.e.
 *	changes the channel when a radar is detected. This is required
 *	to operate on DFS channels.
 * @control_port_over_nl80211: TRUE if userspace expects to exchange control
 *	port frames over NL80211 instead of the network interface.
 *
 * These parameters are fixed when the mesh is created.
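 *
 * Example (illustrative sketch only, not part of the original
 * documentation; the values are hypothetical): a minimal open mesh could
 * be described as:
 *
 *	struct mesh_setup setup = {
 *		.mesh_id = "example-mesh",
 *		.mesh_id_len = 12,
 *		.beacon_interval = 100,
 *		.dtim_period = 2,
 *		.is_secure = false,
 *	};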
*/ struct mesh_setup { struct cfg80211_chan_def chandef; const u8 *mesh_id; u8 mesh_id_len; u8 sync_method; u8 path_sel_proto; u8 path_metric; u8 auth_id; const u8 *ie; u8 ie_len; bool is_authenticated; bool is_secure; bool user_mpm; u8 dtim_period; u16 beacon_interval; int mcast_rate[NUM_NL80211_BANDS]; u32 basic_rates; struct cfg80211_bitrate_mask beacon_rate; bool userspace_handles_dfs; bool control_port_over_nl80211; }; /** * struct ocb_setup - 802.11p OCB mode setup configuration * @chandef: defines the channel to use * * These parameters are fixed when connecting to the network */ struct ocb_setup { struct cfg80211_chan_def chandef; }; /** * struct ieee80211_txq_params - TX queue parameters * @ac: AC identifier * @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled * @cwmin: Minimum contention window [a value of the form 2^n-1 in the range * 1..32767] * @cwmax: Maximum contention window [a value of the form 2^n-1 in the range * 1..32767] * @aifs: Arbitration interframe space [0..255] * @link_id: link_id or -1 for non-MLD */ struct ieee80211_txq_params { enum nl80211_ac ac; u16 txop; u16 cwmin; u16 cwmax; u8 aifs; int link_id; }; /** * DOC: Scanning and BSS list handling * * The scanning process itself is fairly simple, but cfg80211 offers quite * a bit of helper functionality. To start a scan, the scan operation will * be invoked with a scan definition. This scan definition contains the * channels to scan, and the SSIDs to send probe requests for (including the * wildcard, if desired). A passive scan is indicated by having no SSIDs to * probe. Additionally, a scan request may contain extra information elements * that should be added to the probe request. The IEs are guaranteed to be * well-formed, and will not exceed the maximum length the driver advertised * in the wiphy structure. * * When scanning finds a BSS, cfg80211 needs to be notified of that, because * it is responsible for maintaining the BSS list; the driver should not * maintain a list itself. For this notification, various functions exist. * * Since drivers do not maintain a BSS list, there are also a number of * functions to search for a BSS and obtain information about it from the * BSS structure cfg80211 maintains. The BSS list is also made available * to userspace. */ /** * struct cfg80211_ssid - SSID description * @ssid: the SSID * @ssid_len: length of the ssid */ struct cfg80211_ssid { u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 ssid_len; }; /** * struct cfg80211_scan_info - information about completed scan * @scan_start_tsf: scan start time in terms of the TSF of the BSS that the * wireless device that requested the scan is connected to. If this * information is not available, this field is left zero. * @tsf_bssid: the BSSID according to which %scan_start_tsf is set. * @aborted: set to true if the scan was aborted for any reason, * userspace will be notified of that */ struct cfg80211_scan_info { u64 scan_start_tsf; u8 tsf_bssid[ETH_ALEN] __aligned(2); bool aborted; }; /** * struct cfg80211_scan_6ghz_params - relevant for 6 GHz only * * @short_ssid: short ssid to scan for * @bssid: bssid to scan for * @channel_idx: idx of the channel in the channel array in the scan request * which the above info is relevant to * @unsolicited_probe: the AP transmits unsolicited probe response every 20 TU * @short_ssid_valid: @short_ssid is valid and can be used * @psc_no_listen: when set, and the channel is a PSC channel, no need to wait * 20 TUs before starting to send probe requests. * @psd_20: The AP's 20 MHz PSD value. 
 */
struct cfg80211_scan_6ghz_params {
	u32 short_ssid;
	u32 channel_idx;
	u8 bssid[ETH_ALEN];
	bool unsolicited_probe;
	bool short_ssid_valid;
	bool psc_no_listen;
	s8 psd_20;
};

/**
 * struct cfg80211_scan_request - scan request description
 *
 * @ssids: SSIDs to scan for (active scan only)
 * @n_ssids: number of SSIDs
 * @channels: channels to scan on.
 * @n_channels: total number of channels to scan
 * @ie: optional information element(s) to add into Probe Request or %NULL
 * @ie_len: length of ie in octets
 * @duration: how long to listen on each channel, in TUs. If
 *	%duration_mandatory is not set, this is the maximum dwell time and
 *	the actual dwell time may be shorter.
 * @duration_mandatory: if set, the scan duration must be as specified by the
 *	%duration field.
 * @flags: control flags from &enum nl80211_scan_flags
 * @rates: bitmap of rates to advertise for each band
 * @wiphy: the wiphy this was for
 * @scan_start: time (in jiffies) when the scan started
 * @wdev: the wireless device to scan for
 * @no_cck: used to send probe requests at non-CCK rates in the 2 GHz band
 * @mac_addr: MAC address used with randomisation
 * @mac_addr_mask: MAC address mask used with randomisation, bits that
 *	are 0 in the mask should be randomised, bits that are 1 should
 *	be taken from the @mac_addr
 * @scan_6ghz: relevant for split scan request only,
 *	true if this is a 6 GHz scan request
 * @first_part: %true if this is the first part of a split scan request or a
 *	scan that was not split. May be %true for a @scan_6ghz scan if no other
 *	channels were requested
 * @n_6ghz_params: number of 6 GHz params
 * @scan_6ghz_params: 6 GHz params
 * @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
 * @tsf_report_link_id: for MLO, indicates the link ID of the BSS that should be
 *	used for TSF reporting. Can be set to -1 to indicate no preference.
 */
struct cfg80211_scan_request {
	struct cfg80211_ssid *ssids;
	int n_ssids;
	u32 n_channels;
	const u8 *ie;
	size_t ie_len;
	u16 duration;
	bool duration_mandatory;
	u32 flags;
	u32 rates[NUM_NL80211_BANDS];
	struct wireless_dev *wdev;
	u8 mac_addr[ETH_ALEN] __aligned(2);
	u8 mac_addr_mask[ETH_ALEN] __aligned(2);
	u8 bssid[ETH_ALEN] __aligned(2);
	struct wiphy *wiphy;
	unsigned long scan_start;
	bool no_cck;
	bool scan_6ghz;
	bool first_part;
	u32 n_6ghz_params;
	struct cfg80211_scan_6ghz_params *scan_6ghz_params;
	s8 tsf_report_link_id;

	/* keep last */
	struct ieee80211_channel *channels[];
};

static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
{
	int i;

	/* start from a fully random address */
	get_random_bytes(buf, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++) {
		/* clear the bits that are fixed by the mask ... */
		buf[i] &= ~mask[i];
		/* ... and take those bits from the given address */
		buf[i] |= addr[i] & mask[i];
	}
}

/**
 * struct cfg80211_match_set - sets of attributes to match
 *
 * @ssid: SSID to be matched; may be zero-length in case of BSSID match
 *	or no match (RSSI only)
 * @bssid: BSSID to be matched; may be all-zero BSSID in case of SSID match
 *	or no match (RSSI only)
 * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
 */
struct cfg80211_match_set {
	struct cfg80211_ssid ssid;
	u8 bssid[ETH_ALEN];
	s32 rssi_thold;
};

/**
 * struct cfg80211_sched_scan_plan - scan plan for scheduled scan
 *
 * @interval: interval between scheduled scan iterations. In seconds.
 * @iterations: number of scan iterations in this scan plan. Zero means
 *	infinite loop.
 *	The last scan plan will always have this parameter set to zero,
 *	all other scan plans will have a finite number of iterations.
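 *
 * Example (illustrative, not from the original header): a schedule of ten
 * quick iterations followed by a slow plan that repeats forever (the last
 * plan has @iterations set to zero) could be expressed as:
 *
 *	struct cfg80211_sched_scan_plan plans[] = {
 *		{ .interval = 30, .iterations = 10 },
 *		{ .interval = 300, .iterations = 0 },
 *	};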
 */
struct cfg80211_sched_scan_plan {
	u32 interval;
	u32 iterations;
};

/**
 * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
 *
 * @band: band of BSS which should match for RSSI level adjustment.
 * @delta: value of RSSI level adjustment.
 */
struct cfg80211_bss_select_adjust {
	enum nl80211_band band;
	s8 delta;
};

/**
 * struct cfg80211_sched_scan_request - scheduled scan request description
 *
 * @reqid: identifies this request.
 * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
 * @n_ssids: number of SSIDs
 * @n_channels: total number of channels to scan
 * @ie: optional information element(s) to add into Probe Request or %NULL
 * @ie_len: length of ie in octets
 * @flags: control flags from &enum nl80211_scan_flags
 * @match_sets: sets of parameters to be matched for a scan result
 *	entry to be considered valid and to be passed to the host
 *	(others are filtered out).
 *	If omitted, all results are passed.
 * @n_match_sets: number of match sets
 * @report_results: indicates that results were reported for this request
 * @wiphy: the wiphy this was for
 * @dev: the interface
 * @scan_start: start time of the scheduled scan
 * @channels: channels to scan
 * @min_rssi_thold: for drivers only supporting a single threshold, this
 *	contains the minimum over all matchsets
 * @mac_addr: MAC address used with randomisation
 * @mac_addr_mask: MAC address mask used with randomisation, bits that
 *	are 0 in the mask should be randomised, bits that are 1 should
 *	be taken from the @mac_addr
 * @scan_plans: scan plans to be executed in this scheduled scan. Lowest
 *	index must be executed first.
 * @n_scan_plans: number of scan plans, at least 1.
 * @rcu_head: RCU callback used to free the struct
 * @owner_nlportid: netlink portid of owner (if this is a request owned by a
 *	particular socket)
 * @nl_owner_dead: netlink owner socket was closed - this request must be freed
 * @list: for keeping list of requests.
 * @delay: delay in seconds to use before starting the first scan
 *	cycle. The driver may ignore this parameter and start
 *	immediately (or at any other time), if this feature is not
 *	supported.
 * @relative_rssi_set: Indicates whether @relative_rssi is set or not.
 * @relative_rssi: Relative RSSI threshold in dB to restrict scan result
 *	reporting in connected state to cases where a matching BSS is determined
 *	to have better or slightly worse RSSI than the current connected BSS.
 *	The relative RSSI threshold values are ignored in disconnected state.
 * @rssi_adjust: delta dB of RSSI preference to be given to the BSSs that belong
 *	to the specified band while deciding whether a better BSS is reported
 *	using @relative_rssi. If delta is a negative number, the BSSs that
 *	belong to the specified band will be penalized by delta dB in relative
 *	comparisons.
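 *
 * Example (illustrative, not from the original header; req is a
 * hypothetical pointer to this request): with a mask of
 * 02:ff:ff:00:00:00, bits set in the mask are taken from @mac_addr and
 * the remaining bits are randomised, which is exactly what
 * get_random_mask_addr() above implements:
 *
 *	u8 buf[ETH_ALEN];
 *
 *	get_random_mask_addr(buf, req->mac_addr, req->mac_addr_mask);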
*/ struct cfg80211_sched_scan_request { u64 reqid; struct cfg80211_ssid *ssids; int n_ssids; u32 n_channels; const u8 *ie; size_t ie_len; u32 flags; struct cfg80211_match_set *match_sets; int n_match_sets; s32 min_rssi_thold; u32 delay; struct cfg80211_sched_scan_plan *scan_plans; int n_scan_plans; u8 mac_addr[ETH_ALEN] __aligned(2); u8 mac_addr_mask[ETH_ALEN] __aligned(2); bool relative_rssi_set; s8 relative_rssi; struct cfg80211_bss_select_adjust rssi_adjust; /* internal */ struct wiphy *wiphy; struct net_device *dev; unsigned long scan_start; bool report_results; struct rcu_head rcu_head; u32 owner_nlportid; bool nl_owner_dead; struct list_head list; /* keep last */ struct ieee80211_channel *channels[] __counted_by(n_channels); }; /** * enum cfg80211_signal_type - signal type * * @CFG80211_SIGNAL_TYPE_NONE: no signal strength information available * @CFG80211_SIGNAL_TYPE_MBM: signal strength in mBm (100*dBm) * @CFG80211_SIGNAL_TYPE_UNSPEC: signal strength, increasing from 0 through 100 */ enum cfg80211_signal_type { CFG80211_SIGNAL_TYPE_NONE, CFG80211_SIGNAL_TYPE_MBM, CFG80211_SIGNAL_TYPE_UNSPEC, }; /** * struct cfg80211_inform_bss - BSS inform data * @chan: channel the frame was received on * @signal: signal strength value, according to the wiphy's * signal type * @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was * received; should match the time when the frame was actually * received by the device (not just by the host, in case it was * buffered on the device) and be accurate to about 10ms. * If the frame isn't buffered, just passing the return value of * ktime_get_boottime_ns() is likely appropriate. * @parent_tsf: the time at the start of reception of the first octet of the * timestamp field of the frame. The time is the TSF of the BSS specified * by %parent_bssid. * @parent_bssid: the BSS according to which %parent_tsf is set. This is set to * the BSS that requested the scan in which the beacon/probe was received. * @chains: bitmask for filled values in @chain_signal. * @chain_signal: per-chain signal strength of last received BSS in dBm. * @restrict_use: restrict usage, if not set, assume @use_for is * %NL80211_BSS_USE_FOR_NORMAL. * @use_for: bitmap of possible usage for this BSS, see * &enum nl80211_bss_use_for * @cannot_use_reasons: the reasons (bitmap) for not being able to connect, * if @restrict_use is set and @use_for is zero (empty); may be 0 for * unspecified reasons; see &enum nl80211_bss_cannot_use_reasons * @drv_data: Data to be passed through to @inform_bss */ struct cfg80211_inform_bss { struct ieee80211_channel *chan; s32 signal; u64 boottime_ns; u64 parent_tsf; u8 parent_bssid[ETH_ALEN] __aligned(2); u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; u8 restrict_use:1, use_for:7; u8 cannot_use_reasons; void *drv_data; }; /** * struct cfg80211_bss_ies - BSS entry IE data * @tsf: TSF contained in the frame that carried these IEs * @rcu_head: internal use, for freeing * @len: length of the IEs * @from_beacon: these IEs are known to come from a beacon * @data: IE data */ struct cfg80211_bss_ies { u64 tsf; struct rcu_head rcu_head; int len; bool from_beacon; u8 data[]; }; /** * struct cfg80211_bss - BSS description * * This structure describes a BSS (which may also be a mesh network) * for use in scan results and similar. 
* * @channel: channel this BSS is on * @bssid: BSSID of the BSS * @beacon_interval: the beacon interval as from the frame * @capability: the capability field in host byte order * @ies: the information elements (Note that there is no guarantee that these * are well-formed!); this is a pointer to either the beacon_ies or * proberesp_ies depending on whether Probe Response frame has been * received. It is always non-%NULL. * @beacon_ies: the information elements from the last Beacon frame * (implementation note: if @hidden_beacon_bss is set this struct doesn't * own the beacon_ies, but they're just pointers to the ones from the * @hidden_beacon_bss struct) * @proberesp_ies: the information elements from the last Probe Response frame * @proberesp_ecsa_stuck: ECSA element is stuck in the Probe Response frame, * cannot rely on it having valid data * @hidden_beacon_bss: in case this BSS struct represents a probe response from * a BSS that hides the SSID in its beacon, this points to the BSS struct * that holds the beacon data. @beacon_ies is still valid, of course, and * points to the same data as hidden_beacon_bss->beacon_ies in that case. * @transmitted_bss: pointer to the transmitted BSS, if this is a * non-transmitted one (multi-BSSID support) * @nontrans_list: list of non-transmitted BSS, if this is a transmitted one * (multi-BSSID support) * @signal: signal strength value (type depends on the wiphy's signal_type) * @ts_boottime: timestamp of the last BSS update in nanoseconds since boot * @chains: bitmask for filled values in @chain_signal. * @chain_signal: per-chain signal strength of last received BSS in dBm. * @bssid_index: index in the multiple BSS set * @max_bssid_indicator: max number of members in the BSS set * @use_for: bitmap of possible usage for this BSS, see * &enum nl80211_bss_use_for * @cannot_use_reasons: the reasons (bitmap) for not being able to connect, * if @restrict_use is set and @use_for is zero (empty); may be 0 for * unspecified reasons; see &enum nl80211_bss_cannot_use_reasons * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes */ struct cfg80211_bss { struct ieee80211_channel *channel; const struct cfg80211_bss_ies __rcu *ies; const struct cfg80211_bss_ies __rcu *beacon_ies; const struct cfg80211_bss_ies __rcu *proberesp_ies; struct cfg80211_bss *hidden_beacon_bss; struct cfg80211_bss *transmitted_bss; struct list_head nontrans_list; s32 signal; u64 ts_boottime; u16 beacon_interval; u16 capability; u8 bssid[ETH_ALEN]; u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; u8 proberesp_ecsa_stuck:1; u8 bssid_index; u8 max_bssid_indicator; u8 use_for; u8 cannot_use_reasons; u8 priv[] __aligned(sizeof(void *)); }; /** * ieee80211_bss_get_elem - find element with given ID * @bss: the bss to search * @id: the element ID * * Note that the return value is an RCU-protected pointer, so * rcu_read_lock() must be held when calling this function. * Return: %NULL if not found. */ const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id); /** * ieee80211_bss_get_ie - find IE with given ID * @bss: the bss to search * @id: the element ID * * Note that the return value is an RCU-protected pointer, so * rcu_read_lock() must be held when calling this function. * Return: %NULL if not found. 
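 *
 * Example (illustrative only, not from the original documentation):
 * looking up the SSID element under RCU protection:
 *
 *	const struct element *elem;
 *
 *	rcu_read_lock();
 *	elem = ieee80211_bss_get_elem(bss, WLAN_EID_SSID);
 *	if (elem)
 *		pr_debug("SSID element length %u\n", elem->datalen);
 *	rcu_read_unlock();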
*/ static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id) { return (const void *)ieee80211_bss_get_elem(bss, id); } /** * struct cfg80211_auth_request - Authentication request data * * This structure provides information needed to complete IEEE 802.11 * authentication. * * @bss: The BSS to authenticate with, the callee must obtain a reference * to it if it needs to keep it. * @supported_selectors: List of selectors that should be assumed to be * supported by the station. * SAE_H2E must be assumed supported if set to %NULL. * @supported_selectors_len: Length of supported_selectors in octets. * @auth_type: Authentication type (algorithm) * @ie: Extra IEs to add to Authentication frame or %NULL * @ie_len: Length of ie buffer in octets * @key_len: length of WEP key for shared key authentication * @key_idx: index of WEP key for shared key authentication * @key: WEP key for shared key authentication * @auth_data: Fields and elements in Authentication frames. This contains * the authentication frame body (non-IE and IE data), excluding the * Authentication algorithm number, i.e., starting at the Authentication * transaction sequence number field. * @auth_data_len: Length of auth_data buffer in octets * @link_id: if >= 0, indicates authentication should be done as an MLD, * the interface address is included as the MLD address and the * necessary link (with the given link_id) will be created (and * given an MLD address) by the driver * @ap_mld_addr: AP MLD address in case of authentication request with * an AP MLD, valid iff @link_id >= 0 */ struct cfg80211_auth_request { struct cfg80211_bss *bss; const u8 *ie; size_t ie_len; const u8 *supported_selectors; u8 supported_selectors_len; enum nl80211_auth_type auth_type; const u8 *key; u8 key_len; s8 key_idx; const u8 *auth_data; size_t auth_data_len; s8 link_id; const u8 *ap_mld_addr; }; /** * struct cfg80211_assoc_link - per-link information for MLO association * @bss: the BSS pointer, see also &struct cfg80211_assoc_request::bss; * if this is %NULL for a link, that link is not requested * @elems: extra elements for the per-STA profile for this link * @elems_len: length of the elements * @disabled: If set this link should be included during association etc. but it * should not be used until enabled by the AP MLD. * @error: per-link error code, must be <= 0. If there is an error, then the * operation as a whole must fail. */ struct cfg80211_assoc_link { struct cfg80211_bss *bss; const u8 *elems; size_t elems_len; bool disabled; int error; }; /** * struct cfg80211_ml_reconf_req - MLO link reconfiguration request * @add_links: data for links to add, see &struct cfg80211_assoc_link * @rem_links: bitmap of links to remove * @ext_mld_capa_ops: extended MLD capabilities and operations set by * userspace for the ML reconfiguration action frame */ struct cfg80211_ml_reconf_req { struct cfg80211_assoc_link add_links[IEEE80211_MLD_MAX_NUM_LINKS]; u16 rem_links; u16 ext_mld_capa_ops; }; /** * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association. * * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n) * @ASSOC_REQ_DISABLE_VHT: Disable VHT * @ASSOC_REQ_USE_RRM: Declare RRM capability in this association * @CONNECT_REQ_EXTERNAL_AUTH_SUPPORT: User space indicates external * authentication capability. Drivers can offload authentication to * userspace if this flag is set. Only applicable for cfg80211_connect() * request (connect callback). 
 * @ASSOC_REQ_DISABLE_HE: Disable HE
 * @ASSOC_REQ_DISABLE_EHT: Disable EHT
 * @CONNECT_REQ_MLO_SUPPORT: Userspace indicates support for handling MLD links.
 *	Drivers shall disable MLO features for the current association if this
 *	flag is not set.
 * @ASSOC_REQ_SPP_AMSDU: SPP A-MSDUs will be used on this connection (if any)
 */
enum cfg80211_assoc_req_flags {
	ASSOC_REQ_DISABLE_HT = BIT(0),
	ASSOC_REQ_DISABLE_VHT = BIT(1),
	ASSOC_REQ_USE_RRM = BIT(2),
	CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = BIT(3),
	ASSOC_REQ_DISABLE_HE = BIT(4),
	ASSOC_REQ_DISABLE_EHT = BIT(5),
	CONNECT_REQ_MLO_SUPPORT = BIT(6),
	ASSOC_REQ_SPP_AMSDU = BIT(7),
};

/**
 * struct cfg80211_assoc_request - (Re)Association request data
 *
 * This structure provides information needed to complete IEEE 802.11
 * (re)association.
 * @bss: The BSS to associate with. If the call is successful the driver is
 *	given a reference that it must give back to cfg80211_send_rx_assoc()
 *	or to cfg80211_assoc_timeout(). To ensure proper refcounting, new
 *	association requests while already associating must be rejected.
 *	This also applies to the @links.bss parameter, which is used instead
 *	of this one (it is %NULL) for MLO associations.
 * @ie: Extra IEs to add to (Re)Association Request frame or %NULL
 * @ie_len: Length of ie buffer in octets
 * @use_mfp: Use management frame protection (IEEE 802.11w) in this association
 * @crypto: crypto settings
 * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
 *	to indicate a request to reassociate within the ESS instead of a request
 *	to do the initial association with the ESS. When included, this is set
 *	to the BSSID of the current association, i.e., to the value that is
 *	included in the Current AP address field of the Reassociation Request
 *	frame.
 * @flags: See &enum cfg80211_assoc_req_flags
 * @supported_selectors: supported BSS selectors in IEEE 802.11 format
 *	(or %NULL for no change).
 *	If %NULL, then support for SAE_H2E should be assumed.
 * @supported_selectors_len: number of supported BSS selectors
 * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
 *	will be used in ht_capa. Un-supported values will be ignored.
 * @ht_capa_mask: The bits of ht_capa which are to be used.
 * @vht_capa: VHT capability override
 * @vht_capa_mask: VHT capability mask indicating which fields to use
 * @fils_kek: FILS KEK for protecting (Re)Association Request/Response frame or
 *	%NULL if FILS is not used.
 * @fils_kek_len: Length of fils_kek in octets
 * @fils_nonces: FILS nonces (part of AAD) for protecting (Re)Association
 *	Request/Response frame or %NULL if FILS is not used. This field starts
 *	with 16 octets of STA Nonce followed by 16 octets of AP Nonce.
 * @s1g_capa: S1G capability override
 * @s1g_capa_mask: S1G capability override mask
 * @links: per-link information for MLO connections
 * @link_id: >= 0 for MLO connections, where links are given, and indicates
 *	the link on which the association request should be sent
 * @ap_mld_addr: AP MLD address in case of MLO association request,
 *	valid iff @link_id >= 0
 * @ext_mld_capa_ops: extended MLD capabilities and operations set by
 *	userspace for the association
 */
struct cfg80211_assoc_request {
	struct cfg80211_bss *bss;
	const u8 *ie, *prev_bssid;
	size_t ie_len;
	struct cfg80211_crypto_settings crypto;
	bool use_mfp;
	u32 flags;
	const u8 *supported_selectors;
	u8 supported_selectors_len;
	struct ieee80211_ht_cap ht_capa;
	struct ieee80211_ht_cap ht_capa_mask;
	struct ieee80211_vht_cap vht_capa, vht_capa_mask;
	const u8 *fils_kek;
	size_t fils_kek_len;
	const u8 *fils_nonces;
	struct ieee80211_s1g_cap s1g_capa, s1g_capa_mask;
	struct cfg80211_assoc_link links[IEEE80211_MLD_MAX_NUM_LINKS];
	const u8 *ap_mld_addr;
	s8 link_id;
	u16 ext_mld_capa_ops;
};

/**
 * struct cfg80211_deauth_request - Deauthentication request data
 *
 * This structure provides information needed to complete IEEE 802.11
 * deauthentication.
 *
 * @bssid: the BSSID or AP MLD address to deauthenticate from
 * @ie: Extra IEs to add to Deauthentication frame or %NULL
 * @ie_len: Length of ie buffer in octets
 * @reason_code: The reason code for the deauthentication
 * @local_state_change: if set, change local state only and
 *	do not send a Deauthentication frame
 */
struct cfg80211_deauth_request {
	const u8 *bssid;
	const u8 *ie;
	size_t ie_len;
	u16 reason_code;
	bool local_state_change;
};

/**
 * struct cfg80211_disassoc_request - Disassociation request data
 *
 * This structure provides information needed to complete IEEE 802.11
 * disassociation.
 *
 * @ap_addr: the BSSID or AP MLD address to disassociate from
 * @ie: Extra IEs to add to Disassociation frame or %NULL
 * @ie_len: Length of ie buffer in octets
 * @reason_code: The reason code for the disassociation
 * @local_state_change: This is a request for a local state only, i.e., no
 *	Disassociation frame is to be transmitted.
 */
struct cfg80211_disassoc_request {
	const u8 *ap_addr;
	const u8 *ie;
	size_t ie_len;
	u16 reason_code;
	bool local_state_change;
};

/**
 * struct cfg80211_ibss_params - IBSS parameters
 *
 * This structure defines the IBSS parameters for the join_ibss()
 * method.
 *
 * @ssid: The SSID, will always be non-null.
 * @ssid_len: The length of the SSID, will always be non-zero.
 * @bssid: Fixed BSSID requested, may be %NULL, if set do not
 *	search for IBSSs with a different BSSID.
 * @chandef: defines the channel to use if no other IBSS to join can be found
 * @channel_fixed: The channel should be fixed -- do not search for
 *	IBSSs to join on other channels.
 * @ie: information element(s) to include in the beacon
 * @ie_len: length of the information element(s)
 * @beacon_interval: beacon interval to use
 * @privacy: this is a protected network, keys will be configured
 *	after joining
 * @control_port: whether user space controls IEEE 802.1X port, i.e.,
 *	sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
 *	required to assume that the port is unauthorized until authorized by
 *	user space. Otherwise, port is marked authorized by default.
 * @control_port_over_nl80211: TRUE if userspace expects to exchange control
 *	port frames over NL80211 instead of the network interface.
 * @userspace_handles_dfs: whether user space controls DFS operation, i.e.
 *	changes the channel when a radar is detected.
This is required
 *	to operate on DFS channels.
 * @basic_rates: bitmap of basic rates to use when creating the IBSS
 * @mcast_rate: per-band multicast rate index + 1 (0: disabled)
 * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
 *	will be used in ht_capa. Un-supported values will be ignored.
 * @ht_capa_mask: The bits of ht_capa which are to be used.
 * @wep_keys: static WEP keys, if not NULL points to an array of
 *	CFG80211_MAX_WEP_KEYS WEP keys
 * @wep_tx_key: key index (0..3) of the default TX static WEP key
 */
struct cfg80211_ibss_params {
	const u8 *ssid;
	const u8 *bssid;
	struct cfg80211_chan_def chandef;
	const u8 *ie;
	u8 ssid_len, ie_len;
	u16 beacon_interval;
	u32 basic_rates;
	bool channel_fixed;
	bool privacy;
	bool control_port;
	bool control_port_over_nl80211;
	bool userspace_handles_dfs;
	int mcast_rate[NUM_NL80211_BANDS];
	struct ieee80211_ht_cap ht_capa;
	struct ieee80211_ht_cap ht_capa_mask;
	struct key_params *wep_keys;
	int wep_tx_key;
};

/**
 * struct cfg80211_bss_selection - connection parameters for BSS selection.
 *
 * @behaviour: requested BSS selection behaviour.
 * @param: parameters for requested behaviour.
 * @param.band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF.
 * @param.adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST.
 */
struct cfg80211_bss_selection {
	enum nl80211_bss_select_attr behaviour;
	union {
		enum nl80211_band band_pref;
		struct cfg80211_bss_select_adjust adjust;
	} param;
};

/**
 * struct cfg80211_connect_params - Connection parameters
 *
 * This structure provides information needed to complete IEEE 802.11
 * authentication and association.
 *
 * @channel: The channel to use or %NULL if not specified (auto-select based
 *	on scan results)
 * @channel_hint: The channel of the recommended BSS for initial connection or
 *	%NULL if not specified
 * @bssid: The AP BSSID or %NULL if not specified (auto-select based on scan
 *	results)
 * @bssid_hint: The recommended AP BSSID for initial connection to the BSS or
 *	%NULL if not specified. Unlike the @bssid parameter, the driver is
 *	allowed to ignore this @bssid_hint if it has knowledge of a better BSS
 *	to use.
 * @ssid: SSID
 * @ssid_len: Length of ssid in octets
 * @auth_type: Authentication type (algorithm)
 * @ie: IEs for association request
 * @ie_len: Length of @ie in octets
 * @privacy: indicates whether privacy-enabled APs should be used
 * @mfp: indicate whether management frame protection is used
 * @crypto: crypto settings
 * @key_len: length of WEP key for shared key authentication
 * @key_idx: index of WEP key for shared key authentication
 * @key: WEP key for shared key authentication
 * @flags: See &enum cfg80211_assoc_req_flags
 * @bg_scan_period: Background scan period in seconds
 *	or -1 to indicate that default value is to be used.
 * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
 *	will be used in ht_capa. Un-supported values will be ignored.
 * @ht_capa_mask: The bits of ht_capa which are to be used.
 * @vht_capa: VHT Capability overrides
 * @vht_capa_mask: The bits of vht_capa which are to be used.
 * @pbss: if set, connect to a PCP instead of AP. Valid for DMG
 *	networks.
 * @bss_select: criteria to be used for BSS selection.
 * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
 *	to indicate a request to reassociate within the ESS instead of a request
 *	to do the initial association with the ESS.
When included, this is set to * the BSSID of the current association, i.e., to the value that is * included in the Current AP address field of the Reassociation Request * frame. * @fils_erp_username: EAP re-authentication protocol (ERP) username part of the * NAI or %NULL if not specified. This is used to construct FILS wrapped * data IE. * @fils_erp_username_len: Length of @fils_erp_username in octets. * @fils_erp_realm: EAP re-authentication protocol (ERP) realm part of NAI or * %NULL if not specified. This specifies the domain name of ER server and * is used to construct FILS wrapped data IE. * @fils_erp_realm_len: Length of @fils_erp_realm in octets. * @fils_erp_next_seq_num: The next sequence number to use in the FILS ERP * messages. This is also used to construct FILS wrapped data IE. * @fils_erp_rrk: ERP re-authentication Root Key (rRK) used to derive additional * keys in FILS or %NULL if not specified. * @fils_erp_rrk_len: Length of @fils_erp_rrk in octets. * @want_1x: indicates user-space supports and wants to use 802.1X driver * offload of 4-way handshake. * @edmg: define the EDMG channels. * This may specify multiple channels and bonding options for the driver * to choose from, based on BSS configuration. */ struct cfg80211_connect_params { struct ieee80211_channel *channel; struct ieee80211_channel *channel_hint; const u8 *bssid; const u8 *bssid_hint; const u8 *ssid; size_t ssid_len; enum nl80211_auth_type auth_type; const u8 *ie; size_t ie_len; bool privacy; enum nl80211_mfp mfp; struct cfg80211_crypto_settings crypto; const u8 *key; u8 key_len, key_idx; u32 flags; int bg_scan_period; struct ieee80211_ht_cap ht_capa; struct ieee80211_ht_cap ht_capa_mask; struct ieee80211_vht_cap vht_capa; struct ieee80211_vht_cap vht_capa_mask; bool pbss; struct cfg80211_bss_selection bss_select; const u8 *prev_bssid; const u8 *fils_erp_username; size_t fils_erp_username_len; const u8 *fils_erp_realm; size_t fils_erp_realm_len; u16 fils_erp_next_seq_num; const u8 *fils_erp_rrk; size_t fils_erp_rrk_len; bool want_1x; struct ieee80211_edmg edmg; }; /** * enum cfg80211_connect_params_changed - Connection parameters being updated * * This enum provides information of all connect parameters that * have to be updated as part of update_connect_params() call. 
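 *
 * Example (illustrative, not from the original header; priv is a
 * hypothetical driver-private structure and sme the connect parameters):
 * a driver's update_connect_params() handler would typically apply only
 * the indicated updates, e.g.:
 *
 *	if (changed & UPDATE_AUTH_TYPE)
 *		priv->auth_type = sme->auth_type;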
 *
 * @UPDATE_ASSOC_IES: Indicates whether association request IEs are updated
 * @UPDATE_FILS_ERP_INFO: Indicates that FILS connection parameters (realm,
 *	username, erp sequence number and rrk) are updated
 * @UPDATE_AUTH_TYPE: Indicates that authentication type is updated
 */
enum cfg80211_connect_params_changed {
	UPDATE_ASSOC_IES = BIT(0),
	UPDATE_FILS_ERP_INFO = BIT(1),
	UPDATE_AUTH_TYPE = BIT(2),
};

/**
 * enum wiphy_params_flags - set_wiphy_params bitfield values
 * @WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed
 * @WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed
 * @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed
 * @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed
 * @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed
 * @WIPHY_PARAM_DYN_ACK: dynack has been enabled
 * @WIPHY_PARAM_TXQ_LIMIT: TXQ packet limit has been changed
 * @WIPHY_PARAM_TXQ_MEMORY_LIMIT: TXQ memory limit has been changed
 * @WIPHY_PARAM_TXQ_QUANTUM: TXQ scheduler quantum
 */
enum wiphy_params_flags {
	WIPHY_PARAM_RETRY_SHORT = BIT(0),
	WIPHY_PARAM_RETRY_LONG = BIT(1),
	WIPHY_PARAM_FRAG_THRESHOLD = BIT(2),
	WIPHY_PARAM_RTS_THRESHOLD = BIT(3),
	WIPHY_PARAM_COVERAGE_CLASS = BIT(4),
	WIPHY_PARAM_DYN_ACK = BIT(5),
	WIPHY_PARAM_TXQ_LIMIT = BIT(6),
	WIPHY_PARAM_TXQ_MEMORY_LIMIT = BIT(7),
	WIPHY_PARAM_TXQ_QUANTUM = BIT(8),
};

#define IEEE80211_DEFAULT_AIRTIME_WEIGHT 256

/* The per TXQ device queue limit in airtime */
#define IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L 5000
#define IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H 12000

/* The per interface airtime threshold to switch to lower queue limit */
#define IEEE80211_AQL_THRESHOLD 24000

/**
 * struct cfg80211_pmksa - PMK Security Association
 *
 * This structure is passed to the set/del_pmksa() method for PMKSA
 * caching.
 *
 * @bssid: The AP's BSSID (may be %NULL).
 * @pmkid: The identifier to refer to a PMKSA.
 * @pmk: The PMK for the PMKSA identified by @pmkid. This is used for key
 *	derivation by a FILS STA. Otherwise, %NULL.
 * @pmk_len: Length of the @pmk. The length of @pmk can differ depending on
 *	the hash algorithm used to generate this.
 * @ssid: SSID to specify the ESS within which a PMKSA is valid when using FILS
 *	cache identifier (may be %NULL).
 * @ssid_len: Length of the @ssid in octets.
 * @cache_id: 2-octet cache identifier advertised by a FILS AP identifying the
 *	scope of PMKSA. This is valid only if @ssid_len is non-zero (may be
 *	%NULL).
 * @pmk_lifetime: Maximum lifetime for PMKSA in seconds
 *	(dot11RSNAConfigPMKLifetime) or 0 if not specified.
 *	The configured PMKSA must not be used for PMKSA caching after
 *	expiration and any keys derived from this PMK become invalid on
 *	expiration, i.e., the current association must be dropped if the PMK
 *	used for it expires.
 * @pmk_reauth_threshold: Threshold time for reauthentication (percentage of
 *	PMK lifetime, dot11RSNAConfigPMKReauthThreshold) or 0 if not specified.
 *	Drivers are expected to trigger a full authentication instead of using
 *	this PMKSA for caching when reassociating to a new BSS after this
 *	threshold to generate a new PMK before the current one expires.
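 *
 * Example (illustrative only; all values hypothetical): a plain RSN PMKSA
 * cache entry is identified by BSSID and PMKID, with the FILS-only fields
 * left unset:
 *
 *	struct cfg80211_pmksa pmksa = {
 *		.bssid = ap_bssid,
 *		.pmkid = pmkid,
 *		.pmk_lifetime = 43200,
 *	};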
*/ struct cfg80211_pmksa { const u8 *bssid; const u8 *pmkid; const u8 *pmk; size_t pmk_len; const u8 *ssid; size_t ssid_len; const u8 *cache_id; u32 pmk_lifetime; u8 pmk_reauth_threshold; }; /** * struct cfg80211_pkt_pattern - packet pattern * @mask: bitmask where to match pattern and where to ignore bytes, * one bit per byte, in same format as nl80211 * @pattern: bytes to match where bitmask is 1 * @pattern_len: length of pattern (in bytes) * @pkt_offset: packet offset (in bytes) * * Internal note: @mask and @pattern are allocated in one chunk of * memory, free @mask only! */ struct cfg80211_pkt_pattern { const u8 *mask, *pattern; int pattern_len; int pkt_offset; }; /** * struct cfg80211_wowlan_tcp - TCP connection parameters * * @sock: (internal) socket for source port allocation * @src: source IP address * @dst: destination IP address * @dst_mac: destination MAC address * @src_port: source port * @dst_port: destination port * @payload_len: data payload length * @payload: data payload buffer * @payload_seq: payload sequence stamping configuration * @data_interval: interval at which to send data packets * @wake_len: wakeup payload match length * @wake_data: wakeup payload match data * @wake_mask: wakeup payload match mask * @tokens_size: length of the tokens buffer * @payload_tok: payload token usage configuration */ struct cfg80211_wowlan_tcp { struct socket *sock; __be32 src, dst; u16 src_port, dst_port; u8 dst_mac[ETH_ALEN]; int payload_len; const u8 *payload; struct nl80211_wowlan_tcp_data_seq payload_seq; u32 data_interval; u32 wake_len; const u8 *wake_data, *wake_mask; u32 tokens_size; /* must be last, variable member */ struct nl80211_wowlan_tcp_data_token payload_tok; }; /** * struct cfg80211_wowlan - Wake on Wireless-LAN support info * * This structure defines the enabled WoWLAN triggers for the device. * @any: wake up on any activity -- special trigger if device continues * operating as normal during suspend * @disconnect: wake up if getting disconnected * @magic_pkt: wake up on receiving magic packet * @patterns: wake up on receiving packet matching a pattern * @n_patterns: number of patterns * @gtk_rekey_failure: wake up on GTK rekey failure * @eap_identity_req: wake up on EAP identity request packet * @four_way_handshake: wake up on 4-way handshake * @rfkill_release: wake up when rfkill is released * @tcp: TCP connection establishment/wakeup parameters, see nl80211.h. * NULL if not configured. * @nd_config: configuration for the scan to be used for net detect wake. */ struct cfg80211_wowlan { bool any, disconnect, magic_pkt, gtk_rekey_failure, eap_identity_req, four_way_handshake, rfkill_release; struct cfg80211_pkt_pattern *patterns; struct cfg80211_wowlan_tcp *tcp; int n_patterns; struct cfg80211_sched_scan_request *nd_config; }; /** * struct cfg80211_coalesce_rules - Coalesce rule parameters * * This structure defines coalesce rule for the device. * @delay: maximum coalescing delay in msecs. * @condition: condition for packet coalescence. * see &enum nl80211_coalesce_condition. * @patterns: array of packet patterns * @n_patterns: number of patterns */ struct cfg80211_coalesce_rules { int delay; enum nl80211_coalesce_condition condition; struct cfg80211_pkt_pattern *patterns; int n_patterns; }; /** * struct cfg80211_coalesce - Packet coalescing settings * * This structure defines coalescing settings. 
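 *
 * Example (illustrative, not from the original header): one rule
 * coalescing packets whose byte at offset 12 matches 0x86 for up to
 * 50 ms; the pattern mask has one bit per pattern byte, so a single set
 * bit covers the single pattern byte:
 *
 *	static const u8 pat[] = { 0x86 };
 *	static const u8 msk[] = { 0x01 };
 *	struct cfg80211_pkt_pattern pattern = {
 *		.pattern = pat, .mask = msk,
 *		.pattern_len = 1, .pkt_offset = 12,
 *	};
 *	struct cfg80211_coalesce_rules rule = {
 *		.delay = 50,
 *		.condition = NL80211_COALESCE_CONDITION_MATCH,
 *		.patterns = &pattern,
 *		.n_patterns = 1,
 *	};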
 * @rules: array of coalesce rules
 * @n_rules: number of rules
 */
struct cfg80211_coalesce {
	int n_rules;
	struct cfg80211_coalesce_rules rules[] __counted_by(n_rules);
};

/**
 * struct cfg80211_wowlan_nd_match - information about the match
 *
 * @ssid: SSID of the match that triggered the wake up
 * @n_channels: Number of channels where the match occurred. This
 *	value may be zero if the driver can't report the channels.
 * @channels: center frequencies of the channels where a match
 *	occurred (in MHz)
 */
struct cfg80211_wowlan_nd_match {
	struct cfg80211_ssid ssid;
	int n_channels;
	u32 channels[] __counted_by(n_channels);
};

/**
 * struct cfg80211_wowlan_nd_info - net detect wake up information
 *
 * @n_matches: Number of match information instances provided in
 *	@matches. This value may be zero if the driver can't provide
 *	match information.
 * @matches: Array of pointers to matches containing information about
 *	the matches that triggered the wake up.
 */
struct cfg80211_wowlan_nd_info {
	int n_matches;
	struct cfg80211_wowlan_nd_match *matches[] __counted_by(n_matches);
};

/**
 * struct cfg80211_wowlan_wakeup - wakeup report
 * @disconnect: woke up by getting disconnected
 * @magic_pkt: woke up by receiving magic packet
 * @gtk_rekey_failure: woke up by GTK rekey failure
 * @eap_identity_req: woke up by EAP identity request packet
 * @four_way_handshake: woke up by 4-way handshake
 * @rfkill_release: woke up by rfkill being released
 * @pattern_idx: pattern that caused wakeup, -1 if not due to pattern
 * @packet_present_len: copied wakeup packet data
 * @packet_len: original wakeup packet length
 * @packet: The packet causing the wakeup, if any.
 * @packet_80211: indicates the format of @packet: for pattern match, magic
 *	packet and other data-frame triggers, an 802.3 frame should be
 *	reported; for disconnect due to deauthentication, an 802.11 frame.
 * @tcp_match: TCP wakeup packet received
 * @tcp_connlost: TCP connection lost or failed to establish
 * @tcp_nomoretokens: TCP data ran out of tokens
 * @net_detect: if not %NULL, woke up because of net detect
 * @unprot_deauth_disassoc: woke up due to unprotected deauth or
 *	disassoc frame (in MFP).
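 *
 * Example (illustrative sketch; the reporting helper is assumed to be
 * cfg80211_report_wowlan_wakeup()): a driver waking up on a magic packet
 * might report:
 *
 *	struct cfg80211_wowlan_wakeup wakeup = {
 *		.magic_pkt = true,
 *		.pattern_idx = -1,
 *	};
 *
 *	cfg80211_report_wowlan_wakeup(wdev, &wakeup, GFP_KERNEL);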
 */
struct cfg80211_wowlan_wakeup {
	bool disconnect, magic_pkt, gtk_rekey_failure,
	     eap_identity_req, four_way_handshake,
	     rfkill_release, packet_80211,
	     tcp_match, tcp_connlost, tcp_nomoretokens,
	     unprot_deauth_disassoc;
	s32 pattern_idx;
	u32 packet_present_len, packet_len;
	const void *packet;
	struct cfg80211_wowlan_nd_info *net_detect;
};

/**
 * struct cfg80211_gtk_rekey_data - rekey data
 * @kek: key encryption key (@kek_len bytes)
 * @kck: key confirmation key (@kck_len bytes)
 * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes)
 * @kek_len: length of kek
 * @kck_len: length of kck
 * @akm: akm (oui, id)
 */
struct cfg80211_gtk_rekey_data {
	const u8 *kek, *kck, *replay_ctr;
	u32 akm;
	u8 kek_len, kck_len;
};

/**
 * struct cfg80211_update_ft_ies_params - FT IE Information
 *
 * This structure provides information needed to update the fast transition IE
 *
 * @md: The Mobility Domain ID, 2 Octet value
 * @ie: Fast Transition IEs
 * @ie_len: Length of @ie in octets
 */
struct cfg80211_update_ft_ies_params {
	u16 md;
	const u8 *ie;
	size_t ie_len;
};

/**
 * struct cfg80211_mgmt_tx_params - mgmt tx parameters
 *
 * This structure provides information needed to transmit a mgmt frame
 *
 * @chan: channel to use
 * @offchan: indicates whether off channel operation is required
 * @wait: duration for remain-on-channel (ROC)
 * @buf: buffer to transmit
 * @len: buffer length
 * @no_cck: don't use cck rates for this frame
 * @dont_wait_for_ack: tells the low level not to wait for an ack
 * @n_csa_offsets: length of csa_offsets array
 * @csa_offsets: array of all the csa offsets in the frame
 * @link_id: for MLO, the link ID to transmit on, -1 if not given; note
 *	that the link ID isn't validated (much), it's in range but the
 *	link might not exist (or be used by the receiver STA)
 */
struct cfg80211_mgmt_tx_params {
	struct ieee80211_channel *chan;
	bool offchan;
	unsigned int wait;
	const u8 *buf;
	size_t len;
	bool no_cck;
	bool dont_wait_for_ack;
	int n_csa_offsets;
	const u16 *csa_offsets;
	int link_id;
};

/**
 * struct cfg80211_dscp_exception - DSCP exception
 *
 * @dscp: DSCP value that does not adhere to the user priority range definition
 * @up: user priority value to which the corresponding DSCP value belongs
 */
struct cfg80211_dscp_exception {
	u8 dscp;
	u8 up;
};

/**
 * struct cfg80211_dscp_range - DSCP range definition for user priority
 *
 * @low: lowest DSCP value of this user priority range, inclusive
 * @high: highest DSCP value of this user priority range, inclusive
 */
struct cfg80211_dscp_range {
	u8 low;
	u8 high;
};

/* QoS Map Set element length defined in IEEE Std 802.11-2012, 8.4.2.97 */
#define IEEE80211_QOS_MAP_MAX_EX 21
#define IEEE80211_QOS_MAP_LEN_MIN 16
#define IEEE80211_QOS_MAP_LEN_MAX \
	(IEEE80211_QOS_MAP_LEN_MIN + 2 * IEEE80211_QOS_MAP_MAX_EX)

/**
 * struct cfg80211_qos_map - QoS Map Information
 *
 * This struct defines the Interworking QoS map setting for DSCP values
 *
 * @num_des: number of DSCP exceptions (0..21)
 * @dscp_exception: optionally up to maximum of 21 DSCP exceptions from
 *	the user priority DSCP range definition
 * @up: DSCP range definition for a particular user priority
 */
struct cfg80211_qos_map {
	u8 num_des;
	struct cfg80211_dscp_exception dscp_exception[IEEE80211_QOS_MAP_MAX_EX];
	struct cfg80211_dscp_range up[8];
};

/**
 * struct cfg80211_nan_band_config - NAN band specific configuration
 *
 * @chan: Pointer to the IEEE 802.11 channel structure. The channel to be used
 *	for NAN operations on this band. For 2.4 GHz band, this is always
 *	channel 6.
For 5 GHz band, the channel is either 44 or 149, according
 *	to the regulatory constraints. If chan pointer is NULL the entire band
 *	configuration entry is considered invalid and should not be used.
 * @rssi_close: RSSI close threshold used for NAN state transition algorithm
 *	as described in chapters 3.3.6 and 3.3.7 "NAN Device Role and State
 *	Transition" of Wi-Fi Aware Specification v4.0. If not
 *	specified (set to 0), default device value is used. The value should
 *	be greater than -60 dBm.
 * @rssi_middle: RSSI middle threshold used for NAN state transition algorithm,
 *	as described in chapters 3.3.6 and 3.3.7 "NAN Device Role and State
 *	Transition" of Wi-Fi Aware Specification v4.0. If not
 *	specified (set to 0), default device value is used. The value should be
 *	greater than -75 dBm and less than rssi_close.
 * @awake_dw_interval: Committed DW interval. Valid range: 0-5. The value 0
 *	indicates no wakeup for DW and cannot be used on the 2.4 GHz band;
 *	otherwise the interval is 2^(n-1) DWs.
 * @disable_scan: If true, the device will not scan this band for cluster
 *	merge. Disabling scan on 2.4 GHz band is not allowed.
 */
struct cfg80211_nan_band_config {
	struct ieee80211_channel *chan;
	s8 rssi_close;
	s8 rssi_middle;
	u8 awake_dw_interval;
	bool disable_scan;
};

/**
 * struct cfg80211_nan_conf - NAN configuration
 *
 * This struct defines NAN configuration parameters
 *
 * @master_pref: master preference (1 - 255)
 * @bands: operating bands, a bitmap of &enum nl80211_band values.
 *	For instance, for NL80211_BAND_2GHZ, bit 0 would be set
 *	(i.e. BIT(NL80211_BAND_2GHZ)).
 * @cluster_id: cluster ID used for NAN synchronization. This is a MAC address
 *	that can take a value from 50-6F-9A-01-00-00 to 50-6F-9A-01-FF-FF.
 *	If NULL, the device will pick a random Cluster ID.
 * @scan_period: period (in seconds) between NAN scans.
 * @scan_dwell_time: dwell time (in milliseconds) for NAN scans.
 * @discovery_beacon_interval: interval (in TUs) for discovery beacons.
 * @enable_dw_notification: flag to enable/disable discovery window
 *	notifications.
 * @band_cfgs: array of band specific configurations, indexed by
 *	&enum nl80211_band values.
 * @extra_nan_attrs: pointer to additional NAN attributes.
 * @extra_nan_attrs_len: length of the additional NAN attributes.
 * @vendor_elems: pointer to vendor-specific elements.
 * @vendor_elems_len: length of the vendor-specific elements.
 */
struct cfg80211_nan_conf {
	u8 master_pref;
	u8 bands;
	const u8 *cluster_id;
	u16 scan_period;
	u16 scan_dwell_time;
	u8 discovery_beacon_interval;
	bool enable_dw_notification;
	struct cfg80211_nan_band_config band_cfgs[NUM_NL80211_BANDS];
	const u8 *extra_nan_attrs;
	u16 extra_nan_attrs_len;
	const u8 *vendor_elems;
	u16 vendor_elems_len;
};

/**
 * enum cfg80211_nan_conf_changes - indicates changed fields in NAN
 *	configuration
 *
 * @CFG80211_NAN_CONF_CHANGED_PREF: master preference
 * @CFG80211_NAN_CONF_CHANGED_BANDS: operating bands
 * @CFG80211_NAN_CONF_CHANGED_CONFIG: changed additional configuration.
 *	When this flag is set, it indicates that some additional attribute(s)
 *	(other than master_pref and bands) have been changed. In this case,
 *	all the unchanged attributes will be properly configured to their
 *	previous values. The driver doesn't need to store any
 *	previous configuration besides master_pref and bands.
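 *
 * Example (illustrative, not from the original header; priv and conf are
 * hypothetical driver-private and configuration pointers): a driver's NAN
 * configuration-change handler would typically only touch what changed:
 *
 *	if (changes & CFG80211_NAN_CONF_CHANGED_PREF)
 *		priv->nan_master_pref = conf->master_pref;
 *	if (changes & CFG80211_NAN_CONF_CHANGED_BANDS)
 *		priv->nan_bands = conf->bands;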
*/ enum cfg80211_nan_conf_changes { CFG80211_NAN_CONF_CHANGED_PREF = BIT(0), CFG80211_NAN_CONF_CHANGED_BANDS = BIT(1), CFG80211_NAN_CONF_CHANGED_CONFIG = BIT(2), }; /** * struct cfg80211_nan_func_filter - a NAN function Rx / Tx filter * * @filter: the content of the filter * @len: the length of the filter */ struct cfg80211_nan_func_filter { const u8 *filter; u8 len; }; /** * struct cfg80211_nan_func - a NAN function * * @type: &enum nl80211_nan_function_type * @service_id: the service ID of the function * @publish_type: &nl80211_nan_publish_type * @close_range: if true, the range should be limited. Threshold is * implementation specific. * @publish_bcast: if true, the solicited publish should be broadcasted * @subscribe_active: if true, the subscribe is active * @followup_id: the instance ID for follow up * @followup_reqid: the requester instance ID for follow up * @followup_dest: MAC address of the recipient of the follow up * @ttl: time to live counter in DW. * @serv_spec_info: Service Specific Info * @serv_spec_info_len: Service Specific Info length * @srf_include: if true, SRF is inclusive * @srf_bf: Bloom Filter * @srf_bf_len: Bloom Filter length * @srf_bf_idx: Bloom Filter index * @srf_macs: SRF MAC addresses * @srf_num_macs: number of MAC addresses in SRF * @rx_filters: rx filters that are matched with corresponding peer's tx_filter * @tx_filters: filters that should be transmitted in the SDF. * @num_rx_filters: length of &rx_filters. * @num_tx_filters: length of &tx_filters. * @instance_id: driver allocated id of the function. * @cookie: unique NAN function identifier. */ struct cfg80211_nan_func { enum nl80211_nan_function_type type; u8 service_id[NL80211_NAN_FUNC_SERVICE_ID_LEN]; u8 publish_type; bool close_range; bool publish_bcast; bool subscribe_active; u8 followup_id; u8 followup_reqid; struct mac_address followup_dest; u32 ttl; const u8 *serv_spec_info; u8 serv_spec_info_len; bool srf_include; const u8 *srf_bf; u8 srf_bf_len; u8 srf_bf_idx; struct mac_address *srf_macs; int srf_num_macs; struct cfg80211_nan_func_filter *rx_filters; struct cfg80211_nan_func_filter *tx_filters; u8 num_tx_filters; u8 num_rx_filters; u8 instance_id; u64 cookie; }; /** * struct cfg80211_pmk_conf - PMK configuration * * @aa: authenticator address * @pmk_len: PMK length in bytes. * @pmk: the PMK material * @pmk_r0_name: PMK-R0 Name. NULL if not applicable (i.e., the PMK * is not PMK-R0). When pmk_r0_name is not NULL, the pmk field * holds PMK-R0. */ struct cfg80211_pmk_conf { const u8 *aa; u8 pmk_len; const u8 *pmk; const u8 *pmk_r0_name; }; /** * struct cfg80211_external_auth_params - Trigger External authentication. * * Commonly used across the external auth request and event interfaces. * * @action: action type / trigger for external authentication. Only significant * for the authentication request event interface (driver to user space). * @bssid: BSSID of the peer with which the authentication has * to happen. Used by both the authentication request event and * authentication response command interface. * @ssid: SSID of the AP. Used by both the authentication request event and * authentication response command interface. * @key_mgmt_suite: AKM suite of the respective authentication. Used by the * authentication request event interface. * @status: status code, %WLAN_STATUS_SUCCESS for successful authentication, * use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space cannot give you * the real status code for failures. 
Used only for the authentication
 *	response command interface (user space to driver).
 * @pmkid: The identifier to refer to a PMKSA.
 * @mld_addr: MLD address of the peer. Used by the authentication request event
 *	interface. Driver indicates this to enable MLO during the authentication
 *	offload to user space. Driver shall look at %NL80211_ATTR_MLO_SUPPORT
 *	flag capability in NL80211_CMD_CONNECT to know whether the user space
 *	supports enabling MLO during the authentication offload.
 *	User space should use the address of the interface (on which the
 *	authentication request event was reported) as self MLD address. User
 *	space and driver should use MLD addresses in RA, TA and BSSID fields of
 *	authentication frames sent or received via cfg80211. The driver
 *	translates the MLD addresses to/from link addresses based on the link
 *	chosen for the authentication.
 */
struct cfg80211_external_auth_params {
	enum nl80211_external_auth_action action;
	u8 bssid[ETH_ALEN] __aligned(2);
	struct cfg80211_ssid ssid;
	unsigned int key_mgmt_suite;
	u16 status;
	const u8 *pmkid;
	u8 mld_addr[ETH_ALEN] __aligned(2);
};

/**
 * struct cfg80211_ftm_responder_stats - FTM responder statistics
 *
 * @filled: bitflag of flags using the bits of &enum nl80211_ftm_stats to
 *	indicate the relevant values in this struct for them
 * @success_num: number of FTM sessions in which all frames were successfully
 *	answered
 * @partial_num: number of FTM sessions in which only part of the frames were
 *	successfully answered
 * @failed_num: number of failed FTM sessions
 * @asap_num: number of ASAP FTM sessions
 * @non_asap_num: number of non-ASAP FTM sessions
 * @total_duration_ms: total duration of all sessions - gives an indication
 *	of how much time the responder was busy
 * @unknown_triggers_num: number of unknown FTM triggers - triggers from
 *	initiators that didn't successfully finish the negotiation phase with
 *	the responder
 * @reschedule_requests_num: number of FTM reschedule requests - initiator asks
 *	for a new scheduling although it already has a scheduled FTM slot
 * @out_of_window_triggers_num: total FTM triggers out of scheduled window
 */
struct cfg80211_ftm_responder_stats {
	u32 filled;
	u32 success_num;
	u32 partial_num;
	u32 failed_num;
	u32 asap_num;
	u32 non_asap_num;
	u64 total_duration_ms;
	u32 unknown_triggers_num;
	u32 reschedule_requests_num;
	u32 out_of_window_triggers_num;
};

/**
 * struct cfg80211_pmsr_ftm_result - FTM result
 * @failure_reason: if this measurement failed (PMSR status is
 *	%NL80211_PMSR_STATUS_FAILURE), this gives a more precise
 *	reason than just "failure"
 * @burst_index: if reporting partial results, this is the index
 *	in [0 ..
num_bursts-1] of the burst that's being reported
 * @num_ftmr_attempts: number of FTM request frames transmitted
 * @num_ftmr_successes: number of FTM request frames acked
 * @busy_retry_time: if failure_reason is %NL80211_PMSR_FTM_FAILURE_PEER_BUSY,
 *	fill this to indicate in how many seconds a retry is deemed possible
 *	by the responder
 * @num_bursts_exp: actual number of bursts exponent negotiated
 * @burst_duration: actual burst duration negotiated
 * @ftms_per_burst: actual FTMs per burst negotiated
 * @lci_len: length of LCI information (if present)
 * @civicloc_len: length of civic location information (if present)
 * @lci: LCI data (may be %NULL)
 * @civicloc: civic location data (may be %NULL)
 * @rssi_avg: average RSSI over FTM action frames reported
 * @rssi_spread: spread of the RSSI over FTM action frames reported
 * @tx_rate: bitrate for transmitted FTM action frame response
 * @rx_rate: bitrate of received FTM action frame
 * @rtt_avg: average of RTTs measured (must have either this or @dist_avg)
 * @rtt_variance: variance of RTTs measured (note that standard deviation is
 *	the square root of the variance)
 * @rtt_spread: spread of the RTTs measured
 * @dist_avg: average of distances (mm) measured
 *	(must have either this or @rtt_avg)
 * @dist_variance: variance of distances measured (see also @rtt_variance)
 * @dist_spread: spread of distances measured (see also @rtt_spread)
 * @num_ftmr_attempts_valid: @num_ftmr_attempts is valid
 * @num_ftmr_successes_valid: @num_ftmr_successes is valid
 * @rssi_avg_valid: @rssi_avg is valid
 * @rssi_spread_valid: @rssi_spread is valid
 * @tx_rate_valid: @tx_rate is valid
 * @rx_rate_valid: @rx_rate is valid
 * @rtt_avg_valid: @rtt_avg is valid
 * @rtt_variance_valid: @rtt_variance is valid
 * @rtt_spread_valid: @rtt_spread is valid
 * @dist_avg_valid: @dist_avg is valid
 * @dist_variance_valid: @dist_variance is valid
 * @dist_spread_valid: @dist_spread is valid
 */
struct cfg80211_pmsr_ftm_result {
	const u8 *lci;
	const u8 *civicloc;
	unsigned int lci_len;
	unsigned int civicloc_len;
	enum nl80211_peer_measurement_ftm_failure_reasons failure_reason;
	u32 num_ftmr_attempts, num_ftmr_successes;
	s16 burst_index;
	u8 busy_retry_time;
	u8 num_bursts_exp;
	u8 burst_duration;
	u8 ftms_per_burst;
	s32 rssi_avg;
	s32 rssi_spread;
	struct rate_info tx_rate, rx_rate;
	s64 rtt_avg;
	s64 rtt_variance;
	s64 rtt_spread;
	s64 dist_avg;
	s64 dist_variance;
	s64 dist_spread;
	u16 num_ftmr_attempts_valid:1,
	    num_ftmr_successes_valid:1,
	    rssi_avg_valid:1,
	    rssi_spread_valid:1,
	    tx_rate_valid:1,
	    rx_rate_valid:1,
	    rtt_avg_valid:1,
	    rtt_variance_valid:1,
	    rtt_spread_valid:1,
	    dist_avg_valid:1,
	    dist_variance_valid:1,
	    dist_spread_valid:1;
};

/**
 * struct cfg80211_pmsr_result - peer measurement result
 * @addr: address of the peer
 * @host_time: host time (use ktime_get_boottime(), adjusted to the time when
 *	the measurement was made)
 * @ap_tsf: AP's TSF at measurement time
 * @status: status of the measurement
 * @final: if reporting partial results, mark this as the last one; if not
 *	reporting partial results always set this flag
 * @ap_tsf_valid: indicates the @ap_tsf value is valid
 * @type: type of the measurement reported, note that we only support reporting
 *	one type at a time, but you can report multiple results separately and
 *	they're all aggregated for userspace.
* @ftm: FTM result */ struct cfg80211_pmsr_result { u64 host_time, ap_tsf; enum nl80211_peer_measurement_status status; u8 addr[ETH_ALEN]; u8 final:1, ap_tsf_valid:1; enum nl80211_peer_measurement_type type; union { struct cfg80211_pmsr_ftm_result ftm; }; }; /** * struct cfg80211_pmsr_ftm_request_peer - FTM request data * @requested: indicates FTM is requested * @preamble: frame preamble to use * @burst_period: burst period to use * @asap: indicates to use ASAP mode * @num_bursts_exp: number of bursts exponent * @burst_duration: burst duration * @ftms_per_burst: number of FTMs per burst * @ftmr_retries: number of retries for FTM request * @request_lci: request LCI information * @request_civicloc: request civic location information * @trigger_based: use trigger based ranging for the measurement. * If neither @trigger_based nor @non_trigger_based is set, * EDCA based ranging will be used. * @non_trigger_based: use non trigger based ranging for the measurement. * If neither @trigger_based nor @non_trigger_based is set, * EDCA based ranging will be used. * @lmr_feedback: negotiate for I2R LMR feedback. Only valid if either * @trigger_based or @non_trigger_based is set. * @bss_color: the bss color of the responder. Optional. Set to zero to * indicate the driver should set the BSS color. Only valid if * @non_trigger_based or @trigger_based is set. * * See also nl80211 for the respective attribute documentation. */ struct cfg80211_pmsr_ftm_request_peer { enum nl80211_preamble preamble; u16 burst_period; u8 requested:1, asap:1, request_lci:1, request_civicloc:1, trigger_based:1, non_trigger_based:1, lmr_feedback:1; u8 num_bursts_exp; u8 burst_duration; u8 ftms_per_burst; u8 ftmr_retries; u8 bss_color; }; /** * struct cfg80211_pmsr_request_peer - peer data for a peer measurement request * @addr: MAC address * @chandef: channel to use * @report_ap_tsf: report the associated AP's TSF * @ftm: FTM data, see &struct cfg80211_pmsr_ftm_request_peer */ struct cfg80211_pmsr_request_peer { u8 addr[ETH_ALEN]; struct cfg80211_chan_def chandef; u8 report_ap_tsf:1; struct cfg80211_pmsr_ftm_request_peer ftm; }; /** * struct cfg80211_pmsr_request - peer measurement request * @cookie: cookie, set by cfg80211 * @nl_portid: netlink portid - used by cfg80211 * @drv_data: driver data for this request, if required for aborting; * not otherwise freed or touched by cfg80211 * @mac_addr: MAC address used for (randomised) request * @mac_addr_mask: MAC address mask used for randomisation, bits that * are 0 in the mask should be randomised, bits that are 1 should * be taken from the @mac_addr * @list: used by cfg80211 to hold on to the request * @timeout: timeout (in milliseconds) for the whole operation, if * zero it means there's no timeout * @n_peers: number of peers to do measurements with * @peers: per-peer measurement request data */ struct cfg80211_pmsr_request { u64 cookie; void *drv_data; u32 n_peers; u32 nl_portid; u32 timeout; u8 mac_addr[ETH_ALEN] __aligned(2); u8 mac_addr_mask[ETH_ALEN] __aligned(2); struct list_head list; struct cfg80211_pmsr_request_peer peers[] __counted_by(n_peers); }; /** * struct cfg80211_update_owe_info - OWE Information * * This structure provides information needed for the drivers to offload OWE * (Opportunistic Wireless Encryption) processing to the user space. * * Commonly used across update_owe_info request and event interfaces. * * @peer: MAC address of the peer device for which the OWE processing * has to be done.
* @status: status code, %WLAN_STATUS_SUCCESS for successful OWE info * processing, use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space * cannot give you the real status code for failures. Used only for * OWE update request command interface (user space to driver). * @ie: IEs obtained from the peer or constructed by the user space. These are * the remote peer's IEs in the event from the host driver, and * the IEs constructed by user space in the request interface. * @ie_len: Length of IEs in octets. * @assoc_link_id: MLO link ID of the AP, with which (re)association is * requested by the peer. This will be filled by the driver for both MLO * and non-MLO station connections when the AP is affiliated with an MLD. * For non-MLD AP mode, it will be -1. Used only with OWE update event * (driver to user space). * @peer_mld_addr: For MLO connection, MLD address of the peer. For non-MLO * connection, it will be all zeros. This is applicable only when * @assoc_link_id is not -1, i.e., the AP is affiliated with an MLD. Used only * with OWE update event (driver to user space). */ struct cfg80211_update_owe_info { u8 peer[ETH_ALEN] __aligned(2); u16 status; const u8 *ie; size_t ie_len; int assoc_link_id; u8 peer_mld_addr[ETH_ALEN] __aligned(2); }; /** * struct mgmt_frame_regs - management frame registrations data * @global_stypes: bitmap of management frame subtypes registered * for the entire device * @interface_stypes: bitmap of management frame subtypes registered * for the given interface * @global_mcast_stypes: mcast RX is needed globally for these subtypes * @interface_mcast_stypes: mcast RX is needed on this interface * for these subtypes */ struct mgmt_frame_regs { u32 global_stypes, interface_stypes; u32 global_mcast_stypes, interface_mcast_stypes; }; /** * struct cfg80211_ops - backend description for wireless configuration * * This struct is registered by fullmac card drivers and/or wireless stacks * in order to handle configuration requests on their interfaces. * * All callbacks except where otherwise noted should return 0 * on success or a negative error code. * * All operations are invoked with the wiphy mutex held. The RTNL may be * held in addition (due to wireless extensions) but this cannot be relied * upon except in cases where documented below. Note that due to ordering, * the RTNL also cannot be acquired in any handlers. * * @suspend: wiphy device needs to be suspended. The variable @wow will * be %NULL or contain the enabled Wake-on-Wireless triggers that are * configured for the device. * @resume: wiphy device needs to be resumed * @set_wakeup: Called when WoWLAN is enabled/disabled, use this callback * to call device_set_wakeup_enable() to enable/disable wakeup from * the device. * * @add_virtual_intf: create a new virtual interface with the given name, * must set the struct wireless_dev's iftype. Beware: You must create * the new netdev in the wiphy's network namespace! Returns the struct * wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must * also set the address member in the wdev. * This additionally holds the RTNL to be able to do netdev changes. * * @del_virtual_intf: remove the virtual interface * This additionally holds the RTNL to be able to do netdev changes. * * @change_virtual_intf: change type/configuration of virtual interface, * keep the struct wireless_dev's iftype updated. * This additionally holds the RTNL to be able to do netdev changes. * * @add_intf_link: Add a new MLO link to the given interface.
Note that * the wdev->link[] data structure has been updated, so the new link * address is available. * @del_intf_link: Remove an MLO link from the given interface. * * @add_key: add a key with the given parameters. @mac_addr will be %NULL * when adding a group key. @link_id will be -1 for non-MLO connection. * For MLO connection, @link_id will be >= 0 for group key and -1 for * pairwise key, @mac_addr will be peer's MLD address for MLO pairwise key. * * @get_key: get information about the key with the given parameters. * @mac_addr will be %NULL when requesting information for a group * key. All pointers given to the @callback function need not be valid * after it returns. This function should return an error if it is * not possible to retrieve the key, -ENOENT if it doesn't exist. * @link_id will be -1 for non-MLO connection. For MLO connection, * @link_id will be >= 0 for group key and -1 for pairwise key, @mac_addr * will be peer's MLD address for MLO pairwise key. * * @del_key: remove a key given the @mac_addr (%NULL for a group key) * and @key_index, return -ENOENT if the key doesn't exist. @link_id will * be -1 for non-MLO connection. For MLO connection, @link_id will be >= 0 * for group key and -1 for pairwise key, @mac_addr will be peer's MLD * address for MLO pairwise key. * * @set_default_key: set the default key on an interface. @link_id will be >= 0 * for MLO connection and -1 for non-MLO connection. * * @set_default_mgmt_key: set the default management frame key on an interface. * @link_id will be >= 0 for MLO connection and -1 for non-MLO connection. * * @set_default_beacon_key: set the default Beacon frame key on an interface. * @link_id will be >= 0 for MLO connection and -1 for non-MLO connection. * * @set_rekey_data: give the data necessary for GTK rekeying to the driver * * @start_ap: Start acting in AP mode defined by the parameters. * @change_beacon: Change the beacon parameters for an access point mode * interface. This should reject the call when AP mode wasn't started. * @stop_ap: Stop being an AP, including stopping beaconing. * * @add_station: Add a new station. * @del_station: Remove a station * @change_station: Modify a given station. Note that flag changes are not fully * validated in cfg80211; in particular, the auth/assoc/authorized flags * might come to the driver in invalid combinations -- make sure to check * them, also against the existing state! Drivers must call * cfg80211_check_station_change() to validate the information; see the * sketch below. * @get_station: get station information for the station identified by @mac * @dump_station: dump station callback -- resume dump at index @idx * * @add_mpath: add a fixed mesh path * @del_mpath: delete a given mesh path * @change_mpath: change a given mesh path * @get_mpath: get a mesh path for the given parameters * @dump_mpath: dump mesh path callback -- resume dump at index @idx * @get_mpp: get a mesh proxy path for the given parameters * @dump_mpp: dump mesh proxy path callback -- resume dump at index @idx * @join_mesh: join the mesh network with the specified parameters * (invoked with the wireless_dev mutex held) * @leave_mesh: leave the current mesh network * (invoked with the wireless_dev mutex held) * * @get_mesh_config: Get the current mesh configuration * * @update_mesh_config: Update mesh parameters on a running mesh. * The mask is a bitfield which tells us which parameters to * set, and which to leave alone. * * @change_bss: Modify parameters for a given BSS.
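 *
 * As an illustration of the @change_station contract above, a minimal
 * driver callback might validate the request before applying it. This is
 * a sketch only (my_change_station is a hypothetical driver function),
 * not a complete implementation:
 *
 * .. code-block:: c
 *
 *	static int my_change_station(struct wiphy *wiphy,
 *				     struct net_device *dev, const u8 *mac,
 *				     struct station_parameters *params)
 *	{
 *		int err;
 *
 *		// Reject invalid auth/assoc/authorized flag combinations
 *		// before touching any driver or firmware state.
 *		err = cfg80211_check_station_change(wiphy, params,
 *						    CFG80211_STA_AP_CLIENT);
 *		if (err)
 *			return err;
 *
 *		// Apply the validated changes to firmware/hardware here.
 *		return 0;
 *	}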
* * @inform_bss: Called by cfg80211 while being informed about new BSS data * for every BSS found within the reported data or frame. This is called * from within the cfg80211 inform_bss handlers while holding the bss_lock. * The data parameter is passed through from drv_data inside * struct cfg80211_inform_bss. * The new IE data for the BSS is explicitly passed. * * @set_txq_params: Set TX queue parameters * * @libertas_set_mesh_channel: Only for backward compatibility for libertas, * as it doesn't implement join_mesh and needs to set the channel to * join the mesh instead. * * @set_monitor_channel: Set the monitor mode channel for the device. If other * interfaces are active this callback should reject the configuration. * If no interfaces are active or the device is down, the channel should * be stored for when a monitor interface becomes active. * * @scan: Request to do a scan. If returning zero, the scan request is given * to the driver, and will be valid until passed to cfg80211_scan_done(). * For scan results, call cfg80211_inform_bss(); you can call this outside * the scan/scan_done bracket too. * @abort_scan: Tell the driver to abort an ongoing scan. The driver shall * indicate the status of the scan through cfg80211_scan_done(). * * @auth: Request to authenticate with the specified peer * (invoked with the wireless_dev mutex held) * @assoc: Request to (re)associate with the specified peer * (invoked with the wireless_dev mutex held) * @deauth: Request to deauthenticate from the specified peer * (invoked with the wireless_dev mutex held) * @disassoc: Request to disassociate from the specified peer * (invoked with the wireless_dev mutex held) * * @connect: Connect to the ESS with the specified parameters. When connected, * call cfg80211_connect_result()/cfg80211_connect_bss() with status code * %WLAN_STATUS_SUCCESS. If the connection fails for some reason, call * cfg80211_connect_result()/cfg80211_connect_bss() with the status code * from the AP or cfg80211_connect_timeout() if no frame with status code * was received. * The driver is allowed to roam to other BSSes within the ESS when the * other BSS matches the connect parameters. When such roaming is initiated * by the driver, the driver is expected to verify that the target matches * the configured security parameters and to use a Reassociation Request * frame instead of an Association Request frame. * The connect function can also be used to request the driver to perform a * specific roam when connected to an ESS. In that case, the prev_bssid * parameter is set to the BSSID of the currently associated BSS as an * indication of requesting reassociation. * In both the driver-initiated and new connect() call initiated roaming * cases, the result of roaming is indicated with a call to * cfg80211_roamed(). (invoked with the wireless_dev mutex held) * @update_connect_params: Update the connect parameters while connected to a * BSS. The updated parameters can be used by driver/firmware for * subsequent BSS selection (roaming) decisions and to form the * Authentication/(Re)Association Request frames. This call does not * request an immediate disassociation or reassociation with the current * BSS, i.e., this impacts only subsequent (re)associations. The bits in * changed are defined in &enum cfg80211_connect_params_changed. * (invoked with the wireless_dev mutex held) * @disconnect: Disconnect from the BSS/ESS or stop connection attempts if * a connection is in progress.
Once done, call cfg80211_disconnected() in * case the connection was already established (invoked with the * wireless_dev mutex held), otherwise call cfg80211_connect_timeout(). * * @join_ibss: Join the specified IBSS (or create if necessary). Once done, call * cfg80211_ibss_joined(), also call that function when changing BSSID due * to a merge. * (invoked with the wireless_dev mutex held) * @leave_ibss: Leave the IBSS. * (invoked with the wireless_dev mutex held) * * @set_mcast_rate: Set the specified multicast rate (only if vif is in ADHOC or * MESH mode) * * @set_wiphy_params: Notify that wiphy parameters have changed; * @changed bitfield (see &enum wiphy_params_flags) describes which values * have changed. The actual parameter values are available in * struct wiphy. If returning an error, no value should be changed. * * @set_tx_power: set the transmit power according to the parameters, * the power passed is in mBm, to get dBm use MBM_TO_DBM(). The * wdev may be %NULL if power was set for the wiphy, and will * always be %NULL unless the driver supports per-vif TX power * (as advertised by the nl80211 feature flag.) * @get_tx_power: store the current TX power into the dbm variable; * return 0 if successful * * @rfkill_poll: polls the hw rfkill line, use cfg80211 reporting * functions to adjust rfkill hw state * * @dump_survey: get site survey information. * * @remain_on_channel: Request the driver to remain awake on the specified * channel for the specified duration to complete an off-channel * operation (e.g., public action frame exchange). When the driver is * ready on the requested channel, it must indicate this with an event * notification by calling cfg80211_ready_on_channel(). * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation. * This allows the operation to be terminated prior to timeout based on * the duration value. * @mgmt_tx: Transmit a management frame. * @mgmt_tx_cancel_wait: Cancel the wait time from transmitting a management * frame on another channel * * @testmode_cmd: run a test mode command; @wdev may be %NULL * @testmode_dump: Implement a test mode dump. The cb->args[2] and up may be * used by the function, but 0 and 1 must not be touched. Additionally, * returning error codes other than -ENOBUFS and -ENOENT will terminate the * dump and return to userspace with an error, so be careful. If any data * was passed in from userspace then the data/len arguments will be present * and point to the data contained in %NL80211_ATTR_TESTDATA. * * @set_bitrate_mask: set the bitrate mask configuration * * @set_pmksa: Cache a PMKID for a BSSID. This is mostly useful for fullmac * devices running firmware capable of generating the (re) association * RSN IE. It allows for faster roaming between WPA2 BSSIDs. * @del_pmksa: Delete a cached PMKID. * @flush_pmksa: Flush all cached PMKIDs. * @set_power_mgmt: Configure WLAN power management. A timeout value of -1 * allows the driver to adjust the dynamic ps timeout value. * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold. * After configuration, the driver should (soon) send an event indicating * the current level is above/below the configured threshold; this may * need some care when the configuration is changed (without first being * disabled.) * @set_cqm_rssi_range_config: Configure two RSSI thresholds in the * connection quality monitor. An event is to be sent only when the * signal level is found to be outside the two values.
The driver should * set %NL80211_EXT_FEATURE_CQM_RSSI_LIST if this method is implemented. * If it is provided then there's no point providing @set_cqm_rssi_config. * @set_cqm_txe_config: Configure connection quality monitor TX error * thresholds. * @sched_scan_start: Tell the driver to start a scheduled scan. * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan with * given request id. This call must stop the scheduled scan and be ready * for starting a new one before it returns, i.e. @sched_scan_start may be * called immediately after that again and should not fail in that case. * The driver should not call cfg80211_sched_scan_stopped() for a requested * stop (when this method returns 0). * * @update_mgmt_frame_registrations: Notify the driver that management frame * registrations were updated. The callback is allowed to sleep. * * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device. * Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may * reject TX/RX mask combinations they cannot support by returning -EINVAL * (also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX). * * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant). * * @tdls_mgmt: Transmit a TDLS management frame. * @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup). * * @probe_client: probe an associated client, must return a cookie that it * later passes to cfg80211_probe_status(). * * @set_noack_map: Set the NoAck Map for the TIDs. * * @get_channel: Get the current operating channel for the virtual interface. * For monitor interfaces, it should return %NULL unless there's a single * current monitoring channel. * * @start_p2p_device: Start the given P2P device. * @stop_p2p_device: Stop the given P2P device. * * @set_mac_acl: Sets MAC address control list in AP and P2P GO mode. * Parameters include ACL policy, an array of MAC address of stations * and the number of MAC addresses. If there is already a list in driver * this new list replaces the existing one. Driver has to clear its ACL * when the number of MAC address entries passed is 0. Drivers which * advertise the support for MAC based ACL have to implement this callback. * * @start_radar_detection: Start radar detection in the driver. * * @end_cac: End running CAC, probably because a related CAC * was finished on another phy. * * @update_ft_ies: Provide updated Fast BSS Transition information to the * driver. If the SME is in the driver/firmware, this information can be * used in building Authentication and Reassociation Request frames. * * @crit_proto_start: Indicates a critical protocol needs more link reliability * for a given duration (milliseconds). The protocol is provided so the * driver can take the most appropriate actions. * @crit_proto_stop: Indicates critical protocol no longer needs increased link * reliability. This operation cannot fail. * @set_coalesce: Set coalesce parameters. * * @channel_switch: initiate channel-switch procedure (with CSA). Driver is * responsible for verifying whether the switch is possible. Since this is * inherently tricky, the driver may decide to disconnect an interface later * with cfg80211_stop_iface(). This doesn't mean the driver can accept * everything. It should do its best to verify requests and reject them * as soon as possible. * * @set_qos_map: Set QoS mapping information to the driver * * @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the * given interface. This is used, e.g.,
for dynamic HT 20/40 MHz channel width * changes during the lifetime of the BSS. * * @add_tx_ts: validate (if admitted_time is 0) or add a TX TS to the device * with the given parameters; action frame exchange has been handled by * userspace so this just has to modify the TX path to take the TS into * account. * If the admitted time is 0, just validate the parameters to make sure * the session can be created at all; it is valid to just always return * success for that but that may result in inefficient behaviour (handshake * with the peer followed by immediate teardown when the addition is later * rejected). * @del_tx_ts: remove an existing TX TS * * @join_ocb: join the OCB network with the specified parameters * (invoked with the wireless_dev mutex held) * @leave_ocb: leave the current OCB network * (invoked with the wireless_dev mutex held) * * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver * is responsible for continually initiating channel-switching operations * and returning to the base channel for communication with the AP. * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both * peers must be on the base channel when the call completes. * @start_nan: Start the NAN interface. * @stop_nan: Stop the NAN interface. * @add_nan_func: Add a NAN function. Returns negative value on failure. * On success @nan_func ownership is transferred to the driver and * it may access it outside of the scope of this function. The driver * should free the @nan_func when no longer needed by calling * cfg80211_free_nan_func(). * On success the driver should assign an instance_id in the * provided @nan_func. * @del_nan_func: Delete a NAN function. * @nan_change_conf: changes NAN configuration. The changed parameters must * be specified in @changes (using &enum cfg80211_nan_conf_changes); * All other parameters must be ignored. * * @set_multicast_to_unicast: configure multicast to unicast conversion for BSS * * @get_txq_stats: Get TXQ stats for interface or phy. If wdev is %NULL, this * function should return phy stats, and interface stats otherwise. * * @set_pmk: configure the PMK to be used for offloaded 802.1X 4-Way handshake. * If not deleted through @del_pmk the PMK remains valid until disconnect * upon which the driver should clear it. * (invoked with the wireless_dev mutex held) * @del_pmk: delete the previously configured PMK for the given authenticator. * (invoked with the wireless_dev mutex held) * * @external_auth: indicates the result of offloaded authentication processing * from user space; see the sketch below * * @tx_control_port: TX a control port frame (EAPoL). The noencrypt parameter * tells the driver that the frame should not be encrypted. * * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available. * Statistics should be cumulative, currently no way to reset is provided. * @start_pmsr: start peer measurement (e.g. FTM) * @abort_pmsr: abort peer measurement * * @update_owe_info: Provide updated OWE info to the driver. A driver * implementing SME but offloading OWE processing to user space will get * the updated DH IE through this interface. * * @probe_mesh_link: Probe a direct mesh peer's link quality by sending a data * frame, overruling the HWMP path selection algorithm. * @set_tid_config: TID specific configuration; this can be peer or BSS specific. * This callback may sleep. * @reset_tid_config: Reset TID specific configuration for the peer, for the * given TIDs. This callback may sleep. * * @set_sar_specs: Update the SAR (TX power) settings.
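 *
 * To illustrate the @external_auth flow documented above: a driver that
 * offloads SAE to user space first raises the request event and later
 * receives the result through its @external_auth callback. A sketch only;
 * my_trigger_external_auth, my_external_auth and my_fw_set_auth_status
 * are hypothetical names:
 *
 * .. code-block:: c
 *
 *	// Ask user space to perform SAE authentication with the BSS.
 *	static int my_trigger_external_auth(struct net_device *dev,
 *					    struct cfg80211_bss *bss)
 *	{
 *		struct cfg80211_external_auth_params params = {
 *			.action = NL80211_EXTERNAL_AUTH_START,
 *			.key_mgmt_suite = WLAN_AKM_SUITE_SAE,
 *		};
 *
 *		memcpy(params.bssid, bss->bssid, ETH_ALEN);
 *		return cfg80211_external_auth_request(dev, &params, GFP_KERNEL);
 *	}
 *
 *	// .external_auth: user space reports the SAE outcome.
 *	static int my_external_auth(struct wiphy *wiphy, struct net_device *dev,
 *				    struct cfg80211_external_auth_params *params)
 *	{
 *		// Hypothetical firmware helper consuming the result.
 *		return my_fw_set_auth_status(dev, params->bssid, params->status);
 *	}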
* * @color_change: Initiate a color change. * * @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use * it to decrypt (Re)Association Request and encrypt (Re)Association * Response frames. * * @set_radar_background: Configure dedicated offchannel chain available for * radar/CAC detection on some hw. This chain can't be used to transmit * or receive frames and it is bound to a running wdev. * Background radar/CAC detection avoids the CAC downtime of * switching to a different channel during CAC detection on the selected * radar channel. * The caller is expected to set the chandef pointer to NULL in order to * disable background CAC/radar detection. * @add_link_station: Add a link to a station. * @mod_link_station: Modify a link of a station. * @del_link_station: Remove a link of a station. * * @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames. * @set_ttlm: set the TID to link mapping. * @set_epcs: Enable/Disable EPCS for station mode. * @get_radio_mask: get bitmask of radios in use. * (invoked with the wiphy mutex held) * @assoc_ml_reconf: Request a non-AP MLO connection to perform ML * reconfiguration, i.e., add and/or remove links to/from the * association using ML reconfiguration action frames. Successfully added * links will be added to the set of valid links. Successfully removed * links will be removed from the set of valid links. The driver must * indicate removed links by calling cfg80211_links_removed() and added * links by calling cfg80211_mlo_reconf_add_done(). When calling * cfg80211_mlo_reconf_add_done() the bss pointer must be given for each * link for which MLO reconfiguration 'add' operation was requested. */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); int (*resume)(struct wiphy *wiphy); void (*set_wakeup)(struct wiphy *wiphy, bool enabled); struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, enum nl80211_iftype type, struct vif_params *params); int (*del_virtual_intf)(struct wiphy *wiphy, struct wireless_dev *wdev); int (*change_virtual_intf)(struct wiphy *wiphy, struct net_device *dev, enum nl80211_iftype type, struct vif_params *params); int (*add_intf_link)(struct wiphy *wiphy, struct wireless_dev *wdev, unsigned int link_id); void (*del_intf_link)(struct wiphy *wiphy, struct wireless_dev *wdev, unsigned int link_id); int (*add_key)(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params); int (*get_key)(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback)(void *cookie, struct key_params*)); int (*del_key)(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr); int (*set_default_key)(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index, bool unicast, bool multicast); int (*set_default_mgmt_key)(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index); int (*set_default_beacon_key)(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index); int (*start_ap)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_settings *settings); int (*change_beacon)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_update *info); int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev, unsigned int link_id); int (*add_station)(struct wiphy
*wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params); int (*del_station)(struct wiphy *wiphy, struct net_device *dev, struct station_del_parameters *params); int (*change_station)(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params); int (*get_station)(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_info *sinfo); int (*dump_station)(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo); int (*add_mpath)(struct wiphy *wiphy, struct net_device *dev, const u8 *dst, const u8 *next_hop); int (*del_mpath)(struct wiphy *wiphy, struct net_device *dev, const u8 *dst); int (*change_mpath)(struct wiphy *wiphy, struct net_device *dev, const u8 *dst, const u8 *next_hop); int (*get_mpath)(struct wiphy *wiphy, struct net_device *dev, u8 *dst, u8 *next_hop, struct mpath_info *pinfo); int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *dst, u8 *next_hop, struct mpath_info *pinfo); int (*get_mpp)(struct wiphy *wiphy, struct net_device *dev, u8 *dst, u8 *mpp, struct mpath_info *pinfo); int (*dump_mpp)(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *dst, u8 *mpp, struct mpath_info *pinfo); int (*get_mesh_config)(struct wiphy *wiphy, struct net_device *dev, struct mesh_config *conf); int (*update_mesh_config)(struct wiphy *wiphy, struct net_device *dev, u32 mask, const struct mesh_config *nconf); int (*join_mesh)(struct wiphy *wiphy, struct net_device *dev, const struct mesh_config *conf, const struct mesh_setup *setup); int (*leave_mesh)(struct wiphy *wiphy, struct net_device *dev); int (*join_ocb)(struct wiphy *wiphy, struct net_device *dev, struct ocb_setup *setup); int (*leave_ocb)(struct wiphy *wiphy, struct net_device *dev); int (*change_bss)(struct wiphy *wiphy, struct net_device *dev, struct bss_parameters *params); void (*inform_bss)(struct wiphy *wiphy, struct cfg80211_bss *bss, const struct cfg80211_bss_ies *ies, void *data); int (*set_txq_params)(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_txq_params *params); int (*libertas_set_mesh_channel)(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel *chan); int (*set_monitor_channel)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_chan_def *chandef); int (*scan)(struct wiphy *wiphy, struct cfg80211_scan_request *request); void (*abort_scan)(struct wiphy *wiphy, struct wireless_dev *wdev); int (*auth)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_auth_request *req); int (*assoc)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_assoc_request *req); int (*deauth)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_deauth_request *req); int (*disassoc)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_disassoc_request *req); int (*connect)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme); int (*update_connect_params)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme, u32 changed); int (*disconnect)(struct wiphy *wiphy, struct net_device *dev, u16 reason_code); int (*join_ibss)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ibss_params *params); int (*leave_ibss)(struct wiphy *wiphy, struct net_device *dev); int (*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev, int rate[NUM_NL80211_BANDS]); int (*set_wiphy_params)(struct wiphy *wiphy, int radio_idx, u32 changed); int (*set_tx_power)(struct wiphy *wiphy, struct 
wireless_dev *wdev, int radio_idx, enum nl80211_tx_power_setting type, int mbm); int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev, int radio_idx, unsigned int link_id, int *dbm); void (*rfkill_poll)(struct wiphy *wiphy); #ifdef CONFIG_NL80211_TESTMODE int (*testmode_cmd)(struct wiphy *wiphy, struct wireless_dev *wdev, void *data, int len); int (*testmode_dump)(struct wiphy *wiphy, struct sk_buff *skb, struct netlink_callback *cb, void *data, int len); #endif int (*set_bitrate_mask)(struct wiphy *wiphy, struct net_device *dev, unsigned int link_id, const u8 *peer, const struct cfg80211_bitrate_mask *mask); int (*dump_survey)(struct wiphy *wiphy, struct net_device *netdev, int idx, struct survey_info *info); int (*set_pmksa)(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa); int (*del_pmksa)(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa); int (*flush_pmksa)(struct wiphy *wiphy, struct net_device *netdev); int (*remain_on_channel)(struct wiphy *wiphy, struct wireless_dev *wdev, struct ieee80211_channel *chan, unsigned int duration, u64 *cookie); int (*cancel_remain_on_channel)(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie); int (*mgmt_tx)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie); int (*mgmt_tx_cancel_wait)(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie); int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev, bool enabled, int timeout); int (*set_cqm_rssi_config)(struct wiphy *wiphy, struct net_device *dev, s32 rssi_thold, u32 rssi_hyst); int (*set_cqm_rssi_range_config)(struct wiphy *wiphy, struct net_device *dev, s32 rssi_low, s32 rssi_high); int (*set_cqm_txe_config)(struct wiphy *wiphy, struct net_device *dev, u32 rate, u32 pkts, u32 intvl); void (*update_mgmt_frame_registrations)(struct wiphy *wiphy, struct wireless_dev *wdev, struct mgmt_frame_regs *upd); int (*set_antenna)(struct wiphy *wiphy, int radio_idx, u32 tx_ant, u32 rx_ant); int (*get_antenna)(struct wiphy *wiphy, int radio_idx, u32 *tx_ant, u32 *rx_ant); int (*sched_scan_start)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_sched_scan_request *request); int (*sched_scan_stop)(struct wiphy *wiphy, struct net_device *dev, u64 reqid); int (*set_rekey_data)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_gtk_rekey_data *data); int (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, int link_id, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *buf, size_t len); int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper); int (*probe_client)(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u64 *cookie); int (*set_noack_map)(struct wiphy *wiphy, struct net_device *dev, u16 noack_map); int (*get_channel)(struct wiphy *wiphy, struct wireless_dev *wdev, unsigned int link_id, struct cfg80211_chan_def *chandef); int (*start_p2p_device)(struct wiphy *wiphy, struct wireless_dev *wdev); void (*stop_p2p_device)(struct wiphy *wiphy, struct wireless_dev *wdev); int (*set_mac_acl)(struct wiphy *wiphy, struct net_device *dev, const struct cfg80211_acl_data *params); int (*start_radar_detection)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_chan_def *chandef, u32 cac_time_ms, int link_id); void (*end_cac)(struct wiphy *wiphy, struct net_device *dev, unsigned int link_id); int (*update_ft_ies)(struct wiphy 
*wiphy, struct net_device *dev, struct cfg80211_update_ft_ies_params *ftie); int (*crit_proto_start)(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_crit_proto_id protocol, u16 duration); void (*crit_proto_stop)(struct wiphy *wiphy, struct wireless_dev *wdev); int (*set_coalesce)(struct wiphy *wiphy, struct cfg80211_coalesce *coalesce); int (*channel_switch)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_csa_settings *params); int (*set_qos_map)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_qos_map *qos_map); int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev, unsigned int link_id, struct cfg80211_chan_def *chandef); int (*add_tx_ts)(struct wiphy *wiphy, struct net_device *dev, u8 tsid, const u8 *peer, u8 user_prio, u16 admitted_time); int (*del_tx_ts)(struct wiphy *wiphy, struct net_device *dev, u8 tsid, const u8 *peer); int (*tdls_channel_switch)(struct wiphy *wiphy, struct net_device *dev, const u8 *addr, u8 oper_class, struct cfg80211_chan_def *chandef); void (*tdls_cancel_channel_switch)(struct wiphy *wiphy, struct net_device *dev, const u8 *addr); int (*start_nan)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf); void (*stop_nan)(struct wiphy *wiphy, struct wireless_dev *wdev); int (*add_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_func *nan_func); void (*del_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie); int (*nan_change_conf)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf, u32 changes); int (*set_multicast_to_unicast)(struct wiphy *wiphy, struct net_device *dev, const bool enabled); int (*get_txq_stats)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_txq_stats *txqstats); int (*set_pmk)(struct wiphy *wiphy, struct net_device *dev, const struct cfg80211_pmk_conf *conf); int (*del_pmk)(struct wiphy *wiphy, struct net_device *dev, const u8 *aa); int (*external_auth)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_external_auth_params *params); int (*tx_control_port)(struct wiphy *wiphy, struct net_device *dev, const u8 *buf, size_t len, const u8 *dest, const __be16 proto, const bool noencrypt, int link_id, u64 *cookie); int (*get_ftm_responder_stats)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ftm_responder_stats *ftm_stats); int (*start_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_pmsr_request *request); void (*abort_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_pmsr_request *request); int (*update_owe_info)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_update_owe_info *owe_info); int (*probe_mesh_link)(struct wiphy *wiphy, struct net_device *dev, const u8 *buf, size_t len); int (*set_tid_config)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_tid_config *tid_conf); int (*reset_tid_config)(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 tids); int (*set_sar_specs)(struct wiphy *wiphy, struct cfg80211_sar_specs *sar); int (*color_change)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_color_change_settings *params); int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_fils_aad *fils_aad); int (*set_radar_background)(struct wiphy *wiphy, struct cfg80211_chan_def *chandef); int (*add_link_station)(struct wiphy *wiphy, struct net_device *dev, struct link_station_parameters *params); int (*mod_link_station)(struct wiphy *wiphy, struct 
net_device *dev, struct link_station_parameters *params); int (*del_link_station)(struct wiphy *wiphy, struct net_device *dev, struct link_station_del_parameters *params); int (*set_hw_timestamp)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_set_hw_timestamp *hwts); int (*set_ttlm)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ttlm_params *params); u32 (*get_radio_mask)(struct wiphy *wiphy, struct net_device *dev); int (*assoc_ml_reconf)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ml_reconf_req *req); int (*set_epcs)(struct wiphy *wiphy, struct net_device *dev, bool val); }; /* * wireless hardware and networking interfaces structures * and registration/helper functions */ /** * enum wiphy_flags - wiphy capability flags * * @WIPHY_FLAG_SPLIT_SCAN_6GHZ: if set to true, the scan request will be split * into two, first for legacy bands and second for 6 GHz. * @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this * wiphy at all * @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled * by default -- this flag will be set depending on the kernel's default * on wiphy_new(), but can be changed by the driver if it has a good * reason to override the default * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station * on a VLAN interface). This flag also serves an extra purpose of * supporting 4ADDR AP mode on devices which do not support AP/VLAN iftype. * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station * @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the * control port protocol ethertype. The device also honours the * control_port_no_encrypt flag. * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN. * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing * auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH. * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports the roaming feature in the * firmware. * @WIPHY_FLAG_AP_UAPSD: The device supports U-APSD on AP. * @WIPHY_FLAG_SUPPORTS_TDLS: The device supports TDLS (802.11z) operation. * @WIPHY_FLAG_TDLS_EXTERNAL_SETUP: The device does not handle TDLS (802.11z) * link setup/discovery operations internally. Setup, discovery and * teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT * command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be * used for asking the driver/firmware to perform a TDLS operation. * @WIPHY_FLAG_HAVE_AP_SME: device integrates AP SME * @WIPHY_FLAG_REPORTS_OBSS: the device will report beacons from other BSSes * when there are virtual interfaces in AP mode by calling * cfg80211_report_obss_beacon(). * @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device * responds to probe-requests in hardware. * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX. * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call. * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels. * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in * beaconing mode (AP, IBSS, Mesh, ...). * @WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK: The device supports bigger KEK and KCK keys * @WIPHY_FLAG_SUPPORTS_MLO: This is a temporary flag gating the MLO APIs, * in order to not have them reachable in normal drivers, until we have * complete feature/interface combinations/etc. advertisement. No driver * should set this flag for now. * @WIPHY_FLAG_SUPPORTS_EXT_KCK_32: The device supports 32-byte KCK keys.
* @WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER: The device can handle regulatory * notifications for NL80211_REGDOM_SET_BY_DRIVER. * @WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON: reg_call_notifier() is called if the * driver sets this flag, to update channels on beacon hints. * @WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY: support connection to non-primary link * of an NSTR mobile AP MLD. * @WIPHY_FLAG_DISABLE_WEXT: disable wireless extensions for this device */ enum wiphy_flags { WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0), WIPHY_FLAG_SUPPORTS_MLO = BIT(1), WIPHY_FLAG_SPLIT_SCAN_6GHZ = BIT(2), WIPHY_FLAG_NETNS_OK = BIT(3), WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4), WIPHY_FLAG_4ADDR_AP = BIT(5), WIPHY_FLAG_4ADDR_STATION = BIT(6), WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7), WIPHY_FLAG_IBSS_RSN = BIT(8), WIPHY_FLAG_DISABLE_WEXT = BIT(9), WIPHY_FLAG_MESH_AUTH = BIT(10), WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = BIT(11), WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY = BIT(12), WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13), WIPHY_FLAG_AP_UAPSD = BIT(14), WIPHY_FLAG_SUPPORTS_TDLS = BIT(15), WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(16), WIPHY_FLAG_HAVE_AP_SME = BIT(17), WIPHY_FLAG_REPORTS_OBSS = BIT(18), WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19), WIPHY_FLAG_OFFCHAN_TX = BIT(20), WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21), WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22), WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23), WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER = BIT(24), WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON = BIT(25), }; /** * struct ieee80211_iface_limit - limit on certain interface types * @max: maximum number of interfaces of these types * @types: interface types (bits) */ struct ieee80211_iface_limit { u16 max; u16 types; }; /** * struct ieee80211_iface_combination - possible interface combination * * With this structure the driver can describe which interface * combinations it supports concurrently. When set in a struct wiphy_radio, * the combinations refer to combinations of interfaces currently active on * that radio. * * Examples: * * 1. Allow #STA <= 1, #AP <= 1, matching BI, channels = 1, 2 total: * * .. code-block:: c * * struct ieee80211_iface_limit limits1[] = { * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, * { .max = 1, .types = BIT(NL80211_IFTYPE_AP), }, * }; * struct ieee80211_iface_combination combination1 = { * .limits = limits1, * .n_limits = ARRAY_SIZE(limits1), * .max_interfaces = 2, * .beacon_int_infra_match = true, * }; * * * 2. Allow #{AP, P2P-GO} <= 8, channels = 1, 8 total: * * .. code-block:: c * * struct ieee80211_iface_limit limits2[] = { * { .max = 8, .types = BIT(NL80211_IFTYPE_AP) | * BIT(NL80211_IFTYPE_P2P_GO), }, * }; * struct ieee80211_iface_combination combination2 = { * .limits = limits2, * .n_limits = ARRAY_SIZE(limits2), * .max_interfaces = 8, * .num_different_channels = 1, * }; * * * 3. Allow #STA <= 1, #{P2P-client,P2P-GO} <= 3 on two channels, 4 total. * * This allows for an infrastructure connection and three P2P connections. * * ..
code-block:: c * * struct ieee80211_iface_limit limits3[] = { * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, * { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) | * BIT(NL80211_IFTYPE_P2P_CLIENT), }, * }; * struct ieee80211_iface_combination combination3 = { * .limits = limits3, * .n_limits = ARRAY_SIZE(limits3), * .max_interfaces = 4, * .num_different_channels = 2, * }; * */ struct ieee80211_iface_combination { /** * @limits: * limits for the given interface types */ const struct ieee80211_iface_limit *limits; /** * @num_different_channels: * can use up to this many different channels */ u32 num_different_channels; /** * @max_interfaces: * maximum number of interfaces in total allowed in this group */ u16 max_interfaces; /** * @n_limits: * number of limitations */ u8 n_limits; /** * @beacon_int_infra_match: * In this combination, the beacon intervals between infrastructure * and AP types must match. This is required only in special cases. */ bool beacon_int_infra_match; /** * @radar_detect_widths: * bitmap of channel widths supported for radar detection */ u8 radar_detect_widths; /** * @radar_detect_regions: * bitmap of regions supported for radar detection */ u8 radar_detect_regions; /** * @beacon_int_min_gcd: * This interface combination supports different beacon intervals. * * = 0 * all beacon intervals for the different interfaces must be the same. * > 0 * any beacon interval for an interface that is part of this combination AND * the GCD of all beacon intervals from beaconing interfaces of this * combination must be greater than or equal to this value. */ u32 beacon_int_min_gcd; }; struct ieee80211_txrx_stypes { u16 tx, rx; }; /** * enum wiphy_wowlan_support_flags - WoWLAN support flags * @WIPHY_WOWLAN_ANY: supports wakeup for the special "any" * trigger that keeps the device operating as-is and * wakes up the host on any activity, for example a * received packet that passed filtering; note that the * packet should be preserved in that case * @WIPHY_WOWLAN_MAGIC_PKT: supports wakeup on magic packet * (see nl80211.h) * @WIPHY_WOWLAN_DISCONNECT: supports wakeup on disconnect * @WIPHY_WOWLAN_SUPPORTS_GTK_REKEY: supports GTK rekeying while asleep * @WIPHY_WOWLAN_GTK_REKEY_FAILURE: supports wakeup on GTK rekey failure * @WIPHY_WOWLAN_EAP_IDENTITY_REQ: supports wakeup on EAP identity request * @WIPHY_WOWLAN_4WAY_HANDSHAKE: supports wakeup on 4-way handshake failure * @WIPHY_WOWLAN_RFKILL_RELEASE: supports wakeup on RF-kill release * @WIPHY_WOWLAN_NET_DETECT: supports wakeup on network detection */ enum wiphy_wowlan_support_flags { WIPHY_WOWLAN_ANY = BIT(0), WIPHY_WOWLAN_MAGIC_PKT = BIT(1), WIPHY_WOWLAN_DISCONNECT = BIT(2), WIPHY_WOWLAN_SUPPORTS_GTK_REKEY = BIT(3), WIPHY_WOWLAN_GTK_REKEY_FAILURE = BIT(4), WIPHY_WOWLAN_EAP_IDENTITY_REQ = BIT(5), WIPHY_WOWLAN_4WAY_HANDSHAKE = BIT(6), WIPHY_WOWLAN_RFKILL_RELEASE = BIT(7), WIPHY_WOWLAN_NET_DETECT = BIT(8), }; struct wiphy_wowlan_tcp_support { const struct nl80211_wowlan_tcp_data_token_feature *tok; u32 data_payload_max; u32 data_interval_max; u32 wake_payload_max; bool seq; }; /** * struct wiphy_wowlan_support - WoWLAN support data * @flags: see &enum wiphy_wowlan_support_flags * @n_patterns: number of supported wakeup patterns * (see nl80211.h for the pattern definition) * @pattern_max_len: maximum length of each pattern * @pattern_min_len: minimum length of each pattern * @max_pkt_offset: maximum Rx packet offset * @max_nd_match_sets: maximum number of matchsets for net-detect, * similar, but not necessarily identical, to max_match_sets for * scheduled scans.
* See &struct cfg80211_sched_scan_request.@match_sets for more * details. * @tcp: TCP wakeup support information */ struct wiphy_wowlan_support { u32 flags; int n_patterns; int pattern_max_len; int pattern_min_len; int max_pkt_offset; int max_nd_match_sets; const struct wiphy_wowlan_tcp_support *tcp; }; /** * struct wiphy_coalesce_support - coalesce support data * @n_rules: maximum number of coalesce rules * @max_delay: maximum supported coalescing delay in msecs * @n_patterns: number of supported patterns in a rule * (see nl80211.h for the pattern definition) * @pattern_max_len: maximum length of each pattern * @pattern_min_len: minimum length of each pattern * @max_pkt_offset: maximum Rx packet offset */ struct wiphy_coalesce_support { int n_rules; int max_delay; int n_patterns; int pattern_max_len; int pattern_min_len; int max_pkt_offset; }; /** * enum wiphy_vendor_command_flags - validation flags for vendor commands * @WIPHY_VENDOR_CMD_NEED_WDEV: vendor command requires wdev * @WIPHY_VENDOR_CMD_NEED_NETDEV: vendor command requires netdev * @WIPHY_VENDOR_CMD_NEED_RUNNING: interface/wdev must be up & running * (must be combined with %_WDEV or %_NETDEV) */ enum wiphy_vendor_command_flags { WIPHY_VENDOR_CMD_NEED_WDEV = BIT(0), WIPHY_VENDOR_CMD_NEED_NETDEV = BIT(1), WIPHY_VENDOR_CMD_NEED_RUNNING = BIT(2), }; /** * enum wiphy_opmode_flag - Station's ht/vht operation mode information flags * * @STA_OPMODE_MAX_BW_CHANGED: Max Bandwidth changed * @STA_OPMODE_SMPS_MODE_CHANGED: SMPS mode changed * @STA_OPMODE_N_SS_CHANGED: max N_SS (number of spatial streams) changed * */ enum wiphy_opmode_flag { STA_OPMODE_MAX_BW_CHANGED = BIT(0), STA_OPMODE_SMPS_MODE_CHANGED = BIT(1), STA_OPMODE_N_SS_CHANGED = BIT(2), }; /** * struct sta_opmode_info - Station's ht/vht operation mode information * @changed: contains value from &enum wiphy_opmode_flag * @smps_mode: New SMPS mode value from &enum nl80211_smps_mode of a station * @bw: new max bandwidth value from &enum nl80211_chan_width of a station * @rx_nss: new rx_nss value of a station */ struct sta_opmode_info { u32 changed; enum nl80211_smps_mode smps_mode; enum nl80211_chan_width bw; u8 rx_nss; }; #define VENDOR_CMD_RAW_DATA ((const struct nla_policy *)(long)(-ENODATA)) /** * struct wiphy_vendor_command - vendor command definition * @info: vendor command identifying information, as used in nl80211 * @flags: flags, see &enum wiphy_vendor_command_flags * @doit: callback for the operation, note that wdev is %NULL if the * flags didn't ask for a wdev and non-%NULL otherwise; the data * pointer may be %NULL if userspace provided no data at all * @dumpit: dump callback, for transferring bigger/multiple items. The * @storage points to cb->args[5], i.e. it is preserved over the multiple * dumpit calls. * @policy: policy pointer for attributes within %NL80211_ATTR_VENDOR_DATA. * Set this to %VENDOR_CMD_RAW_DATA if no policy can be given and the * attribute is just raw data (e.g. a firmware command). * @maxattr: highest attribute number in policy * It is recommended not to have the same sub command with both @doit and * @dumpit, so that userspace can assume certain ones are get and others * are used with dump requests.
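 *
 * A minimal table entry might look as follows -- a sketch only, with a
 * made-up OUI and subcommand number:
 *
 * .. code-block:: c
 *
 *	static int my_vendor_doit(struct wiphy *wiphy, struct wireless_dev *wdev,
 *				  const void *data, int data_len)
 *	{
 *		// Handle the raw vendor payload here.
 *		return 0;
 *	}
 *
 *	static const struct wiphy_vendor_command my_vendor_cmds[] = {
 *		{
 *			.info = { .vendor_id = 0x001234, .subcmd = 1 },
 *			.flags = WIPHY_VENDOR_CMD_NEED_WDEV,
 *			.policy = VENDOR_CMD_RAW_DATA,
 *			.doit = my_vendor_doit,
 *		},
 *	};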
*/ struct wiphy_vendor_command { struct nl80211_vendor_cmd_info info; u32 flags; int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len); int (*dumpit)(struct wiphy *wiphy, struct wireless_dev *wdev, struct sk_buff *skb, const void *data, int data_len, unsigned long *storage); const struct nla_policy *policy; unsigned int maxattr; }; /** * struct wiphy_iftype_ext_capab - extended capabilities per interface type * @iftype: interface type * @extended_capabilities: extended capabilities supported by the driver, * additional capabilities might be supported by userspace; these are the * 802.11 extended capabilities ("Extended Capabilities element") and are * in the same format as in the information element. See IEEE Std * 802.11-2012 8.4.2.29 for the defined fields. * @extended_capabilities_mask: mask of the valid values * @extended_capabilities_len: length of the extended capabilities * @eml_capabilities: EML capabilities (for MLO) * @mld_capa_and_ops: MLD capabilities and operations (for MLO) */ struct wiphy_iftype_ext_capab { enum nl80211_iftype iftype; const u8 *extended_capabilities; const u8 *extended_capabilities_mask; u8 extended_capabilities_len; u16 eml_capabilities; u16 mld_capa_and_ops; }; /** * cfg80211_get_iftype_ext_capa - lookup interface type extended capability * @wiphy: the wiphy to look up from * @type: the interface type to look up * * Return: The extended capability for the given interface @type, may be %NULL */ const struct wiphy_iftype_ext_capab * cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type); /** * struct cfg80211_pmsr_capabilities - cfg80211 peer measurement capabilities * @max_peers: maximum number of peers in a single measurement * @report_ap_tsf: can report assoc AP's TSF for radio resource measurement * @randomize_mac_addr: can randomize MAC address for measurement * @ftm: FTM measurement data * @ftm.supported: FTM measurement is supported * @ftm.asap: ASAP-mode is supported * @ftm.non_asap: non-ASAP-mode is supported * @ftm.request_lci: can request LCI data * @ftm.request_civicloc: can request civic location data * @ftm.preambles: bitmap of preambles supported (&enum nl80211_preamble) * @ftm.bandwidths: bitmap of bandwidths supported (&enum nl80211_chan_width) * @ftm.max_bursts_exponent: maximum burst exponent supported * (set to -1 if not limited; note that setting this will necessarily * forbid using the value 15 to let the responder pick) * @ftm.max_ftms_per_burst: maximum FTMs per burst supported (set to 0 if * not limited) * @ftm.trigger_based: trigger based ranging measurement is supported * @ftm.non_trigger_based: non trigger based ranging measurement is supported */ struct cfg80211_pmsr_capabilities { unsigned int max_peers; u8 report_ap_tsf:1, randomize_mac_addr:1; struct { u32 preambles; u32 bandwidths; s8 max_bursts_exponent; u8 max_ftms_per_burst; u8 supported:1, asap:1, non_asap:1, request_lci:1, request_civicloc:1, trigger_based:1, non_trigger_based:1; } ftm; }; /** * struct wiphy_iftype_akm_suites - This structure encapsulates supported akm * suites for interface types defined in @iftypes_mask. Each type in the * @iftypes_mask must be unique across all instances of iftype_akm_suites. 
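 *
 * For instance, a driver restricting station mode to PSK and SAE could
 * advertise (a sketch; the suite selection is only illustrative):
 *
 * .. code-block:: c
 *
 *	static const u32 sta_akm_suites[] = {
 *		WLAN_AKM_SUITE_PSK,
 *		WLAN_AKM_SUITE_SAE,
 *	};
 *
 *	static const struct wiphy_iftype_akm_suites my_iftype_akms[] = {
 *		{
 *			.iftypes_mask = BIT(NL80211_IFTYPE_STATION),
 *			.akm_suites = sta_akm_suites,
 *			.n_akm_suites = ARRAY_SIZE(sta_akm_suites),
 *		},
 *	};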
* * @iftypes_mask: bitmask of interface types * @akm_suites: points to an array of supported akm suites * @n_akm_suites: number of supported AKM suites */ struct wiphy_iftype_akm_suites { u16 iftypes_mask; const u32 *akm_suites; int n_akm_suites; }; /** * struct wiphy_radio_cfg - physical radio config of a wiphy * This structure describes the configuration of a physical radio in a * wiphy. It is used to denote per-radio attributes belonging to a wiphy. * * @rts_threshold: RTS threshold (dot11RTSThreshold); * -1 (default) = RTS/CTS disabled */ struct wiphy_radio_cfg { u32 rts_threshold; }; /** * struct wiphy_radio_freq_range - wiphy frequency range * @start_freq: start range edge frequency (kHz) * @end_freq: end range edge frequency (kHz) */ struct wiphy_radio_freq_range { u32 start_freq; u32 end_freq; }; /** * struct wiphy_radio - physical radio of a wiphy * This structure describes a physical radio belonging to a wiphy. * It is used to describe concurrent-channel capabilities. Only one channel * can be active on the radio described by struct wiphy_radio. * * @freq_range: frequency range that the radio can operate on. * @n_freq_range: number of elements in @freq_range * * @iface_combinations: Valid interface combinations array, should not * list single interface types. * @n_iface_combinations: number of entries in @iface_combinations array. * * @antenna_mask: bitmask of antennas connected to this radio. */ struct wiphy_radio { const struct wiphy_radio_freq_range *freq_range; int n_freq_range; const struct ieee80211_iface_combination *iface_combinations; int n_iface_combinations; u32 antenna_mask; }; /** * enum wiphy_nan_flags - NAN capabilities * * @WIPHY_NAN_FLAGS_CONFIGURABLE_SYNC: Device supports NAN configurable * synchronization. * @WIPHY_NAN_FLAGS_USERSPACE_DE: Device doesn't support DE offload. */ enum wiphy_nan_flags { WIPHY_NAN_FLAGS_CONFIGURABLE_SYNC = BIT(0), WIPHY_NAN_FLAGS_USERSPACE_DE = BIT(1), }; /** * struct wiphy_nan_capa - NAN capabilities * * This structure describes the NAN capabilities of a wiphy. * * @flags: NAN capabilities flags, see &enum wiphy_nan_flags * @op_mode: NAN operation mode, as defined in Wi-Fi Aware (TM) specification * Table 81. * @n_antennas: number of antennas supported by the device for Tx/Rx. The lower * nibble indicates the number of TX antennas and the upper nibble indicates * the number of RX antennas. Value 0 indicates the information is not * available. * @max_channel_switch_time: maximum channel switch time in milliseconds. * @dev_capabilities: NAN device capabilities as defined in Wi-Fi Aware (TM) * specification Table 79 (Capabilities field). */ struct wiphy_nan_capa { u32 flags; u8 op_mode; u8 n_antennas; u16 max_channel_switch_time; u8 dev_capabilities; }; #define CFG80211_HW_TIMESTAMP_ALL_PEERS 0xffff /** * struct wiphy - wireless hardware description * @mtx: mutex for the data (structures) of this device * @reg_notifier: the driver's regulatory notification callback, * note that if your driver uses wiphy_apply_custom_regulatory() * the reg_notifier's request can be passed as NULL * @regd: the driver's regulatory domain, if one was requested via * the regulatory_hint() API. This can be used by the driver * on the reg_notifier() if it chooses to ignore future * regulatory domain changes caused by other drivers. * @signal_type: signal type reported in &struct cfg80211_bss. * @cipher_suites: supported cipher suites * @n_cipher_suites: number of supported cipher suites * @akm_suites: supported AKM suites.
These are the default AKMs supported if * the supported AKMs are not advertised for a specific interface type in * iftype_akm_suites. * @n_akm_suites: number of supported AKM suites * @iftype_akm_suites: array of supported AKM suites info per interface type. * Note that the bits in @iftypes_mask inside this structure cannot * overlap (i.e. only one occurrence of each type is allowed across all * instances of iftype_akm_suites). * @num_iftype_akm_suites: number of interface types for which supported AKM * suites are specified separately. * @retry_short: Retry limit for short frames (dot11ShortRetryLimit) * @retry_long: Retry limit for long frames (dot11LongRetryLimit) * @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold); * -1 = fragmentation disabled, only odd values >= 256 used * @rts_threshold: RTS threshold (dot11RTSThreshold); -1 = RTS/CTS disabled * @_net: the network namespace this wiphy currently lives in * @perm_addr: permanent MAC address of this device * @addr_mask: If the device supports multiple MAC addresses by masking, * set this to a mask with variable bits set to 1, e.g. if the last * four bits are variable then set it to 00-00-00-00-00-0f. The actual * variable bits shall be determined by the interfaces added, with * interfaces not matching the mask being rejected when brought up. * @n_addresses: number of addresses in @addresses. * @addresses: If the device has more than one address, set this pointer * to a list of addresses (6 bytes each). The first one will be used * by default for perm_addr. In this case, the mask should be set to * all-zeroes. It is then assumed that the device can handle * the same number of arbitrary MAC addresses. * @registered: protects ->resume and ->suspend sysfs callbacks against * the hardware being unregistered * @debugfsdir: debugfs directory used for this wiphy (ieee80211/<wiphyname>). * It will be renamed automatically on wiphy renames * @dev: (virtual) struct device for this wiphy. The item in * /sys/class/ieee80211/ points to this. You need to use set_wiphy_dev() * (see below). * @wext: wireless extension handlers * @priv: driver private data (sized according to wiphy_new() parameter) * @interface_modes: bitmask of interface types valid for this wiphy, * must be set by driver * @iface_combinations: Valid interface combinations array, should not * list single interface types. * @n_iface_combinations: number of entries in @iface_combinations array. * @software_iftypes: bitmask of software interface types, these are not * subject to any restrictions since they are purely managed in SW. * @flags: wiphy flags, see &enum wiphy_flags * @regulatory_flags: wiphy regulatory flags, see * &enum ieee80211_regulatory_flags * @features: features advertised to nl80211, see &enum nl80211_feature_flags. * @ext_features: extended features advertised to nl80211, see * &enum nl80211_ext_feature_index. * @bss_priv_size: each BSS struct has private data allocated with it, * this variable determines its size * @max_scan_ssids: maximum number of SSIDs the device can scan for in * any given scan * @max_sched_scan_reqs: maximum number of scheduled scan requests that * the device can run concurrently. * @max_sched_scan_ssids: maximum number of SSIDs the device can scan * for in any given scheduled scan * @max_match_sets: maximum number of match sets the device can handle * when performing a scheduled scan, 0 if filtering is not * supported.
* @max_scan_ie_len: maximum length of user-controlled IEs the device can * add to probe request frames transmitted during a scan, must not * include fixed IEs like supported rates * @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled * scans * @max_sched_scan_plans: maximum number of scan plans (scan interval and number * of iterations) for scheduled scan supported by the device. * @max_sched_scan_plan_interval: maximum interval (in seconds) for a * single scan plan supported by the device. * @max_sched_scan_plan_iterations: maximum number of iterations for a single * scan plan supported by the device. * @coverage_class: current coverage class * @fw_version: firmware version for ethtool reporting * @hw_version: hardware version for ethtool reporting * @max_num_pmkids: maximum number of PMKIDs supported by device * @privid: a pointer that drivers can use to identify if an arbitrary * wiphy is theirs, e.g. in global notifiers * @bands: information about bands/channels supported by this device * * @mgmt_stypes: bitmasks of frame subtypes that can be subscribed to or * transmitted through nl80211, points to an array indexed by interface * type * * @available_antennas_tx: bitmap of antennas which are available to be * configured as TX antennas. Antenna configuration commands will be * rejected unless this or @available_antennas_rx is set. * * @available_antennas_rx: bitmap of antennas which are available to be * configured as RX antennas. Antenna configuration commands will be * rejected unless this or @available_antennas_tx is set. * * @probe_resp_offload: * Bitmap of supported protocols for probe response offloading. * See &enum nl80211_probe_resp_offload_support_attr. Only valid * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set. * * @max_remain_on_channel_duration: Maximum time a remain-on-channel operation * may request, if implemented. * * @wowlan: WoWLAN support information * @wowlan_config: current WoWLAN configuration; this should usually not be * used since access to it is necessarily racy, use the parameter passed * to the suspend() operation instead. * * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features. * @ht_capa_mod_mask: Specify what ht_cap values can be overridden. * If null, then none can be overridden. * @vht_capa_mod_mask: Specify what VHT capabilities can be overridden. * If null, then none can be overridden. * * @wdev_list: the list of associated (virtual) interfaces; this list must * not be modified by the driver, but can be read with RTNL/RCU protection. * * @max_acl_mac_addrs: Maximum number of MAC addresses that the device * supports for ACL. * * @extended_capabilities: extended capabilities supported by the driver, * additional capabilities might be supported by userspace; these are * the 802.11 extended capabilities ("Extended Capabilities element") * and are in the same format as in the information element. See * IEEE Std 802.11-2012 8.4.2.29 for the defined fields. These are the default * extended capabilities to be used if the capabilities are not specified * for a specific interface type in iftype_ext_capab. * @extended_capabilities_mask: mask of the valid values * @extended_capabilities_len: length of the extended capabilities * @iftype_ext_capab: array of extended capabilities per interface type * @num_iftype_ext_capab: number of interface types for which extended * capabilities are specified separately.
* @coalesce: packet coalescing support information * * @vendor_commands: array of vendor commands supported by the hardware * @n_vendor_commands: number of vendor commands * @vendor_events: array of vendor events supported by the hardware * @n_vendor_events: number of vendor events * * @max_ap_assoc_sta: maximum number of associated stations supported in AP mode * (including P2P GO) or 0 to indicate no such limit is advertised. The * driver is allowed to advertise a theoretical limit that it can reach in * some cases, but may not always reach. * * @max_num_csa_counters: Number of supported csa_counters in beacons * and probe responses. This value should be set if the driver * wishes to limit the number of csa counters. Default (0) means * infinite. * @bss_param_support: bitmask indicating which bss_parameters, as defined in * &struct bss_parameters, the driver can actually handle in the * .change_bss() callback. The bit positions are defined in &enum * wiphy_bss_param_flags. * * @bss_select_support: bitmask indicating the BSS selection criteria supported * by the driver in the .connect() callback. The bit position maps to the * attribute indices defined in &enum nl80211_bss_select_attr. * * @nan_supported_bands: bands supported by the device in NAN mode, a * bitmap of &enum nl80211_band values. For instance, for * NL80211_BAND_2GHZ, bit 0 would be set * (i.e. BIT(NL80211_BAND_2GHZ)). * @nan_capa: NAN capabilities * * @txq_limit: configuration of internal TX queue frame limit * @txq_memory_limit: configuration of internal TX queue memory limit * @txq_quantum: configuration of internal TX queue scheduler quantum * * @tx_queue_len: allow setting transmit queue len for drivers not using * wake_tx_queue * * @support_mbssid: can HW support association with nontransmitted AP * @support_only_he_mbssid: don't parse MBSSID elements if the AP is not an * HE AP, in order to avoid compatibility issues. * @support_mbssid must be set for this to have any effect. * * @pmsr_capa: peer measurement capabilities * * @tid_config_support: describes the per-TID config support that the * device has * @tid_config_support.vif: bitmap of attributes (configurations) * supported by the driver for each vif * @tid_config_support.peer: bitmap of attributes (configurations) * supported by the driver for each peer * @tid_config_support.max_retry: maximum supported retry count for * long/short retry configuration * * @max_data_retry_count: maximum supported per-TID retry count for * configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and * %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes * @sar_capa: SAR control capabilities * @rfkill: a pointer to the rfkill structure * * @mbssid_max_interfaces: maximum number of interfaces supported by the driver * in a multiple BSSID set. This field must be set to a non-zero value * by the driver to advertise MBSSID support. * @ema_max_profile_periodicity: maximum profile periodicity supported by * the driver. Setting this field to a non-zero value indicates that the * driver supports enhanced multi-BSSID advertisements (EMA AP). * @max_num_akm_suites: maximum number of AKM suites allowed for * configuration through %NL80211_CMD_CONNECT, %NL80211_CMD_ASSOCIATE and * %NL80211_CMD_START_AP. Set to NL80211_MAX_NR_AKM_SUITES if not set by * driver. If set by driver minimum allowed value is * NL80211_MAX_NR_AKM_SUITES in order to avoid compatibility issues with * legacy userspace and maximum allowed value is * CFG80211_MAX_NUM_AKM_SUITES.
* * @hw_timestamp_max_peers: maximum number of peers that the driver supports * enabling HW timestamping for concurrently. Setting this field to a * non-zero value indicates that the driver supports HW timestamping. * A value of %CFG80211_HW_TIMESTAMP_ALL_PEERS indicates the driver * supports enabling HW timestamping for all peers (i.e. no need to * specify a MAC address). * * @radio_cfg: configuration of radios belonging to a multi-radio wiphy. This * struct contains a list of all radio specific attributes and should be * used only for multi-radio wiphys. * * @radio: radios belonging to this wiphy * @n_radio: number of radios */ struct wiphy { struct mutex mtx; /* assign these fields before you register the wiphy */ u8 perm_addr[ETH_ALEN]; u8 addr_mask[ETH_ALEN]; struct mac_address *addresses; const struct ieee80211_txrx_stypes *mgmt_stypes; const struct ieee80211_iface_combination *iface_combinations; int n_iface_combinations; u16 software_iftypes; u16 n_addresses; /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */ u16 interface_modes; u16 max_acl_mac_addrs; u32 flags, regulatory_flags, features; u8 ext_features[DIV_ROUND_UP(NUM_NL80211_EXT_FEATURES, 8)]; u32 ap_sme_capa; enum cfg80211_signal_type signal_type; int bss_priv_size; u8 max_scan_ssids; u8 max_sched_scan_reqs; u8 max_sched_scan_ssids; u8 max_match_sets; u16 max_scan_ie_len; u16 max_sched_scan_ie_len; u32 max_sched_scan_plans; u32 max_sched_scan_plan_interval; u32 max_sched_scan_plan_iterations; int n_cipher_suites; const u32 *cipher_suites; int n_akm_suites; const u32 *akm_suites; const struct wiphy_iftype_akm_suites *iftype_akm_suites; unsigned int num_iftype_akm_suites; u8 retry_short; u8 retry_long; u32 frag_threshold; u32 rts_threshold; u8 coverage_class; char fw_version[ETHTOOL_FWVERS_LEN]; u32 hw_version; #ifdef CONFIG_PM const struct wiphy_wowlan_support *wowlan; struct cfg80211_wowlan *wowlan_config; #endif u16 max_remain_on_channel_duration; u8 max_num_pmkids; u32 available_antennas_tx; u32 available_antennas_rx; u32 probe_resp_offload; const u8 *extended_capabilities, *extended_capabilities_mask; u8 extended_capabilities_len; const struct wiphy_iftype_ext_capab *iftype_ext_capab; unsigned int num_iftype_ext_capab; const void *privid; struct ieee80211_supported_band *bands[NUM_NL80211_BANDS]; void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request); struct wiphy_radio_cfg *radio_cfg; /* fields below are read-only, assigned by cfg80211 */ const struct ieee80211_regdomain __rcu *regd; struct device dev; bool registered; struct dentry *debugfsdir; const struct ieee80211_ht_cap *ht_capa_mod_mask; const struct ieee80211_vht_cap *vht_capa_mod_mask; struct list_head wdev_list; possible_net_t _net; #ifdef CONFIG_CFG80211_WEXT const struct iw_handler_def *wext; #endif const struct wiphy_coalesce_support *coalesce; const struct wiphy_vendor_command *vendor_commands; const struct nl80211_vendor_cmd_info *vendor_events; int n_vendor_commands, n_vendor_events; u16 max_ap_assoc_sta; u8 max_num_csa_counters; u32 bss_param_support; u32 bss_select_support; u8 nan_supported_bands; struct wiphy_nan_capa nan_capa; u32 txq_limit; u32 txq_memory_limit; u32 txq_quantum; unsigned long tx_queue_len; u8 support_mbssid:1, support_only_he_mbssid:1; const struct cfg80211_pmsr_capabilities *pmsr_capa; struct { u64 peer, vif; u8 max_retry; } tid_config_support; u8 max_data_retry_count; const struct cfg80211_sar_capa *sar_capa; struct rfkill *rfkill; u8 mbssid_max_interfaces; u8 ema_max_profile_periodicity; u16
max_num_akm_suites; u16 hw_timestamp_max_peers; int n_radio; const struct wiphy_radio *radio; char priv[] __aligned(NETDEV_ALIGN); }; static inline struct net *wiphy_net(struct wiphy *wiphy) { return read_pnet(&wiphy->_net); } static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net) { write_pnet(&wiphy->_net, net); } /** * wiphy_priv - return priv from wiphy * * @wiphy: the wiphy whose priv pointer to return * Return: The priv of @wiphy. */ static inline void *wiphy_priv(struct wiphy *wiphy) { BUG_ON(!wiphy); return &wiphy->priv; } /** * priv_to_wiphy - return the wiphy containing the priv * * @priv: a pointer previously returned by wiphy_priv * Return: The wiphy of @priv. */ static inline struct wiphy *priv_to_wiphy(void *priv) { BUG_ON(!priv); return container_of(priv, struct wiphy, priv); } /** * set_wiphy_dev - set device pointer for wiphy * * @wiphy: The wiphy whose device to bind * @dev: The device to parent it to */ static inline void set_wiphy_dev(struct wiphy *wiphy, struct device *dev) { wiphy->dev.parent = dev; } /** * wiphy_dev - get wiphy dev pointer * * @wiphy: The wiphy whose device struct to look up * Return: The dev of @wiphy. */ static inline struct device *wiphy_dev(struct wiphy *wiphy) { return wiphy->dev.parent; } /** * wiphy_name - get wiphy name * * @wiphy: The wiphy whose name to return * Return: The name of @wiphy. */ static inline const char *wiphy_name(const struct wiphy *wiphy) { return dev_name(&wiphy->dev); } /** * wiphy_new_nm - create a new wiphy for use with cfg80211 * * @ops: The configuration operations for this device * @sizeof_priv: The size of the private area to allocate * @requested_name: Request a particular name. * NULL is a valid value and means use the default phy%d naming. * * Create a new wiphy and associate the given operations with it. * @sizeof_priv bytes are allocated for private use. * * Return: A pointer to the new wiphy. This pointer must be * assigned to each netdev's ieee80211_ptr for proper operation. */ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, const char *requested_name); /** * wiphy_new - create a new wiphy for use with cfg80211 * * @ops: The configuration operations for this device * @sizeof_priv: The size of the private area to allocate * * Create a new wiphy and associate the given operations with it. * @sizeof_priv bytes are allocated for private use. * * Return: A pointer to the new wiphy. This pointer must be * assigned to each netdev's ieee80211_ptr for proper operation. */ static inline struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) { return wiphy_new_nm(ops, sizeof_priv, NULL); } /** * wiphy_register - register a wiphy with cfg80211 * * @wiphy: The wiphy to register. * * Return: A non-negative wiphy index or a negative error code. */ int wiphy_register(struct wiphy *wiphy); /* this is a define for better error reporting (file/line) */ #define lockdep_assert_wiphy(wiphy) lockdep_assert_held(&(wiphy)->mtx) /** * rcu_dereference_wiphy - rcu_dereference with debug checking * @wiphy: the wiphy to check the locking on * @p: The pointer to read, prior to dereferencing * * Do an rcu_dereference(p), but check that the caller either holds * rcu_read_lock() or the wiphy mutex. Note: Please prefer * wiphy_dereference() or rcu_dereference().
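 *
 * A minimal usage sketch (illustrative only; "drv" and its RCU-protected
 * @my_ptr member are hypothetical driver-side names, not cfg80211 API):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference_wiphy(wiphy, drv->my_ptr);
 *	if (p)
 *		do_something(p); /* hypothetical consumer */
 *	rcu_read_unlock();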
*/ #define rcu_dereference_wiphy(wiphy, p) \ rcu_dereference_check(p, lockdep_is_held(&wiphy->mtx)) /** * wiphy_dereference - fetch RCU pointer when updates are prevented by wiphy mtx * @wiphy: the wiphy to check the locking on * @p: The pointer to read, prior to dereferencing * * Return: the value of the specified RCU-protected pointer, but omit the * READ_ONCE(), because caller holds the wiphy mutex used for updates. */ #define wiphy_dereference(wiphy, p) \ rcu_dereference_protected(p, lockdep_is_held(&wiphy->mtx)) /** * get_wiphy_regdom - get custom regdomain for the given wiphy * @wiphy: the wiphy to get the regdomain from * * Context: Requires any of RTNL, wiphy mutex or RCU protection. * * Return: pointer to the regulatory domain associated with the wiphy */ const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy); /** * wiphy_unregister - deregister a wiphy from cfg80211 * * @wiphy: The wiphy to unregister. * * After this call, no more requests can be made with this priv * pointer, but the call may sleep to wait for an outstanding * request that is being handled. */ void wiphy_unregister(struct wiphy *wiphy); /** * wiphy_free - free wiphy * * @wiphy: The wiphy to free */ void wiphy_free(struct wiphy *wiphy); /* internal structs */ struct cfg80211_conn; struct cfg80211_internal_bss; struct cfg80211_cached_keys; struct cfg80211_cqm_config; /** * wiphy_lock - lock the wiphy * @wiphy: the wiphy to lock * * This is needed around registering and unregistering netdevs that * aren't created through cfg80211 calls, since that requires locking * in cfg80211 when the notifier is called, but that cannot * differentiate which way it's called. * * It can also be used by drivers for their own purposes. * * When cfg80211 ops are called, the wiphy is already locked. * * Note that this makes sure that no workers that have been queued * with wiphy_work_queue() are running. */ static inline void wiphy_lock(struct wiphy *wiphy) __acquires(&wiphy->mtx) { mutex_lock(&wiphy->mtx); __acquire(&wiphy->mtx); } /** * wiphy_unlock - unlock the wiphy again * @wiphy: the wiphy to unlock */ static inline void wiphy_unlock(struct wiphy *wiphy) __releases(&wiphy->mtx) { __release(&wiphy->mtx); mutex_unlock(&wiphy->mtx); } DEFINE_GUARD(wiphy, struct wiphy *, mutex_lock(&_T->mtx), mutex_unlock(&_T->mtx)) struct wiphy_work; typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *); struct wiphy_work { struct list_head entry; wiphy_work_func_t func; }; static inline void wiphy_work_init(struct wiphy_work *work, wiphy_work_func_t func) { INIT_LIST_HEAD(&work->entry); work->func = func; } /** * wiphy_work_queue - queue work for the wiphy * @wiphy: the wiphy to queue for * @work: the work item * * This is useful for work that must be done asynchronously, and work * queued here has the special property that the wiphy mutex will be * held as if wiphy_lock() was called, and that it cannot be running * after wiphy_lock() was called. Therefore, wiphy_work_cancel() can * use just cancel_work() instead of cancel_work_sync(); this requires * being in a section protected by wiphy_lock(). */ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work); /** * wiphy_work_cancel - cancel previously queued work * @wiphy: the wiphy, for debug purposes * @work: the work to cancel * * Cancel the work *without* waiting for it; this assumes being * called under the wiphy mutex acquired by wiphy_lock().
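 *
 * A minimal usage sketch (illustrative only; "drv", @my_work and
 * my_work_fn are hypothetical driver-side names):
 *
 *	wiphy_work_init(&drv->my_work, my_work_fn);
 *	wiphy_work_queue(wiphy, &drv->my_work);
 *	...
 *	wiphy_lock(wiphy);
 *	wiphy_work_cancel(wiphy, &drv->my_work);
 *	wiphy_unlock(wiphy);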
*/ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work); /** * wiphy_work_flush - flush previously queued work * @wiphy: the wiphy, for debug purposes * @work: the work to flush, this can be %NULL to flush all work * * Flush the work (i.e. run it if pending). This must be called * under the wiphy mutex acquired by wiphy_lock(). */ void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work); struct wiphy_delayed_work { struct wiphy_work work; struct wiphy *wiphy; struct timer_list timer; }; void wiphy_delayed_work_timer(struct timer_list *t); static inline void wiphy_delayed_work_init(struct wiphy_delayed_work *dwork, wiphy_work_func_t func) { timer_setup(&dwork->timer, wiphy_delayed_work_timer, 0); wiphy_work_init(&dwork->work, func); } /** * wiphy_delayed_work_queue - queue delayed work for the wiphy * @wiphy: the wiphy to queue for * @dwork: the delayable worker * @delay: number of jiffies to wait before queueing * * This is useful for work that must be done asynchronously, and work * queued here has the special property that the wiphy mutex will be * held as if wiphy_lock() was called, and that it cannot be running * after wiphy_lock() was called. Therefore, wiphy_delayed_work_cancel() * can use just cancel_work() instead of cancel_work_sync(); this * requires being in a section protected by wiphy_lock(). */ void wiphy_delayed_work_queue(struct wiphy *wiphy, struct wiphy_delayed_work *dwork, unsigned long delay); /** * wiphy_delayed_work_cancel - cancel previously queued delayed work * @wiphy: the wiphy, for debug purposes * @dwork: the delayed work to cancel * * Cancel the work *without* waiting for it; this assumes being * called under the wiphy mutex acquired by wiphy_lock(). */ void wiphy_delayed_work_cancel(struct wiphy *wiphy, struct wiphy_delayed_work *dwork); /** * wiphy_delayed_work_flush - flush previously queued delayed work * @wiphy: the wiphy, for debug purposes * @dwork: the delayed work to flush * * Flush the work (i.e. run it if pending). This must be called * under the wiphy mutex acquired by wiphy_lock(). */ void wiphy_delayed_work_flush(struct wiphy *wiphy, struct wiphy_delayed_work *dwork); /** * wiphy_delayed_work_pending - Find out whether a wiphy delayable * work item is currently pending. * * @wiphy: the wiphy, for debug purposes * @dwork: the delayed work in question * * Return: true if timer is pending, false otherwise * * How wiphy_delayed_work_queue() works is by setting a timer which * when it expires calls wiphy_work_queue() to queue the wiphy work. * Because wiphy_delayed_work_queue() uses mod_timer(), if it is * called twice and the second call happens before the first call * deadline, the work will be rescheduled for the second deadline and * won't run before that. * * wiphy_delayed_work_pending() can be used to detect if calling * wiphy_delayed_work_queue() would start a new work schedule * or delay a previous one. As seen below, it cannot be used to * detect precisely whether the work has finished executing nor * whether it is currently executing.
 *
 * CPU0                                     CPU1
 * wiphy_delayed_work_queue(wk)
 *  mod_timer(wk->timer)
 *                                          wiphy_delayed_work_pending(wk) -> true
 * [...]
 * expire_timers(wk->timer)
 *  detach_timer(wk->timer)
 *                                          wiphy_delayed_work_pending(wk) -> false
 *  wk->timer->function()                   |
 *   wiphy_work_queue(wk)                   | delayed work pending
 *    list_add_tail()                       | returns false but
 *    queue_work(cfg80211_wiphy_work)       | wk->func() has not
 *                                          | been run yet
 * [...]
 *                                          |
 * cfg80211_wiphy_work()                    |
 *  wk->func()                              V
 *
 */ bool wiphy_delayed_work_pending(struct wiphy *wiphy, struct wiphy_delayed_work *dwork); /** * enum ieee80211_ap_reg_power - regulatory power for an Access Point * * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode * @IEEE80211_REG_LPI_AP: Indoor Access Point * @IEEE80211_REG_SP_AP: Standard power Access Point * @IEEE80211_REG_VLP_AP: Very low power Access Point */ enum ieee80211_ap_reg_power { IEEE80211_REG_UNSET_AP, IEEE80211_REG_LPI_AP, IEEE80211_REG_SP_AP, IEEE80211_REG_VLP_AP, }; /** * struct wireless_dev - wireless device state * * For netdevs, this structure must be allocated by the driver * that uses the ieee80211_ptr field in struct net_device (this * is intentional so it can be allocated along with the netdev.) * It need not be registered then as netdev registration will * be intercepted by cfg80211 to see the new wireless device, * however, drivers must lock the wiphy before registering or * unregistering netdevs if they pre-create any netdevs (in ops * called from cfg80211, the wiphy is already locked.) * * For non-netdev uses, it must also be allocated by the driver * in response to the cfg80211 callbacks that require it; as * there's no netdev registration in that case, it may not be * allocated outside of callback operations that return it. * * @wiphy: pointer to hardware description * @iftype: interface type * @registered: is this wdev already registered with cfg80211 * @registering: indicates we're doing registration under wiphy lock * for the notifier * @list: (private) Used to collect the interfaces * @netdev: (private) Used to reference back to the netdev, may be %NULL * @identifier: (private) Identifier used in nl80211 to identify this * wireless device if it has no netdev * @u: union containing data specific to @iftype * @connected: indicates if connected or not (STA mode) * @wext: (private) Used by the internal wireless extensions compat code * @wext.ibss: (private) IBSS data part of wext handling * @wext.connect: (private) connection handling data * @wext.keys: (private) (WEP) key data * @wext.ie: (private) extra elements for association * @wext.ie_len: (private) length of extra elements * @wext.bssid: (private) selected network BSSID * @wext.ssid: (private) selected network SSID * @wext.default_key: (private) selected default key index * @wext.default_mgmt_key: (private) selected default management key index * @wext.prev_bssid: (private) previous BSSID for reassociation * @wext.prev_bssid_valid: (private) previous BSSID validity * @use_4addr: indicates 4addr mode is used on this interface, must be * set by driver (if supported) on add_interface BEFORE registering the * netdev and may otherwise be used by the driver read-only, will be updated * by cfg80211 on change_interface * @mgmt_registrations: list of registrations for management frames * @mgmt_registrations_need_update: mgmt registrations were updated, * need to propagate the update to the driver * @address: The address for this device, valid only if @netdev is %NULL * @is_running: true if this is a non-netdev device that has been started, e.g. * the P2P Device.
* @ps: powersave mode is enabled * @ps_timeout: dynamic powersave timeout * @ap_unexpected_nlportid: (private) netlink port ID of application * registered for unexpected class 3 frames (AP mode) * @conn: (private) cfg80211 software SME connection state machine data * @connect_keys: (private) keys to set after connection is established * @conn_bss_type: connecting/connected BSS type * @conn_owner_nlportid: (private) connection owner socket port ID * @disconnect_wk: (private) auto-disconnect work * @disconnect_bssid: (private) the BSSID to use for auto-disconnect * @event_list: (private) list for internal event processing * @event_lock: (private) lock for event list * @owner_nlportid: (private) owner socket port ID * @nl_owner_dead: (private) owner socket went away * @cqm_rssi_work: (private) CQM RSSI reporting work * @cqm_config: (private) nl80211 RSSI monitor state * @pmsr_list: (private) peer measurement requests * @pmsr_lock: (private) peer measurements requests/results lock * @pmsr_free_wk: (private) peer measurements cleanup work * @unprot_beacon_reported: (private) timestamp of last * unprotected beacon report * @links: array of %IEEE80211_MLD_MAX_NUM_LINKS elements containing @addr * @ap and @client for each link * @links.cac_started: true if DFS channel availability check has been * started * @links.cac_start_time: timestamp (jiffies) when the dfs state was * entered. * @links.cac_time_ms: CAC time in ms * @valid_links: bitmap describing what elements of @links are valid * @radio_mask: Bitmask of radios that this interface is allowed to operate on. */ struct wireless_dev { struct wiphy *wiphy; enum nl80211_iftype iftype; /* the remainder of this struct should be private to cfg80211 */ struct list_head list; struct net_device *netdev; u32 identifier; struct list_head mgmt_registrations; u8 mgmt_registrations_need_update:1; bool use_4addr, is_running, registered, registering; u8 address[ETH_ALEN] __aligned(sizeof(u16)); /* currently used for IBSS and SME - might be rearranged later */ struct cfg80211_conn *conn; struct cfg80211_cached_keys *connect_keys; enum ieee80211_bss_type conn_bss_type; u32 conn_owner_nlportid; struct work_struct disconnect_wk; u8 disconnect_bssid[ETH_ALEN]; struct list_head event_list; spinlock_t event_lock; u8 connected:1; bool ps; int ps_timeout; u32 ap_unexpected_nlportid; u32 owner_nlportid; bool nl_owner_dead; #ifdef CONFIG_CFG80211_WEXT /* wext data */ struct { struct cfg80211_ibss_params ibss; struct cfg80211_connect_params connect; struct cfg80211_cached_keys *keys; const u8 *ie; size_t ie_len; u8 bssid[ETH_ALEN]; u8 prev_bssid[ETH_ALEN]; u8 ssid[IEEE80211_MAX_SSID_LEN]; s8 default_key, default_mgmt_key; bool prev_bssid_valid; } wext; #endif struct wiphy_work cqm_rssi_work; struct cfg80211_cqm_config __rcu *cqm_config; struct list_head pmsr_list; spinlock_t pmsr_lock; struct work_struct pmsr_free_wk; unsigned long unprot_beacon_reported; union { struct { u8 connected_addr[ETH_ALEN] __aligned(2); u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 ssid_len; } client; struct { int beacon_interval; struct cfg80211_chan_def preset_chandef; struct cfg80211_chan_def chandef; u8 id[IEEE80211_MAX_MESH_ID_LEN]; u8 id_len, id_up_len; } mesh; struct { struct cfg80211_chan_def preset_chandef; u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 ssid_len; } ap; struct { struct cfg80211_internal_bss *current_bss; struct cfg80211_chan_def chandef; int beacon_interval; u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 ssid_len; } ibss; struct { struct cfg80211_chan_def chandef; } ocb; struct { u8 
cluster_id[ETH_ALEN] __aligned(2); } nan; } u; struct { u8 addr[ETH_ALEN] __aligned(2); union { struct { unsigned int beacon_interval; struct cfg80211_chan_def chandef; } ap; struct { struct cfg80211_internal_bss *current_bss; } client; }; bool cac_started; unsigned long cac_start_time; unsigned int cac_time_ms; } links[IEEE80211_MLD_MAX_NUM_LINKS]; u16 valid_links; u32 radio_mask; }; static inline const u8 *wdev_address(struct wireless_dev *wdev) { if (wdev->netdev) return wdev->netdev->dev_addr; return wdev->address; } static inline bool wdev_running(struct wireless_dev *wdev) { if (wdev->netdev) return netif_running(wdev->netdev); return wdev->is_running; } /** * wdev_priv - return wiphy priv from wireless_dev * * @wdev: The wireless device whose wiphy's priv pointer to return * Return: The wiphy priv of @wdev. */ static inline void *wdev_priv(struct wireless_dev *wdev) { BUG_ON(!wdev); return wiphy_priv(wdev->wiphy); } /** * wdev_chandef - return chandef pointer from wireless_dev * @wdev: the wdev * @link_id: the link ID for MLO * * Return: The chandef depending on the mode, or %NULL. */ struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev, unsigned int link_id); static inline void WARN_INVALID_LINK_ID(struct wireless_dev *wdev, unsigned int link_id) { WARN_ON(link_id && !wdev->valid_links); WARN_ON(wdev->valid_links && !(wdev->valid_links & BIT(link_id))); } #define for_each_valid_link(link_info, link_id) \ for (link_id = 0; \ link_id < ((link_info)->valid_links ? \ ARRAY_SIZE((link_info)->links) : 1); \ link_id++) \ if (!(link_info)->valid_links || \ ((link_info)->valid_links & BIT(link_id))) /** * DOC: Utility functions * * cfg80211 offers a number of utility functions that can be useful. */ /** * ieee80211_channel_equal - compare two struct ieee80211_channel * * @a: 1st struct ieee80211_channel * @b: 2nd struct ieee80211_channel * Return: true if center frequency of @a == @b */ static inline bool ieee80211_channel_equal(struct ieee80211_channel *a, struct ieee80211_channel *b) { return (a->center_freq == b->center_freq && a->freq_offset == b->freq_offset); } /** * ieee80211_channel_to_khz - convert ieee80211_channel to frequency in KHz * @chan: struct ieee80211_channel to convert * Return: The corresponding frequency (in KHz) */ static inline u32 ieee80211_channel_to_khz(const struct ieee80211_channel *chan) { return MHZ_TO_KHZ(chan->center_freq) + chan->freq_offset; } /** * ieee80211_channel_to_freq_khz - convert channel number to frequency * @chan: channel number * @band: band, necessary due to channel number overlap * Return: The corresponding frequency (in KHz), or 0 if the conversion failed. */ u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band); /** * ieee80211_channel_to_frequency - convert channel number to frequency * @chan: channel number * @band: band, necessary due to channel number overlap * Return: The corresponding frequency (in MHz), or 0 if the conversion failed. */ static inline int ieee80211_channel_to_frequency(int chan, enum nl80211_band band) { return KHZ_TO_MHZ(ieee80211_channel_to_freq_khz(chan, band)); } /** * ieee80211_freq_khz_to_channel - convert frequency to channel number * @freq: center frequency in KHz * Return: The corresponding channel, or 0 if the conversion failed. */ int ieee80211_freq_khz_to_channel(u32 freq); /** * ieee80211_frequency_to_channel - convert frequency to channel number * @freq: center frequency in MHz * Return: The corresponding channel, or 0 if the conversion failed. 
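 *
 * For example, ieee80211_frequency_to_channel(2412) returns channel 1
 * (2.4 GHz band) and ieee80211_frequency_to_channel(5180) returns
 * channel 36 (5 GHz band).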
*/ static inline int ieee80211_frequency_to_channel(int freq) { return ieee80211_freq_khz_to_channel(MHZ_TO_KHZ(freq)); } /** * ieee80211_get_channel_khz - get channel struct from wiphy for specified * frequency * @wiphy: the struct wiphy to get the channel for * @freq: the center frequency (in KHz) of the channel * Return: The channel struct from @wiphy at @freq. */ struct ieee80211_channel * ieee80211_get_channel_khz(struct wiphy *wiphy, u32 freq); /** * ieee80211_get_channel - get channel struct from wiphy for specified frequency * * @wiphy: the struct wiphy to get the channel for * @freq: the center frequency (in MHz) of the channel * Return: The channel struct from @wiphy at @freq. */ static inline struct ieee80211_channel * ieee80211_get_channel(struct wiphy *wiphy, int freq) { return ieee80211_get_channel_khz(wiphy, MHZ_TO_KHZ(freq)); } /** * cfg80211_channel_is_psc - Check if the channel is a 6 GHz PSC * @chan: control channel to check * * The Preferred Scanning Channels (PSC) are defined in * Draft IEEE P802.11ax/D5.0, 26.17.2.3.3 * * Return: %true if channel is a PSC, %false otherwise */ static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan) { if (chan->band != NL80211_BAND_6GHZ) return false; return ieee80211_frequency_to_channel(chan->center_freq) % 16 == 5; } /** * cfg80211_radio_chandef_valid - Check if the radio supports the chandef * * @radio: wiphy radio * @chandef: chandef for current channel * * Return: whether or not the given chandef is valid for the given radio */ bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio, const struct cfg80211_chan_def *chandef); /** * cfg80211_wdev_channel_allowed - Check if the wdev may use the channel * * @wdev: the wireless device * @chan: channel to check * * Return: whether or not the wdev may use the channel */ bool cfg80211_wdev_channel_allowed(struct wireless_dev *wdev, struct ieee80211_channel *chan); /** * ieee80211_get_response_rate - get basic rate for a given rate * * @sband: the band to look for rates in * @basic_rates: bitmap of basic rates * @bitrate: the bitrate for which to find the basic rate * * Return: The basic rate corresponding to a given bitrate, that * is the next lower bitrate contained in the basic rate map, * which is, for this function, given as a bitmap of indices of * rates in the band's bitrate table. */ const struct ieee80211_rate * ieee80211_get_response_rate(struct ieee80211_supported_band *sband, u32 basic_rates, int bitrate); /** * ieee80211_mandatory_rates - get mandatory rates for a given band * @sband: the band to look for rates in * * Return: a bitmap of the mandatory rates for the given band, bits * are set according to the rate position in the bitrates array. 
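 *
 * For example (illustrative): on a 5 GHz band, where the mandatory OFDM
 * rates are 6, 12 and 24 Mbps, the returned bitmap has the bits set that
 * correspond to those entries' positions in the band's bitrates array.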
*/ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband); /* * Radiotap parsing functions -- for controlled injection support * * Implemented in net/wireless/radiotap.c * Documentation in Documentation/networking/radiotap-headers.rst */ struct radiotap_align_size { uint8_t align:4, size:4; }; struct ieee80211_radiotap_namespace { const struct radiotap_align_size *align_size; int n_bits; uint32_t oui; uint8_t subns; }; struct ieee80211_radiotap_vendor_namespaces { const struct ieee80211_radiotap_namespace *ns; int n_ns; }; /** * struct ieee80211_radiotap_iterator - tracks walk thru present radiotap args * @this_arg_index: index of current arg, valid after each successful call * to ieee80211_radiotap_iterator_next() * @this_arg: pointer to current radiotap arg; it is valid after each * call to ieee80211_radiotap_iterator_next() but also after * ieee80211_radiotap_iterator_init() where it will point to * the beginning of the actual data portion * @this_arg_size: length of the current arg, for convenience * @current_namespace: pointer to the current namespace definition * (or internally %NULL if the current namespace is unknown) * @is_radiotap_ns: indicates whether the current namespace is the default * radiotap namespace or not * * @_rtheader: pointer to the radiotap header we are walking through * @_max_length: length of radiotap header in cpu byte ordering * @_arg_index: next argument index * @_arg: next argument pointer * @_next_bitmap: internal pointer to next present u32 * @_bitmap_shifter: internal shifter for curr u32 bitmap, b0 set == arg present * @_vns: vendor namespace definitions * @_next_ns_data: beginning of the next namespace's data * @_reset_on_ext: internal; reset the arg index to 0 when going to the * next bitmap word * * Describes the radiotap parser state. Fields prefixed with an underscore * must not be used by users of the parser, only by the parser internally. */ struct ieee80211_radiotap_iterator { struct ieee80211_radiotap_header *_rtheader; const struct ieee80211_radiotap_vendor_namespaces *_vns; const struct ieee80211_radiotap_namespace *current_namespace; unsigned char *_arg, *_next_ns_data; __le32 *_next_bitmap; unsigned char *this_arg; int this_arg_index; int this_arg_size; int is_radiotap_ns; int _max_length; int _arg_index; uint32_t _bitmap_shifter; int _reset_on_ext; }; int ieee80211_radiotap_iterator_init(struct ieee80211_radiotap_iterator *iterator, struct ieee80211_radiotap_header *radiotap_header, int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns); int ieee80211_radiotap_iterator_next(struct ieee80211_radiotap_iterator *iterator); extern const unsigned char rfc1042_header[6]; extern const unsigned char bridge_tunnel_header[6]; /** * ieee80211_get_hdrlen_from_skb - get header length from data * * @skb: the frame * * Given an skb with a raw 802.11 header at the data pointer this function * returns the 802.11 header length. * * Return: The 802.11 header length in bytes (not including encryption * headers). Or 0 if the data in the sk_buff is too short to contain a valid * 802.11 header. */ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb); /** * ieee80211_hdrlen - get header length in bytes from frame control * @fc: frame control field in little-endian format * Return: The header length in bytes. 
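 *
 * For example, a three-address data frame yields 24, a QoS data frame
 * yields 26 (the 2-byte QoS control field is included), and a
 * four-address QoS data frame yields 32.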
*/ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc); /** * ieee80211_get_mesh_hdrlen - get mesh extension header length * @meshhdr: the mesh extension header, only the flags field * (first byte) will be accessed * Return: The length of the extension header, which is always at * least 6 bytes and at most 18 if address 5 and 6 are present. */ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); /** * DOC: Data path helpers * * In addition to generic utilities, cfg80211 also offers * functions that help implement the data path for devices * that do not do the 802.11/802.3 conversion on the device. */ /** * ieee80211_data_to_8023_exthdr - convert an 802.11 data frame to 802.3 * @skb: the 802.11 data frame * @ehdr: pointer to a &struct ethhdr that will get the header, instead * of it being pushed into the SKB * @addr: the device MAC address * @iftype: the virtual interface type * @data_offset: offset of payload after the 802.11 header * @is_amsdu: true if the 802.11 header is A-MSDU * Return: 0 on success. Non-zero on error. */ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, const u8 *addr, enum nl80211_iftype iftype, u8 data_offset, bool is_amsdu); /** * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 * @skb: the 802.11 data frame * @addr: the device MAC address * @iftype: the virtual interface type * Return: 0 on success. Non-zero on error. */ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, enum nl80211_iftype iftype) { return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false); } /** * ieee80211_is_valid_amsdu - check if subframe lengths of an A-MSDU are valid * * This is used to detect non-standard A-MSDU frames, e.g. the ones generated * by ath10k and ath11k, where the subframe length includes the length of the * mesh control field. * * @skb: The input A-MSDU frame without any headers. * @mesh_hdr: the type of mesh header to test * 0: non-mesh A-MSDU length field * 1: big-endian mesh A-MSDU length field * 2: little-endian mesh A-MSDU length field * Returns: true if subframe header lengths are valid for the @mesh_hdr mode */ bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr); /** * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame * * Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames. * The @list will be empty if the decode fails. The @skb must be fully * header-less before being passed in here; it is freed in this function. * * @skb: The input A-MSDU frame without any headers. * @list: The output list of 802.3 frames. It must be allocated and * initialized by the caller. * @addr: The device MAC address. * @iftype: The device interface type. * @extra_headroom: The hardware extra headroom for SKBs in the @list. * @check_da: DA to check in the inner ethernet header, or NULL * @check_sa: SA to check in the inner ethernet header, or NULL * @mesh_control: see mesh_hdr in ieee80211_is_valid_amsdu */ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, const u8 *addr, enum nl80211_iftype iftype, const unsigned int extra_headroom, const u8 *check_da, const u8 *check_sa, u8 mesh_control); /** * ieee80211_get_8023_tunnel_proto - get RFC1042 or bridge tunnel encap protocol * * Check for RFC1042 or bridge tunnel header and fetch the encapsulated * protocol. 
* * @hdr: pointer to the MSDU payload * @proto: destination pointer to store the protocol * Return: true if encapsulation was found */ bool ieee80211_get_8023_tunnel_proto(const void *hdr, __be16 *proto); /** * ieee80211_strip_8023_mesh_hdr - strip mesh header from converted 802.3 frames * * Strip the mesh header, which was left in by ieee80211_data_to_8023 as part * of the MSDU data. Also move any source/destination addresses from the mesh * header to the ethernet header (if present). * * @skb: The 802.3 frame with embedded mesh header * * Return: 0 on success. Non-zero on error. */ int ieee80211_strip_8023_mesh_hdr(struct sk_buff *skb); /** * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame * @skb: the data frame * @qos_map: Interworking QoS mapping or %NULL if not in use * Return: The 802.1p/1d tag. */ unsigned int cfg80211_classify8021d(struct sk_buff *skb, struct cfg80211_qos_map *qos_map); /** * cfg80211_find_elem_match - match information element and byte array in data * * @eid: element ID * @ies: data consisting of IEs * @len: length of data * @match: byte array to match * @match_len: number of bytes in the match array * @match_offset: offset in the IE data where the byte array should match. * Note the difference to cfg80211_find_ie_match() which considers * the offset to start from the element ID byte, but here we take * the data portion instead. * * Return: %NULL if the element ID could not be found or if * the element is invalid (claims to be longer than the given * data) or if the byte array doesn't match; otherwise return the * requested element struct. * * Note: There are no checks on the element length other than * having to fit into the given data and being large enough for the * byte array to match. */ const struct element * cfg80211_find_elem_match(u8 eid, const u8 *ies, unsigned int len, const u8 *match, unsigned int match_len, unsigned int match_offset); /** * cfg80211_find_ie_match - match information element and byte array in data * * @eid: element ID * @ies: data consisting of IEs * @len: length of data * @match: byte array to match * @match_len: number of bytes in the match array * @match_offset: offset in the IE where the byte array should match. * If match_len is zero, this must also be set to zero. * Otherwise this must be set to 2 or more, because the first * byte is the element id, which is already compared to eid, and * the second byte is the IE length. * * Return: %NULL if the element ID could not be found or if * the element is invalid (claims to be longer than the given * data) or if the byte array doesn't match, or a pointer to the first * byte of the requested element, that is the byte containing the * element ID. * * Note: There are no checks on the element length other than * having to fit into the given data and being large enough for the * byte array to match. */ static inline const u8 * cfg80211_find_ie_match(u8 eid, const u8 *ies, unsigned int len, const u8 *match, unsigned int match_len, unsigned int match_offset) { /* match_offset can't be smaller than 2, unless match_len is * zero, in which case match_offset must be zero as well. */ if (WARN_ON((match_len && match_offset < 2) || (!match_len && match_offset))) return NULL; return (const void *)cfg80211_find_elem_match(eid, ies, len, match, match_len, match_offset ? 
match_offset - 2 : 0); } /** * cfg80211_find_elem - find information element in data * * @eid: element ID * @ies: data consisting of IEs * @len: length of data * * Return: %NULL if the element ID could not be found or if * the element is invalid (claims to be longer than the given * data) or if the byte array doesn't match; otherwise return the * requested element struct. * * Note: There are no checks on the element length other than * having to fit into the given data. */ static inline const struct element * cfg80211_find_elem(u8 eid, const u8 *ies, int len) { return cfg80211_find_elem_match(eid, ies, len, NULL, 0, 0); } /** * cfg80211_find_ie - find information element in data * * @eid: element ID * @ies: data consisting of IEs * @len: length of data * * Return: %NULL if the element ID could not be found or if * the element is invalid (claims to be longer than the given * data), or a pointer to the first byte of the requested * element, that is the byte containing the element ID. * * Note: There are no checks on the element length other than * having to fit into the given data. */ static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len) { return cfg80211_find_ie_match(eid, ies, len, NULL, 0, 0); } /** * cfg80211_find_ext_elem - find information element with EID Extension in data * * @ext_eid: element ID Extension * @ies: data consisting of IEs * @len: length of data * * Return: %NULL if the extended element could not be found or if * the element is invalid (claims to be longer than the given * data) or if the byte array doesn't match; otherwise return the * requested element struct. * * Note: There are no checks on the element length other than * having to fit into the given data. */ static inline const struct element * cfg80211_find_ext_elem(u8 ext_eid, const u8 *ies, int len) { return cfg80211_find_elem_match(WLAN_EID_EXTENSION, ies, len, &ext_eid, 1, 0); } /** * cfg80211_find_ext_ie - find information element with EID Extension in data * * @ext_eid: element ID Extension * @ies: data consisting of IEs * @len: length of data * * Return: %NULL if the extended element ID could not be found or if * the element is invalid (claims to be longer than the given * data), or a pointer to the first byte of the requested * element, that is the byte containing the element ID. * * Note: There are no checks on the element length other than * having to fit into the given data. */ static inline const u8 *cfg80211_find_ext_ie(u8 ext_eid, const u8 *ies, int len) { return cfg80211_find_ie_match(WLAN_EID_EXTENSION, ies, len, &ext_eid, 1, 2); } /** * cfg80211_find_vendor_elem - find vendor specific information element in data * * @oui: vendor OUI * @oui_type: vendor-specific OUI type (must be < 0xff), negative means any * @ies: data consisting of IEs * @len: length of data * * Return: %NULL if the vendor specific element ID could not be found or if the * element is invalid (claims to be longer than the given data); otherwise * return the element structure for the requested element. * * Note: There are no checks on the element length other than having to fit into * the given data. 
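 *
 * A usage sketch (illustrative): the WPA vendor element uses the
 * Microsoft OUI 0x0050f2 with OUI type 1, so it could be located with:
 *
 *	elem = cfg80211_find_vendor_elem(0x0050f2, 1, ies, len);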
*/ const struct element *cfg80211_find_vendor_elem(unsigned int oui, int oui_type, const u8 *ies, unsigned int len); /** * cfg80211_find_vendor_ie - find vendor specific information element in data * * @oui: vendor OUI * @oui_type: vendor-specific OUI type (must be < 0xff), negative means any * @ies: data consisting of IEs * @len: length of data * * Return: %NULL if the vendor specific element ID could not be found or if the * element is invalid (claims to be longer than the given data), or a pointer to * the first byte of the requested element, that is the byte containing the * element ID. * * Note: There are no checks on the element length other than having to fit into * the given data. */ static inline const u8 * cfg80211_find_vendor_ie(unsigned int oui, int oui_type, const u8 *ies, unsigned int len) { return (const void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len); } /** * enum cfg80211_rnr_iter_ret - reduced neighbor report iteration state * @RNR_ITER_CONTINUE: continue iterating with the next entry * @RNR_ITER_BREAK: break iteration and return success * @RNR_ITER_ERROR: break iteration and return error */ enum cfg80211_rnr_iter_ret { RNR_ITER_CONTINUE, RNR_ITER_BREAK, RNR_ITER_ERROR, }; /** * cfg80211_iter_rnr - iterate reduced neighbor report entries * @elems: the frame elements to iterate RNR elements and then * their entries in * @elems_len: length of the elements * @iter: iteration function, see also &enum cfg80211_rnr_iter_ret * for the return value * @iter_data: additional data passed to the iteration function * Return: %true on success (after successfully iterating all entries * or if the iteration function returned %RNR_ITER_BREAK), * %false on error (iteration function returned %RNR_ITER_ERROR * or elements were malformed.) */ bool cfg80211_iter_rnr(const u8 *elems, size_t elems_len, enum cfg80211_rnr_iter_ret (*iter)(void *data, u8 type, const struct ieee80211_neighbor_ap_info *info, const u8 *tbtt_info, u8 tbtt_info_len), void *iter_data); /** * cfg80211_defragment_element - Defrag the given element data into a buffer * * @elem: the element to defragment * @ies: elements where @elem is contained * @ieslen: length of @ies * @data: buffer to store element data, or %NULL to just determine size * @data_len: length of @data, or 0 * @frag_id: the element ID of fragments * * Return: length of @data, or -EINVAL on error * * Copy out all data from an element that may be fragmented into @data, while * skipping all headers. * * The function uses memmove() internally. It is acceptable to defragment an * element in-place. */ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies, size_t ieslen, u8 *data, size_t data_len, u8 frag_id); /** * cfg80211_send_layer2_update - send layer 2 update frame * * @dev: network device * @addr: STA MAC address * * Wireless drivers can use this function to update forwarding tables in bridge * devices upon STA association. */ void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr); /** * DOC: Regulatory enforcement infrastructure * * TODO */ /** * regulatory_hint - driver hint to the wireless core of a regulatory domain * @wiphy: the wireless device giving the hint (used only for reporting * conflicts) * @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory * domain should be in; must not be NULL.
* * Wireless drivers can use this function to hint to the wireless core * what they believe should be the current regulatory domain by * giving it an ISO/IEC 3166 alpha2 country code they know their regulatory * domain should be in or by providing a completely built regulatory domain. * If the driver provides an ISO/IEC 3166 alpha2, userspace will be queried * for a regulatory domain structure for the respective country. * * The wiphy must have been registered to cfg80211 prior to this call. * For cfg80211 drivers this means you must first use wiphy_register(), * for mac80211 drivers you must first use ieee80211_register_hw(). * * Drivers should check the return value; it's possible to get * an -ENOMEM. * * Return: 0 on success. -ENOMEM. */ int regulatory_hint(struct wiphy *wiphy, const char *alpha2); /** * regulatory_set_wiphy_regd - set regdom info for self managed drivers * @wiphy: the wireless device we want to process the regulatory domain on * @rd: the regulatory domain information to use for this wiphy * * Set the regulatory domain information for self-managed wiphys; only they * may use this function. See %REGULATORY_WIPHY_SELF_MANAGED for more * information. * * Return: 0 on success. -EINVAL, -EPERM */ int regulatory_set_wiphy_regd(struct wiphy *wiphy, struct ieee80211_regdomain *rd); /** * regulatory_set_wiphy_regd_sync - set regdom for self-managed drivers * @wiphy: the wireless device we want to process the regulatory domain on * @rd: the regulatory domain information to use for this wiphy * * This function requires the RTNL and the wiphy mutex to be held and * applies the new regdomain synchronously to this wiphy. For more details * see regulatory_set_wiphy_regd(). * * Return: 0 on success. -EINVAL, -EPERM */ int regulatory_set_wiphy_regd_sync(struct wiphy *wiphy, struct ieee80211_regdomain *rd); /** * wiphy_apply_custom_regulatory - apply a custom driver regulatory domain * @wiphy: the wireless device we want to process the regulatory domain on * @regd: the custom regulatory domain to use for this wiphy * * Drivers can sometimes have custom regulatory domains which do not apply * to a specific country. Drivers can use this to apply such custom regulatory * domains. This routine must be called prior to wiphy registration. The * custom regulatory domain will be trusted completely and as such previous * default channel settings will be disregarded. If no rule is found for a * channel on the regulatory domain the channel will be disabled. * Drivers using this for a wiphy should also set the wiphy flag * REGULATORY_CUSTOM_REG or cfg80211 will set it for the wiphy * that called this helper. */ void wiphy_apply_custom_regulatory(struct wiphy *wiphy, const struct ieee80211_regdomain *regd); /** * freq_reg_info - get regulatory information for the given frequency * @wiphy: the wiphy for which we want to process this rule * @center_freq: Frequency in KHz for which we want regulatory information * * Use this function to get the regulatory rule for a specific frequency on * a given wireless device. If the device has a specific regulatory domain * it wants to follow we respect that unless a country IE has been received * and processed already. * * Return: A valid pointer, or, when an error occurs, for example if no rule * can be found, the return value is encoded using ERR_PTR(). Use IS_ERR() to * check and PTR_ERR() to obtain the numeric return value.
The numeric return * value will be -ERANGE if we determine the given center_freq does not even * have a regulatory rule for a frequency range in the center_freq's band. * See freq_in_rule_band() for our current definition of a band -- this is * purely subjective and right now it's 802.11 specific. */ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy, u32 center_freq); /** * reg_initiator_name - map regulatory request initiator enum to name * @initiator: the regulatory request initiator * * You can use this to map the regulatory request initiator enum to a * proper string representation. * * Return: pointer to string representation of the initiator */ const char *reg_initiator_name(enum nl80211_reg_initiator initiator); /** * regulatory_pre_cac_allowed - check if pre-CAC allowed in the current regdom * @wiphy: wiphy for which pre-CAC capability is checked. * * Pre-CAC is allowed only in some regdomains (notably ETSI). * * Return: %true if allowed, %false otherwise */ bool regulatory_pre_cac_allowed(struct wiphy *wiphy); /** * DOC: Internal regulatory db functions * */ /** * reg_query_regdb_wmm - Query internal regulatory db for wmm rule * * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried. * @freq: the frequency (in MHz) to be queried. * @rule: pointer to store the wmm rule from the regulatory db. * * Self-managed wireless drivers can use this function to proactively query * the internal regulatory database to check whether the given * ISO/IEC 3166 alpha2 country and freq have wmm rule limitations. * * Drivers should check the return value; it's possible to get * -ENODATA. * * Return: 0 on success, or -ENODATA. */ int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule); /* * callbacks for asynchronous cfg80211 methods, notification * functions and BSS handling helpers */ /** * cfg80211_scan_done - notify that scan finished * * @request: the corresponding scan request * @info: information about the completed scan */ void cfg80211_scan_done(struct cfg80211_scan_request *request, struct cfg80211_scan_info *info); /** * cfg80211_sched_scan_results - notify that new scan results are available * * @wiphy: the wiphy which got scheduled scan results * @reqid: identifier for the related scheduled scan request */ void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid); /** * cfg80211_sched_scan_stopped - notify that the scheduled scan has stopped * * @wiphy: the wiphy on which the scheduled scan stopped * @reqid: identifier for the related scheduled scan request * * The driver can call this function to inform cfg80211 that the * scheduled scan had to be stopped, for whatever reason. The driver * is then called back via the sched_scan_stop operation when done. */ void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid); /** * cfg80211_sched_scan_stopped_locked - notify that the scheduled scan has stopped * * @wiphy: the wiphy on which the scheduled scan stopped * @reqid: identifier for the related scheduled scan request * * The driver can call this function to inform cfg80211 that the * scheduled scan had to be stopped, for whatever reason. The driver * is then called back via the sched_scan_stop operation when done. * This function should be called with the wiphy mutex held.
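 *
 * A minimal usage sketch (illustrative; "drv" and its fields are a
 * hypothetical driver context, not part of this API):
 *
 *	lockdep_assert_wiphy(drv->wiphy);
 *	cfg80211_sched_scan_stopped_locked(drv->wiphy, drv->sched_scan_reqid);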
*/ void cfg80211_sched_scan_stopped_locked(struct wiphy *wiphy, u64 reqid); /** * cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame * @wiphy: the wiphy reporting the BSS * @data: the BSS metadata * @mgmt: the management frame (probe response or beacon) * @len: length of the management frame * @gfp: context flags * * This informs cfg80211 that BSS information was found and * the BSS should be updated/added. * * Return: A referenced struct, must be released with cfg80211_put_bss()! * Or %NULL on error. */ struct cfg80211_bss * __must_check cfg80211_inform_bss_frame_data(struct wiphy *wiphy, struct cfg80211_inform_bss *data, struct ieee80211_mgmt *mgmt, size_t len, gfp_t gfp); static inline struct cfg80211_bss * __must_check cfg80211_inform_bss_frame(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, struct ieee80211_mgmt *mgmt, size_t len, s32 signal, gfp_t gfp) { struct cfg80211_inform_bss data = { .chan = rx_channel, .signal = signal, }; return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp); } /** * cfg80211_gen_new_bssid - generate a nontransmitted BSSID for multi-BSSID * @bssid: transmitter BSSID * @max_bssid: max BSSID indicator, taken from Multiple BSSID element * @mbssid_index: BSSID index, taken from Multiple BSSID index element * @new_bssid: calculated nontransmitted BSSID */ static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid, u8 mbssid_index, u8 *new_bssid) { u64 bssid_u64 = ether_addr_to_u64(bssid); u64 mask = GENMASK_ULL(max_bssid - 1, 0); u64 new_bssid_u64; new_bssid_u64 = bssid_u64 & ~mask; new_bssid_u64 |= ((bssid_u64 & mask) + mbssid_index) & mask; u64_to_ether_addr(new_bssid_u64, new_bssid); } /** * cfg80211_is_element_inherited - returns if element ID should be inherited * @element: element to check * @non_inherit_element: non inheritance element * * Return: %true if should be inherited, %false otherwise */ bool cfg80211_is_element_inherited(const struct element *element, const struct element *non_inherit_element); /** * cfg80211_merge_profile - merges a MBSSID profile if it is split between IEs * @ie: ies * @ielen: length of IEs * @mbssid_elem: current MBSSID element * @sub_elem: current MBSSID subelement (profile) * @merged_ie: location of the merged profile * @max_copy_len: max merged profile length * * Return: the number of bytes merged */ size_t cfg80211_merge_profile(const u8 *ie, size_t ielen, const struct element *mbssid_elem, const struct element *sub_elem, u8 *merged_ie, size_t max_copy_len); /** * enum cfg80211_bss_frame_type - frame type that the BSS data came from * @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is * from a beacon or probe response * @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon * @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response * @CFG80211_BSS_FTYPE_S1G_BEACON: data comes from an S1G beacon */ enum cfg80211_bss_frame_type { CFG80211_BSS_FTYPE_UNKNOWN, CFG80211_BSS_FTYPE_BEACON, CFG80211_BSS_FTYPE_PRESP, CFG80211_BSS_FTYPE_S1G_BEACON, }; /** * cfg80211_get_ies_channel_number - returns the channel number from ies * @ie: IEs * @ielen: length of IEs * @band: enum nl80211_band of the channel * * Return: the channel number, or -1 if none could be determined. */ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen, enum nl80211_band band); /** * cfg80211_ssid_eq - compare two SSIDs * @a: first SSID * @b: second SSID * * Return: %true if SSIDs are equal, %false otherwise. 
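 *
 * For example (illustrative sketch), two SSIDs compare equal only when
 * both length and contents match:
 *
 *	struct cfg80211_ssid a = { .ssid = "guest", .ssid_len = 5 };
 *	struct cfg80211_ssid b = { .ssid = "guest", .ssid_len = 5 };
 *
 *	WARN_ON(!cfg80211_ssid_eq(&a, &b));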
*/ static inline bool cfg80211_ssid_eq(struct cfg80211_ssid *a, struct cfg80211_ssid *b) { if (WARN_ON(!a || !b)) return false; if (a->ssid_len != b->ssid_len) return false; return memcmp(a->ssid, b->ssid, a->ssid_len) ? false : true; } /** * cfg80211_inform_bss_data - inform cfg80211 of a new BSS * * @wiphy: the wiphy reporting the BSS * @data: the BSS metadata * @ftype: frame type (if known) * @bssid: the BSSID of the BSS * @tsf: the TSF sent by the peer in the beacon/probe response (or 0) * @capability: the capability field sent by the peer * @beacon_interval: the beacon interval announced by the peer * @ie: additional IEs sent by the peer * @ielen: length of the additional IEs * @gfp: context flags * * This informs cfg80211 that BSS information was found and * the BSS should be updated/added. * * Return: A referenced struct, must be released with cfg80211_put_bss()! * Or %NULL on error. */ struct cfg80211_bss * __must_check cfg80211_inform_bss_data(struct wiphy *wiphy, struct cfg80211_inform_bss *data, enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, gfp_t gfp); static inline struct cfg80211_bss * __must_check cfg80211_inform_bss(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, s32 signal, gfp_t gfp) { struct cfg80211_inform_bss data = { .chan = rx_channel, .signal = signal, }; return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf, capability, beacon_interval, ie, ielen, gfp); } /** * __cfg80211_get_bss - get a BSS reference * @wiphy: the wiphy this BSS struct belongs to * @channel: the channel to search on (or %NULL) * @bssid: the desired BSSID (or %NULL) * @ssid: the desired SSID (or %NULL) * @ssid_len: length of the SSID (or 0) * @bss_type: type of BSS, see &enum ieee80211_bss_type * @privacy: privacy filter, see &enum ieee80211_privacy * @use_for: indicates which use is intended * * Return: Reference-counted BSS on success. %NULL on error. */ struct cfg80211_bss *__cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *bssid, const u8 *ssid, size_t ssid_len, enum ieee80211_bss_type bss_type, enum ieee80211_privacy privacy, u32 use_for); /** * cfg80211_get_bss - get a BSS reference * @wiphy: the wiphy this BSS struct belongs to * @channel: the channel to search on (or %NULL) * @bssid: the desired BSSID (or %NULL) * @ssid: the desired SSID (or %NULL) * @ssid_len: length of the SSID (or 0) * @bss_type: type of BSS, see &enum ieee80211_bss_type * @privacy: privacy filter, see &enum ieee80211_privacy * * This version implies regular usage, %NL80211_BSS_USE_FOR_NORMAL. * * Return: Reference-counted BSS on success. %NULL on error. 
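 *
 * A typical lookup/release pattern (illustrative sketch; the surrounding
 * variables are hypothetical):
 *
 *	struct cfg80211_bss *bss;
 *
 *	bss = cfg80211_get_bss(wiphy, chan, bssid, ssid, ssid_len,
 *			       IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
 *	if (bss) {
 *		... use bss here ...
 *		cfg80211_put_bss(wiphy, bss);
 *	}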
*/ static inline struct cfg80211_bss * cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *bssid, const u8 *ssid, size_t ssid_len, enum ieee80211_bss_type bss_type, enum ieee80211_privacy privacy) { return __cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, bss_type, privacy, NL80211_BSS_USE_FOR_NORMAL); } static inline struct cfg80211_bss * cfg80211_get_ibss(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *ssid, size_t ssid_len) { return cfg80211_get_bss(wiphy, channel, NULL, ssid, ssid_len, IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY_ANY); } /** * cfg80211_ref_bss - reference BSS struct * @wiphy: the wiphy this BSS struct belongs to * @bss: the BSS struct to reference * * Increments the refcount of the given BSS struct. */ void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); /** * cfg80211_put_bss - unref BSS struct * @wiphy: the wiphy this BSS struct belongs to * @bss: the BSS struct * * Decrements the refcount of the given BSS struct. */ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); /** * cfg80211_unlink_bss - unlink BSS from internal data structures * @wiphy: the wiphy * @bss: the bss to remove * * This function removes the given BSS from the internal data structures * thereby making it no longer show up in scan results etc. Use this * function when you detect a BSS is gone. Normally BSSes will also time * out, so it is not necessary to use this function at all. */ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); /** * cfg80211_bss_iter - iterate all BSS entries * * This function iterates over the BSS entries associated with the given wiphy * and calls the callback for the iterated BSS. The iterator function is not * allowed to call functions that might modify the internal state of the BSS DB. * * @wiphy: the wiphy * @chandef: if given, the iterator function will be called only if the channel * of the currently iterated BSS is a subset of the given channel. * @iter: the iterator function to call * @iter_data: an argument to the iterator function */ void cfg80211_bss_iter(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, void (*iter)(struct wiphy *wiphy, struct cfg80211_bss *bss, void *data), void *iter_data); /** * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame * @dev: network device * @buf: authentication frame (header + body) * @len: length of the frame data * * This function is called whenever an authentication, disassociation or * deauthentication frame has been received and processed in station mode. * After being asked to authenticate via cfg80211_ops::auth() the driver must * call either this function or cfg80211_auth_timeout(). * After being asked to associate via cfg80211_ops::assoc() the driver must * call either cfg80211_rx_assoc_resp() or cfg80211_assoc_failure(). * While connected, the driver must call this for received and processed * disassociation and deauthentication frames. If the frame couldn't be used * because it was unprotected, the driver must call the function * cfg80211_rx_unprot_mlme_mgmt() instead. * * This function may sleep. The caller must hold the corresponding wdev's mutex. */ void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); /** * cfg80211_auth_timeout - notification of timed out authentication * @dev: network device * @addr: The MAC address of the device with which the authentication timed out * * This function may sleep. The caller must hold the corresponding wdev's * mutex.
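 *
 * For example (illustrative sketch; the local variables are hypothetical),
 * a driver's authentication completion path might look like:
 *
 *	if (auth_frame_received)
 *		cfg80211_rx_mlme_mgmt(dev, frame_buf, frame_len);
 *	else
 *		cfg80211_auth_timeout(dev, ap_addr);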
*/ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); /** * struct cfg80211_rx_assoc_resp_data - association response data * @buf: (Re)Association Response frame (header + body) * @len: length of the frame data * @uapsd_queues: bitmap of queues configured for uapsd. Same format * as the AC bitmap in the QoS info field * @req_ies: information elements from the (Re)Association Request frame * @req_ies_len: length of req_ies data * @ap_mld_addr: AP MLD address (in case of MLO) * @links: per-link information indexed by link ID, use links[0] for * non-MLO connections * @links.bss: the BSS that association was requested with, ownership of the * pointer moves to cfg80211 in the call to cfg80211_rx_assoc_resp() * @links.status: Set this (along with a BSS pointer) for links that * were rejected by the AP. */ struct cfg80211_rx_assoc_resp_data { const u8 *buf; size_t len; const u8 *req_ies; size_t req_ies_len; int uapsd_queues; const u8 *ap_mld_addr; struct { u8 addr[ETH_ALEN] __aligned(2); struct cfg80211_bss *bss; u16 status; } links[IEEE80211_MLD_MAX_NUM_LINKS]; }; /** * cfg80211_rx_assoc_resp - notification of processed association response * @dev: network device * @data: association response data, &struct cfg80211_rx_assoc_resp_data * * After being asked to associate via cfg80211_ops::assoc() the driver must * call either this function or cfg80211_assoc_failure(). * * This function may sleep. The caller must hold the corresponding wdev's mutex. */ void cfg80211_rx_assoc_resp(struct net_device *dev, const struct cfg80211_rx_assoc_resp_data *data); /** * struct cfg80211_assoc_failure - association failure data * @ap_mld_addr: AP MLD address, or %NULL * @bss: list of BSSes, must use entry 0 for non-MLO connections * (@ap_mld_addr is %NULL) * @timeout: indicates the association failed due to timeout, otherwise * the association was abandoned for a reason reported through some * other API (e.g. deauth RX) */ struct cfg80211_assoc_failure { const u8 *ap_mld_addr; struct cfg80211_bss *bss[IEEE80211_MLD_MAX_NUM_LINKS]; bool timeout; }; /** * cfg80211_assoc_failure - notification of association failure * @dev: network device * @data: data describing the association failure * * This function may sleep. The caller must hold the corresponding wdev's mutex. */ void cfg80211_assoc_failure(struct net_device *dev, struct cfg80211_assoc_failure *data); /** * cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame * @dev: network device * @buf: 802.11 frame (header + body) * @len: length of the frame data * @reconnect: immediate reconnect is desired (include the nl80211 attribute) * * This function is called whenever deauthentication has been processed in * station mode. This includes both received deauthentication frames and * locally generated ones. This function may sleep. The caller must hold the * corresponding wdev's mutex. */ void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len, bool reconnect); /** * cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame * @dev: network device * @buf: received management frame (header + body) * @len: length of the frame data * * This function is called whenever a received deauthentication or disassoc * frame has been dropped in station mode because of MFP being used but the * frame was not protected. This is also used to notify reception of a Beacon * frame that was dropped because it did not include a valid MME MIC while * beacon protection was enabled (BIGTK configured in station mode).
* * This function may sleep. */ void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); /** * cfg80211_michael_mic_failure - notification of Michael MIC failure (TKIP) * @dev: network device * @addr: The source MAC address of the frame * @key_type: The key type that the received frame used * @key_id: Key identifier (0..3). Can be -1 if missing. * @tsc: The TSC value of the frame that generated the MIC failure (6 octets) * @gfp: allocation flags * * This function is called whenever the local MAC detects a MIC failure in a * received frame. This matches with MLME-MICHAELMICFAILURE.indication() * primitive. */ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, enum nl80211_key_type key_type, int key_id, const u8 *tsc, gfp_t gfp); /** * cfg80211_ibss_joined - notify cfg80211 that device joined an IBSS * * @dev: network device * @bssid: the BSSID of the IBSS joined * @channel: the channel of the IBSS joined * @gfp: allocation flags * * This function notifies cfg80211 that the device joined an IBSS or * switched to a different BSSID. Before this function can be called, * either a beacon has to have been received from the IBSS, or one of * the cfg80211_inform_bss{,_frame} functions must have been called * with the locally generated beacon -- this guarantees that there is * always a scan result for this IBSS. cfg80211 will handle the rest. */ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, struct ieee80211_channel *channel, gfp_t gfp); /** * cfg80211_notify_new_peer_candidate - notify cfg80211 of a new mesh peer * candidate * * @dev: network device * @macaddr: the MAC address of the new candidate * @ie: information elements advertised by the peer candidate * @ie_len: length of the information elements buffer * @sig_dbm: signal level in dBm * @gfp: allocation flags * * This function notifies cfg80211 that the mesh peer candidate has been * detected, most likely via a beacon or, less likely, via a probe response. * cfg80211 then sends a notification to userspace. */ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *macaddr, const u8 *ie, u8 ie_len, int sig_dbm, gfp_t gfp); /** * DOC: RFkill integration * * RFkill integration in cfg80211 is almost invisible to drivers, * as cfg80211 automatically registers an rfkill instance for each * wireless device it knows about. Soft kill is also translated * into disconnecting and turning all interfaces off. Drivers are * expected to turn off the device when all interfaces are down. * * However, devices may have a hard RFkill line, in which case they * also need to interact with the rfkill subsystem, via cfg80211. * They can do this with a few helper functions documented here. 
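 *
 * For example (illustrative sketch; the GPIO read helper is hypothetical),
 * a driver with a hard RFkill line might forward its state like this:
 *
 *	bool blocked = my_drv_read_rfkill_gpio(drv);
 *
 *	wiphy_rfkill_set_hw_state(drv->wiphy, blocked);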
*/ /** * wiphy_rfkill_set_hw_state_reason - notify cfg80211 about hw block state * @wiphy: the wiphy * @blocked: block status * @reason: one of reasons in &enum rfkill_hard_block_reasons */ void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked, enum rfkill_hard_block_reasons reason); static inline void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked) { wiphy_rfkill_set_hw_state_reason(wiphy, blocked, RFKILL_HARD_BLOCK_SIGNAL); } /** * wiphy_rfkill_start_polling - start polling rfkill * @wiphy: the wiphy */ void wiphy_rfkill_start_polling(struct wiphy *wiphy); /** * wiphy_rfkill_stop_polling - stop polling rfkill * @wiphy: the wiphy */ static inline void wiphy_rfkill_stop_polling(struct wiphy *wiphy) { rfkill_pause_polling(wiphy->rfkill); } /** * DOC: Vendor commands * * Occasionally, there are special protocol or firmware features that * can't be implemented very openly. For this and similar cases, the * vendor command functionality allows implementing the features with * (typically closed-source) userspace and firmware, using nl80211 as * the configuration mechanism. * * A driver supporting vendor commands must register them as an array * in struct wiphy, with handlers for each one. Each command has an * OUI and sub command ID to identify it. * * Note that this feature should not be (ab)used to implement protocol * features that could openly be shared across drivers. In particular, * it must never be required to use vendor commands to implement any * "normal" functionality that higher-level userspace like connection * managers etc. need. */ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy, enum nl80211_commands cmd, enum nl80211_attrs attr, int approxlen); struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_commands cmd, enum nl80211_attrs attr, unsigned int portid, int vendor_event_idx, int approxlen, gfp_t gfp); void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp); /** * cfg80211_vendor_cmd_alloc_reply_skb - allocate vendor command reply * @wiphy: the wiphy * @approxlen: an upper bound of the length of the data that will * be put into the skb * * This function allocates and pre-fills an skb for a reply to * a vendor command. Since it is intended for a reply, calling * it outside of a vendor command's doit() operation is invalid. * * The returned skb is pre-filled with some identifying data in * a way that any data that is put into the skb (with skb_put(), * nla_put() or similar) will end up being within the * %NL80211_ATTR_VENDOR_DATA attribute, so all that needs to be done * with the skb is adding data for the corresponding userspace tool * which can then read that data out of the vendor data attribute. * You must not modify the skb in any other way. * * When done, call cfg80211_vendor_cmd_reply() with the skb and return * its error code as the result of the doit() operation. * * Return: An allocated and pre-filled skb. %NULL if any errors happen. */ static inline struct sk_buff * cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen) { return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_VENDOR, NL80211_ATTR_VENDOR_DATA, approxlen); } /** * cfg80211_vendor_cmd_reply - send the reply skb * @skb: The skb, must have been allocated with * cfg80211_vendor_cmd_alloc_reply_skb() * * Since calling this function will usually be the last thing * before returning from the vendor command doit() you should * return the error code. 
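 *
 * For instance, a doit() handler might end like this (illustrative
 * sketch; MY_VENDOR_ATTR_STATUS is a hypothetical attribute, not part
 * of nl80211):
 *
 *	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
 *	if (!skb)
 *		return -ENOMEM;
 *	if (nla_put_u32(skb, MY_VENDOR_ATTR_STATUS, status)) {
 *		kfree_skb(skb);
 *		return -ENOBUFS;
 *	}
 *	return cfg80211_vendor_cmd_reply(skb);
 *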
Note that this function consumes the * skb regardless of the return value. * * Return: An error code or 0 on success. */ int cfg80211_vendor_cmd_reply(struct sk_buff *skb); /** * cfg80211_vendor_cmd_get_sender - get the current sender netlink ID * @wiphy: the wiphy * * Return: the current netlink port ID in a vendor command handler. * * Context: May only be called from a vendor command handler */ unsigned int cfg80211_vendor_cmd_get_sender(struct wiphy *wiphy); /** * cfg80211_vendor_event_alloc - allocate vendor-specific event skb * @wiphy: the wiphy * @wdev: the wireless device * @event_idx: index of the vendor event in the wiphy's vendor_events * @approxlen: an upper bound of the length of the data that will * be put into the skb * @gfp: allocation flags * * This function allocates and pre-fills an skb for an event on the * vendor-specific multicast group. * * If wdev != NULL, both the ifindex and identifier of the specified * wireless device are added to the event message before the vendor data * attribute. * * When done filling the skb, call cfg80211_vendor_event() with the * skb to send the event. * * Return: An allocated and pre-filled skb. %NULL if any errors happen. */ static inline struct sk_buff * cfg80211_vendor_event_alloc(struct wiphy *wiphy, struct wireless_dev *wdev, int approxlen, int event_idx, gfp_t gfp) { return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR, NL80211_ATTR_VENDOR_DATA, 0, event_idx, approxlen, gfp); } /** * cfg80211_vendor_event_alloc_ucast - alloc unicast vendor-specific event skb * @wiphy: the wiphy * @wdev: the wireless device * @event_idx: index of the vendor event in the wiphy's vendor_events * @portid: port ID of the receiver * @approxlen: an upper bound of the length of the data that will * be put into the skb * @gfp: allocation flags * * This function allocates and pre-fills an skb for an event to send to * a specific (userland) socket. This socket would previously have been * obtained by cfg80211_vendor_cmd_get_sender(), and the caller MUST take * care to register a netlink notifier to see when the socket closes. * * If wdev != NULL, both the ifindex and identifier of the specified * wireless device are added to the event message before the vendor data * attribute. * * When done filling the skb, call cfg80211_vendor_event() with the * skb to send the event. * * Return: An allocated and pre-filled skb. %NULL if any errors happen. */ static inline struct sk_buff * cfg80211_vendor_event_alloc_ucast(struct wiphy *wiphy, struct wireless_dev *wdev, unsigned int portid, int approxlen, int event_idx, gfp_t gfp) { return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR, NL80211_ATTR_VENDOR_DATA, portid, event_idx, approxlen, gfp); } /** * cfg80211_vendor_event - send the event * @skb: The skb, must have been allocated with cfg80211_vendor_event_alloc() * @gfp: allocation flags * * This function sends the given @skb, which must have been allocated * by cfg80211_vendor_event_alloc(), as an event. It always consumes it. */ static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp) { __cfg80211_send_event_skb(skb, gfp); } #ifdef CONFIG_NL80211_TESTMODE /** * DOC: Test mode * * Test mode is a set of utility functions to allow drivers to * interact with driver-specific tools to aid, for instance, * factory programming. * * This chapter describes how drivers interact with it. For more * information see the nl80211 book's chapter on it. 
*/ /** * cfg80211_testmode_alloc_reply_skb - allocate testmode reply * @wiphy: the wiphy * @approxlen: an upper bound of the length of the data that will * be put into the skb * * This function allocates and pre-fills an skb for a reply to * the testmode command. Since it is intended for a reply, calling * it outside of the @testmode_cmd operation is invalid. * * The returned skb is pre-filled with the wiphy index and set up in * a way that any data that is put into the skb (with skb_put(), * nla_put() or similar) will end up being within the * %NL80211_ATTR_TESTDATA attribute, so all that needs to be done * with the skb is adding data for the corresponding userspace tool * which can then read that data out of the testdata attribute. You * must not modify the skb in any other way. * * When done, call cfg80211_testmode_reply() with the skb and return * its error code as the result of the @testmode_cmd operation. * * Return: An allocated and pre-filled skb. %NULL if any errors happen. */ static inline struct sk_buff * cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy, int approxlen) { return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_TESTMODE, NL80211_ATTR_TESTDATA, approxlen); } /** * cfg80211_testmode_reply - send the reply skb * @skb: The skb, must have been allocated with * cfg80211_testmode_alloc_reply_skb() * * Since calling this function will usually be the last thing * before returning from the @testmode_cmd you should return * the error code. Note that this function consumes the skb * regardless of the return value. * * Return: An error code or 0 on success. */ static inline int cfg80211_testmode_reply(struct sk_buff *skb) { return cfg80211_vendor_cmd_reply(skb); } /** * cfg80211_testmode_alloc_event_skb - allocate testmode event * @wiphy: the wiphy * @approxlen: an upper bound of the length of the data that will * be put into the skb * @gfp: allocation flags * * This function allocates and pre-fills an skb for an event on the * testmode multicast group. * * The returned skb is set up in the same way as with * cfg80211_testmode_alloc_reply_skb() but prepared for an event. As * there, you should simply add data to it that will then end up in the * %NL80211_ATTR_TESTDATA attribute. Again, you must not modify the skb * in any other way. * * When done filling the skb, call cfg80211_testmode_event() with the * skb to send the event. * * Return: An allocated and pre-filled skb. %NULL if any errors happen. */ static inline struct sk_buff * cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp) { return __cfg80211_alloc_event_skb(wiphy, NULL, NL80211_CMD_TESTMODE, NL80211_ATTR_TESTDATA, 0, -1, approxlen, gfp); } /** * cfg80211_testmode_event - send the event * @skb: The skb, must have been allocated with * cfg80211_testmode_alloc_event_skb() * @gfp: allocation flags * * This function sends the given @skb, which must have been allocated * by cfg80211_testmode_alloc_event_skb(), as an event. It always * consumes it. 
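 *
 * For example (illustrative sketch; MY_TM_ATTR_EVENT is a hypothetical
 * driver-private attribute):
 *
 *	skb = cfg80211_testmode_alloc_event_skb(wiphy, 16, GFP_KERNEL);
 *	if (skb) {
 *		if (nla_put_u32(skb, MY_TM_ATTR_EVENT, event_code))
 *			kfree_skb(skb);
 *		else
 *			cfg80211_testmode_event(skb, GFP_KERNEL);
 *	}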
*/ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) { __cfg80211_send_event_skb(skb, gfp); } #define CFG80211_TESTMODE_CMD(cmd) .testmode_cmd = (cmd), #define CFG80211_TESTMODE_DUMP(cmd) .testmode_dump = (cmd), #else #define CFG80211_TESTMODE_CMD(cmd) #define CFG80211_TESTMODE_DUMP(cmd) #endif /** * struct cfg80211_fils_resp_params - FILS connection response params * @kek: KEK derived from a successful FILS connection (may be %NULL) * @kek_len: Length of @kek in octets * @update_erp_next_seq_num: Boolean value to specify whether the value in * @erp_next_seq_num is valid. * @erp_next_seq_num: The next sequence number to use in ERP message in * FILS Authentication. This value should be specified irrespective of the * status for a FILS connection. * @pmk: A new PMK if derived from a successful FILS connection (may be %NULL). * @pmk_len: Length of @pmk in octets * @pmkid: A new PMKID if derived from a successful FILS connection or the PMKID * used for this FILS connection (may be %NULL). */ struct cfg80211_fils_resp_params { const u8 *kek; size_t kek_len; bool update_erp_next_seq_num; u16 erp_next_seq_num; const u8 *pmk; size_t pmk_len; const u8 *pmkid; }; /** * struct cfg80211_connect_resp_params - Connection response params * @status: Status code, %WLAN_STATUS_SUCCESS for successful connection, use * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you * the real status code for failures. If this call is used to report a * failure due to a timeout (e.g., not receiving an Authentication frame * from the AP) instead of an explicit rejection by the AP, -1 is used to * indicate that this is a failure, but without a status code. * @timeout_reason is used to report the reason for the timeout in that * case. * @req_ie: Association request IEs (may be %NULL) * @req_ie_len: Association request IEs length * @resp_ie: Association response IEs (may be %NULL) * @resp_ie_len: Association response IEs length * @fils: FILS connection response parameters. * @timeout_reason: Reason for connection timeout. This is used when the * connection fails due to a timeout instead of an explicit rejection from * the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is * not known. This value is used only if @status < 0 to indicate that the * failure is due to a timeout and not due to explicit rejection by the AP. * This value is ignored in other cases (@status >= 0). * @valid_links: For MLO connection, BIT mask of the valid link ids. Otherwise * zero. * @ap_mld_addr: For MLO connection, MLD address of the AP. Otherwise %NULL. * @links: For MLO connection, contains link info for the valid links indicated * using @valid_links. For non-MLO connection, links[0] contains the * connected AP info. * @links.addr: For MLO connection, MAC address of the STA link. Otherwise * %NULL. * @links.bssid: For MLO connection, MAC address of the AP link. For non-MLO * connection, links[0].bssid points to the BSSID of the AP (may be %NULL). * @links.bss: For MLO connection, entry of bss to which STA link is connected. * For non-MLO connection, links[0].bss points to entry of bss to which STA * is connected. It can be obtained through cfg80211_get_bss() (may be * %NULL). It is recommended to store the bss from the connect request, * hold a reference to it, and return it through this param to avoid a * warning if the bss is expired during the connection, esp. for those * drivers implementing the connect op. Only one parameter among @bssid * and @bss needs to be specified.
* @links.status: per-link status code, to report a status code that's not * %WLAN_STATUS_SUCCESS for a given link, it must also be in the * @valid_links bitmap and may have a BSS pointer (which is then released) */ struct cfg80211_connect_resp_params { int status; const u8 *req_ie; size_t req_ie_len; const u8 *resp_ie; size_t resp_ie_len; struct cfg80211_fils_resp_params fils; enum nl80211_timeout_reason timeout_reason; const u8 *ap_mld_addr; u16 valid_links; struct { const u8 *addr; const u8 *bssid; struct cfg80211_bss *bss; u16 status; } links[IEEE80211_MLD_MAX_NUM_LINKS]; }; /** * cfg80211_connect_done - notify cfg80211 of connection result * * @dev: network device * @params: connection response parameters * @gfp: allocation flags * * It should be called by the underlying driver once execution of the connection * request from connect() has been completed. This is similar to * cfg80211_connect_bss(), but takes a structure pointer for connection response * parameters. Only one of the functions among cfg80211_connect_bss(), * cfg80211_connect_result(), cfg80211_connect_timeout(), * and cfg80211_connect_done() should be called. */ void cfg80211_connect_done(struct net_device *dev, struct cfg80211_connect_resp_params *params, gfp_t gfp); /** * cfg80211_connect_bss - notify cfg80211 of connection result * * @dev: network device * @bssid: the BSSID of the AP * @bss: Entry of bss to which the STA got connected; can be obtained through * cfg80211_get_bss() (may be %NULL). But it is recommended to store the * bss from the connect_request and hold a reference to it and return * through this param to avoid a warning if the bss is expired during the * connection, esp. for those drivers implementing connect op. * Only one parameter among @bssid and @bss needs to be specified. * @req_ie: association request IEs (may be %NULL) * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) * @resp_ie_len: assoc response IEs length * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you * the real status code for failures. If this call is used to report a * failure due to a timeout (e.g., not receiving an Authentication frame * from the AP) instead of an explicit rejection by the AP, -1 is used to * indicate that this is a failure, but without a status code. * @timeout_reason is used to report the reason for the timeout in that * case. * @gfp: allocation flags * @timeout_reason: reason for connection timeout. This is used when the * connection fails due to a timeout instead of an explicit rejection from * the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is * not known. This value is used only if @status < 0 to indicate that the * failure is due to a timeout and not due to explicit rejection by the AP. * This value is ignored in other cases (@status >= 0). * * It should be called by the underlying driver once execution of the connection * request from connect() has been completed. This is similar to * cfg80211_connect_result(), but with the option of identifying the exact bss * entry for the connection. Only one of the functions among * cfg80211_connect_bss(), cfg80211_connect_result(), * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
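 *
 * For example (illustrative sketch), a fullmac driver's connect
 * completion handler might report success as:
 *
 *	cfg80211_connect_bss(dev, bssid, bss, req_ie, req_ie_len,
 *			     resp_ie, resp_ie_len, WLAN_STATUS_SUCCESS,
 *			     GFP_KERNEL, NL80211_TIMEOUT_UNSPECIFIED);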
*/ static inline void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, struct cfg80211_bss *bss, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, int status, gfp_t gfp, enum nl80211_timeout_reason timeout_reason) { struct cfg80211_connect_resp_params params; memset(&params, 0, sizeof(params)); params.status = status; params.links[0].bssid = bssid; params.links[0].bss = bss; params.req_ie = req_ie; params.req_ie_len = req_ie_len; params.resp_ie = resp_ie; params.resp_ie_len = resp_ie_len; params.timeout_reason = timeout_reason; cfg80211_connect_done(dev, &params, gfp); } /** * cfg80211_connect_result - notify cfg80211 of connection result * * @dev: network device * @bssid: the BSSID of the AP * @req_ie: association request IEs (may be %NULL) * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) * @resp_ie_len: assoc response IEs length * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you * the real status code for failures. * @gfp: allocation flags * * It should be called by the underlying driver once execution of the connection * request from connect() has been completed. This is similar to * cfg80211_connect_bss() which allows the exact bss entry to be specified. Only * one of the functions among cfg80211_connect_bss(), cfg80211_connect_result(), * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called. */ static inline void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, u16 status, gfp_t gfp) { cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie, resp_ie_len, status, gfp, NL80211_TIMEOUT_UNSPECIFIED); } /** * cfg80211_connect_timeout - notify cfg80211 of connection timeout * * @dev: network device * @bssid: the BSSID of the AP * @req_ie: association request IEs (may be %NULL) * @req_ie_len: association request IEs length * @gfp: allocation flags * @timeout_reason: reason for connection timeout. * * It should be called by the underlying driver whenever connect() has failed * in a sequence where no explicit authentication/association rejection was * received from the AP. This could happen, e.g., due to not being able to send * out the Authentication or Association Request frame or timing out while * waiting for the response. Only one of the functions among * cfg80211_connect_bss(), cfg80211_connect_result(), * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called. */ static inline void cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, gfp_t gfp, enum nl80211_timeout_reason timeout_reason) { cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, NULL, 0, -1, gfp, timeout_reason); } /** * struct cfg80211_roam_info - driver initiated roaming information * * @req_ie: association request IEs (may be %NULL) * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) * @resp_ie_len: assoc response IEs length * @fils: FILS related roaming information. * @valid_links: For MLO roaming, BIT mask of the new valid links is set. * Otherwise zero. * @ap_mld_addr: For MLO roaming, MLD address of the new AP. Otherwise %NULL. * @links: For MLO roaming, contains new link info for the valid links set in * @valid_links. For non-MLO roaming, links[0] contains the new AP info.
* @links.addr: For MLO roaming, MAC address of the STA link. Otherwise %NULL. * @links.bssid: For MLO roaming, MAC address of the new AP link. For non-MLO * roaming, links[0].bssid points to the BSSID of the new AP. May be * %NULL if %links.bss is set. * @links.channel: the channel of the new AP. * @links.bss: For MLO roaming, entry of new bss to which STA link got * roamed. For non-MLO roaming, links[0].bss points to entry of bss to * which STA got roamed (may be %NULL if %links.bssid is set) */ struct cfg80211_roam_info { const u8 *req_ie; size_t req_ie_len; const u8 *resp_ie; size_t resp_ie_len; struct cfg80211_fils_resp_params fils; const u8 *ap_mld_addr; u16 valid_links; struct { const u8 *addr; const u8 *bssid; struct ieee80211_channel *channel; struct cfg80211_bss *bss; } links[IEEE80211_MLD_MAX_NUM_LINKS]; }; /** * cfg80211_roamed - notify cfg80211 of roaming * * @dev: network device * @info: information about the new BSS, see &struct cfg80211_roam_info * @gfp: allocation flags * * This function may be called with the driver passing either the BSSID of the * new AP or passing the bss entry to avoid a race in timeout of the bss entry. * It should be called by the underlying driver whenever it roamed from one AP * to another while connected. Drivers which have roaming implemented in * firmware should pass the bss entry to avoid a race in bss entry timeout where * the bss entry of the new AP is seen in the driver, but gets timed out by the * time it is accessed in __cfg80211_roamed() due to delay in scheduling * rdev->event_work. In case of any failures, the reference is released * either in cfg80211_roamed() or in __cfg80211_roamed(). Otherwise, it will be * released while disconnecting from the current bss. */ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info, gfp_t gfp); /** * cfg80211_port_authorized - notify cfg80211 of successful security association * * @dev: network device * @peer_addr: BSSID of the AP/P2P GO in case of STA/GC or STA/GC MAC address * in case of AP/P2P GO * @td_bitmap: transition disable policy * @td_bitmap_len: Length of transition disable policy * @gfp: allocation flags * * This function should be called by a driver that supports 4 way handshake * offload after a security association was successfully established (i.e., * the 4 way handshake was completed successfully). The call to this function * should be preceded with a call to cfg80211_connect_result(), * cfg80211_connect_done(), cfg80211_connect_bss() or cfg80211_roamed() to * indicate the 802.11 association. * This function can also be called by an AP/P2P GO driver that supports * authentication offload. In this case the @peer_addr passed is that of the * associated STA/GC. */ void cfg80211_port_authorized(struct net_device *dev, const u8 *peer_addr, const u8 *td_bitmap, u8 td_bitmap_len, gfp_t gfp); /** * cfg80211_disconnected - notify cfg80211 that connection was dropped * * @dev: network device * @ie: information elements of the deauth/disassoc frame (may be %NULL) * @ie_len: length of IEs * @reason: reason code for the disconnection, set it to 0 if unknown * @locally_generated: disconnection was requested locally * @gfp: allocation flags * * After it calls this function, the driver should enter an idle state * and not try to connect to any AP any more.
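 *
 * For example (illustrative sketch), on a firmware link-down event with
 * no deauth IEs available, a driver might report:
 *
 *	cfg80211_disconnected(dev, reason_code, NULL, 0, false, GFP_KERNEL);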
*/ void cfg80211_disconnected(struct net_device *dev, u16 reason, const u8 *ie, size_t ie_len, bool locally_generated, gfp_t gfp); /** * cfg80211_ready_on_channel - notification of remain_on_channel start * @wdev: wireless device * @cookie: the request cookie * @chan: The current channel (from remain_on_channel request) * @duration: Duration in milliseconds that the driver intends to remain on the * channel * @gfp: allocation flags */ void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, unsigned int duration, gfp_t gfp); /** * cfg80211_remain_on_channel_expired - remain_on_channel duration expired * @wdev: wireless device * @cookie: the request cookie * @chan: The current channel (from remain_on_channel request) * @gfp: allocation flags */ void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, gfp_t gfp); /** * cfg80211_tx_mgmt_expired - tx_mgmt duration expired * @wdev: wireless device * @cookie: the requested cookie * @chan: The current channel (from tx_mgmt request) * @gfp: allocation flags */ void cfg80211_tx_mgmt_expired(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, gfp_t gfp); /** * cfg80211_sinfo_alloc_tid_stats - allocate per-tid statistics. * * @sinfo: the station information * @gfp: allocation flags * * Return: 0 on success. Non-zero on error. */ int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp); /** * cfg80211_link_sinfo_alloc_tid_stats - allocate per-tid statistics. * * @link_sinfo: the link station information * @gfp: allocation flags * * Return: 0 on success. Non-zero on error. */ int cfg80211_link_sinfo_alloc_tid_stats(struct link_station_info *link_sinfo, gfp_t gfp); /** * cfg80211_sinfo_release_content - release contents of station info * @sinfo: the station information * * Releases any potentially allocated sub-information of the station * information, but not the struct itself (since it's typically on * the stack.) */ static inline void cfg80211_sinfo_release_content(struct station_info *sinfo) { kfree(sinfo->pertid); for (int link_id = 0; link_id < ARRAY_SIZE(sinfo->links); link_id++) { if (sinfo->links[link_id]) { kfree(sinfo->links[link_id]->pertid); kfree(sinfo->links[link_id]); } } } /** * cfg80211_new_sta - notify userspace about station * * @dev: the netdev * @mac_addr: the station's address * @sinfo: the station information * @gfp: allocation flags */ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo, gfp_t gfp); /** * cfg80211_del_sta_sinfo - notify userspace about deletion of a station * @dev: the netdev * @mac_addr: the station's address. For MLD station, MLD address is used. * @sinfo: the station information/statistics * @gfp: allocation flags */ void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo, gfp_t gfp); /** * cfg80211_del_sta - notify userspace about deletion of a station * * @dev: the netdev * @mac_addr: the station's address. For MLD station, MLD address is used.
* @gfp: allocation flags */ static inline void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp) { cfg80211_del_sta_sinfo(dev, mac_addr, NULL, gfp); } /** * cfg80211_conn_failed - connection request failed notification * * @dev: the netdev * @mac_addr: the station's address * @reason: the reason for connection failure * @gfp: allocation flags * * This function is called whenever a station tries to connect to an AP but * the AP rejects the connection for some reason. * * The reason for connection failure can be any of the values from * &enum nl80211_connect_failed_reason. */ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, enum nl80211_connect_failed_reason reason, gfp_t gfp); /** * struct cfg80211_rx_info - received management frame info * * @freq: Frequency on which the frame was received in kHz * @sig_dbm: signal strength in dBm, or 0 if unknown * @have_link_id: indicates the frame was received on a link of * an MLD, i.e. the @link_id field is valid * @link_id: the ID of the link the frame was received on * @buf: Management frame (header + body) * @len: length of the frame data * @flags: flags, as defined in &enum nl80211_rxmgmt_flags * @rx_tstamp: Hardware timestamp of frame RX in nanoseconds * @ack_tstamp: Hardware timestamp of ack TX in nanoseconds */ struct cfg80211_rx_info { int freq; int sig_dbm; bool have_link_id; u8 link_id; const u8 *buf; size_t len; u32 flags; u64 rx_tstamp; u64 ack_tstamp; }; /** * cfg80211_rx_mgmt_ext - management frame notification with extended info * @wdev: wireless device receiving the frame * @info: RX info as defined in struct cfg80211_rx_info * * This function is called whenever an Action frame is received for a station * mode interface, but is not processed in kernel. * * Return: %true if a user space application has registered for this frame. * For action frames, that makes it responsible for rejecting unrecognized * action frames; %false otherwise, in which case for action frames the * driver is responsible for rejecting the frame. */ bool cfg80211_rx_mgmt_ext(struct wireless_dev *wdev, struct cfg80211_rx_info *info); /** * cfg80211_rx_mgmt_khz - notification of received, unprocessed management frame * @wdev: wireless device receiving the frame * @freq: Frequency on which the frame was received in KHz * @sig_dbm: signal strength in dBm, or 0 if unknown * @buf: Management frame (header + body) * @len: length of the frame data * @flags: flags, as defined in enum nl80211_rxmgmt_flags * * This function is called whenever an Action frame is received for a station * mode interface, but is not processed in kernel. * * Return: %true if a user space application has registered for this frame. * For action frames, that makes it responsible for rejecting unrecognized * action frames; %false otherwise, in which case for action frames the * driver is responsible for rejecting the frame.
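 *
 * For example (illustrative sketch; freq_khz, sig_dbm, buf and len are
 * hypothetical locals), a driver might forward an unhandled Action frame:
 *
 *	if (!cfg80211_rx_mgmt_khz(wdev, freq_khz, sig_dbm, buf, len, 0))
 *		... no userspace listener, the driver rejects the frame ...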
*/ static inline bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq, int sig_dbm, const u8 *buf, size_t len, u32 flags) { struct cfg80211_rx_info info = { .freq = freq, .sig_dbm = sig_dbm, .buf = buf, .len = len, .flags = flags }; return cfg80211_rx_mgmt_ext(wdev, &info); } /** * cfg80211_rx_mgmt - notification of received, unprocessed management frame * @wdev: wireless device receiving the frame * @freq: Frequency on which the frame was received in MHz * @sig_dbm: signal strength in dBm, or 0 if unknown * @buf: Management frame (header + body) * @len: length of the frame data * @flags: flags, as defined in enum nl80211_rxmgmt_flags * * This function is called whenever an Action frame is received for a station * mode interface, but is not processed in kernel. * * Return: %true if a user space application has registered for this frame. * For action frames, that makes it responsible for rejecting unrecognized * action frames; %false otherwise, in which case for action frames the * driver is responsible for rejecting the frame. */ static inline bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm, const u8 *buf, size_t len, u32 flags) { struct cfg80211_rx_info info = { .freq = MHZ_TO_KHZ(freq), .sig_dbm = sig_dbm, .buf = buf, .len = len, .flags = flags }; return cfg80211_rx_mgmt_ext(wdev, &info); } /** * struct cfg80211_tx_status - TX status for management frame information * * @cookie: Cookie returned by cfg80211_ops::mgmt_tx() * @tx_tstamp: hardware TX timestamp in nanoseconds * @ack_tstamp: hardware ack RX timestamp in nanoseconds * @buf: Management frame (header + body) * @len: length of the frame data * @ack: Whether frame was acknowledged */ struct cfg80211_tx_status { u64 cookie; u64 tx_tstamp; u64 ack_tstamp; const u8 *buf; size_t len; bool ack; }; /** * cfg80211_mgmt_tx_status_ext - TX status notification with extended info * @wdev: wireless device receiving the frame * @status: TX status data * @gfp: context flags * * This function is called whenever a management frame was requested to be * transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the * transmission attempt with extended info. */ void cfg80211_mgmt_tx_status_ext(struct wireless_dev *wdev, struct cfg80211_tx_status *status, gfp_t gfp); /** * cfg80211_mgmt_tx_status - notification of TX status for management frame * @wdev: wireless device receiving the frame * @cookie: Cookie returned by cfg80211_ops::mgmt_tx() * @buf: Management frame (header + body) * @len: length of the frame data * @ack: Whether frame was acknowledged * @gfp: context flags * * This function is called whenever a management frame was requested to be * transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the * transmission attempt. 
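 *
 * For example (illustrative sketch), in a driver's TX-completion handler:
 *
 *	cfg80211_mgmt_tx_status(wdev, cookie, skb->data, skb->len,
 *				acked, GFP_ATOMIC);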
*/ static inline void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, const u8 *buf, size_t len, bool ack, gfp_t gfp) { struct cfg80211_tx_status status = { .cookie = cookie, .buf = buf, .len = len, .ack = ack }; cfg80211_mgmt_tx_status_ext(wdev, &status, gfp); } /** * cfg80211_control_port_tx_status - notification of TX status for control * port frames * @wdev: wireless device receiving the frame * @cookie: Cookie returned by cfg80211_ops::tx_control_port() * @buf: Data frame (header + body) * @len: length of the frame data * @ack: Whether frame was acknowledged * @gfp: context flags * * This function is called whenever a control port frame was requested to be * transmitted with cfg80211_ops::tx_control_port() to report the TX status of * the transmission attempt. */ void cfg80211_control_port_tx_status(struct wireless_dev *wdev, u64 cookie, const u8 *buf, size_t len, bool ack, gfp_t gfp); /** * cfg80211_rx_control_port - notification about a received control port frame * @dev: The device the frame matched to * @skb: The skb with the control port frame. It is assumed that the skb * is 802.3 formatted (with 802.3 header). The skb can be non-linear. * This function does not take ownership of the skb, so the caller is * responsible for any cleanup. The caller must also ensure that * skb->protocol is set appropriately. * @unencrypted: Whether the frame was received unencrypted * @link_id: the link the frame was received on, -1 if not applicable or unknown * * This function is used to inform userspace about a received control port * frame. It should only be used if userspace indicated it wants to receive * control port frames over nl80211. * * The frame is the data portion of the 802.3 or 802.11 data frame with all * network layer headers removed (e.g. the raw EAPoL frame). * * Return: %true if the frame was passed to userspace */ bool cfg80211_rx_control_port(struct net_device *dev, struct sk_buff *skb, bool unencrypted, int link_id); /** * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event * @dev: network device * @rssi_event: the triggered RSSI event * @rssi_level: new RSSI level value or 0 if not available * @gfp: context flags * * This function is called when a configured connection quality monitoring * rssi threshold reached event occurs. */ void cfg80211_cqm_rssi_notify(struct net_device *dev, enum nl80211_cqm_rssi_threshold_event rssi_event, s32 rssi_level, gfp_t gfp); /** * cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer * @dev: network device * @peer: peer's MAC address * @num_packets: how many packets were lost -- should be a fixed threshold * but probably no less than maybe 50, or maybe a throughput dependent * threshold (to account for temporary interference) * @gfp: context flags */ void cfg80211_cqm_pktloss_notify(struct net_device *dev, const u8 *peer, u32 num_packets, gfp_t gfp); /** * cfg80211_cqm_txe_notify - TX error rate event * @dev: network device * @peer: peer's MAC address * @num_packets: how many packets were lost * @rate: % of packets which failed transmission * @intvl: interval (in s) over which the TX failure threshold was breached. * @gfp: context flags * * Notify userspace when the configured percentage of TX failures over the * number of packets in a given interval is exceeded.
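 *
 * For example (illustrative sketch), to report that 30% of 100 packets to
 * the peer failed within a 5 second interval:
 *
 *	cfg80211_cqm_txe_notify(dev, peer_addr, 100, 30, 5, GFP_KERNEL);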
*/ void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer, u32 num_packets, u32 rate, u32 intvl, gfp_t gfp); /** * cfg80211_cqm_beacon_loss_notify - beacon loss event * @dev: network device * @gfp: context flags * * Notify userspace about beacon loss from the connected AP. */ void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp); /** * __cfg80211_radar_event - radar detection event * @wiphy: the wiphy * @chandef: chandef for the current channel * @offchan: the radar has been detected on the offchannel chain * @gfp: context flags * * This function is called when a radar is detected on the current channel. */ void __cfg80211_radar_event(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, bool offchan, gfp_t gfp); static inline void cfg80211_radar_event(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, gfp_t gfp) { __cfg80211_radar_event(wiphy, chandef, false, gfp); } static inline void cfg80211_background_radar_event(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, gfp_t gfp) { __cfg80211_radar_event(wiphy, chandef, true, gfp); } /** * cfg80211_sta_opmode_change_notify - STA's ht/vht operation mode change event * @dev: network device * @mac: MAC address of a station which opmode got modified * @sta_opmode: station's current opmode value * @gfp: context flags * * The driver should call this function when a station's opmode is modified * via an action frame. */ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac, struct sta_opmode_info *sta_opmode, gfp_t gfp); /** * cfg80211_cac_event - Channel availability check (CAC) event * @netdev: network device * @chandef: chandef for the current channel * @event: type of event * @gfp: context flags * @link_id: valid link_id for MLO operation or 0 otherwise. * * This function is called when a Channel availability check (CAC) is finished * or aborted. This must be called to notify the completion of a CAC process, * also by full-MAC drivers. */ void cfg80211_cac_event(struct net_device *netdev, const struct cfg80211_chan_def *chandef, enum nl80211_radar_event event, gfp_t gfp, unsigned int link_id); /** * cfg80211_background_cac_abort - Channel Availability Check offchan abort event * @wiphy: the wiphy * * This function is called by the driver when a Channel Availability Check * (CAC) is aborted by an offchannel dedicated chain. */ void cfg80211_background_cac_abort(struct wiphy *wiphy); /** * cfg80211_gtk_rekey_notify - notify userspace about driver rekeying * @dev: network device * @bssid: BSSID of AP (to avoid races) * @replay_ctr: new replay counter * @gfp: allocation flags */ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid, const u8 *replay_ctr, gfp_t gfp); /** * cfg80211_pmksa_candidate_notify - notify about PMKSA caching candidate * @dev: network device * @index: candidate index (the smaller the index, the higher the priority) * @bssid: BSSID of AP * @preauth: Whether AP advertises support for RSN pre-authentication * @gfp: allocation flags */ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, const u8 *bssid, bool preauth, gfp_t gfp); /** * cfg80211_rx_spurious_frame - inform userspace about a spurious frame * @dev: The device the frame matched to * @link_id: the link the frame was received on, -1 if not applicable or unknown * @addr: the transmitter address * @gfp: context flags * * This function is used in AP mode (only!) to inform userspace that * a spurious class 3 frame was received, to be able to deauth the * sender.
* Return: %true if the frame was passed to userspace (or this failed * for a reason other than not having a subscription.) */ bool cfg80211_rx_spurious_frame(struct net_device *dev, const u8 *addr, int link_id, gfp_t gfp); /** * cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame * @dev: The device the frame matched to * @addr: the transmitter address * @link_id: the link the frame was received on, -1 if not applicable or unknown * @gfp: context flags * * This function is used in AP mode (only!) to inform userspace that * an associated station sent a 4addr frame but that wasn't expected. * It is allowed and desirable to send this event only once for each * station to avoid event flooding. * Return: %true if the frame was passed to userspace (or this failed * for a reason other than not having a subscription.) */ bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, const u8 *addr, int link_id, gfp_t gfp); /** * cfg80211_probe_status - notify userspace about probe status * @dev: the device the probe was sent on * @addr: the address of the peer * @cookie: the cookie filled in @probe_client previously * @acked: indicates whether the probe was acked or not * @ack_signal: signal strength (in dBm) of the ACK frame. * @is_valid_ack_signal: indicates whether @ack_signal is valid * @gfp: allocation flags */ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, u64 cookie, bool acked, s32 ack_signal, bool is_valid_ack_signal, gfp_t gfp); /** * cfg80211_report_obss_beacon_khz - report beacon from other APs * @wiphy: The wiphy that received the beacon * @frame: the frame * @len: length of the frame * @freq: frequency the frame was received on in KHz * @sig_dbm: signal strength in dBm, or 0 if unknown * * Use this function to report to userspace when a beacon was * received. It is not useful to call this when there is no * netdev that is in AP/GO mode. */ void cfg80211_report_obss_beacon_khz(struct wiphy *wiphy, const u8 *frame, size_t len, int freq, int sig_dbm); /** * cfg80211_report_obss_beacon - report beacon from other APs * @wiphy: The wiphy that received the beacon * @frame: the frame * @len: length of the frame * @freq: frequency the frame was received on * @sig_dbm: signal strength in dBm, or 0 if unknown * * Use this function to report to userspace when a beacon was * received. It is not useful to call this when there is no * netdev that is in AP/GO mode. */ static inline void cfg80211_report_obss_beacon(struct wiphy *wiphy, const u8 *frame, size_t len, int freq, int sig_dbm) { cfg80211_report_obss_beacon_khz(wiphy, frame, len, MHZ_TO_KHZ(freq), sig_dbm); } /** * struct cfg80211_beaconing_check_config - beacon check configuration * @iftype: the interface type to check for * @relax: allow IR-relaxation conditions to apply (e.g. another * interface connected already on the same channel) * NOTE: If this is set, the wiphy mutex must be held. * @reg_power: &enum ieee80211_ap_reg_power value indicating the * advertised/used 6 GHz regulatory power setting */ struct cfg80211_beaconing_check_config { enum nl80211_iftype iftype; enum ieee80211_ap_reg_power reg_power; bool relax; }; /** * cfg80211_reg_check_beaconing - check if beaconing is allowed * @wiphy: the wiphy * @chandef: the channel definition * @cfg: additional parameters for the checking * * Return: %true if there is no secondary channel or the secondary channel(s) * can be used for beaconing (i.e. is not a radar channel etc.)
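 *
 * For example (a sketch; @chandef is assumed to be configured already), an
 * AP-mode flow could validate its channel like this:
 *
 *	struct cfg80211_beaconing_check_config cfg = {
 *		.iftype = NL80211_IFTYPE_AP,
 *		.reg_power = IEEE80211_REG_LPI_AP,
 *		.relax = true,	// requires the wiphy mutex to be held
 *	};
 *
 *	if (!cfg80211_reg_check_beaconing(wiphy, chandef, &cfg))
 *		return -EINVAL;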
*/ bool cfg80211_reg_check_beaconing(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, struct cfg80211_beaconing_check_config *cfg); /** * cfg80211_reg_can_beacon - check if beaconing is allowed * @wiphy: the wiphy * @chandef: the channel definition * @iftype: interface type * * Return: %true if there is no secondary channel or the secondary channel(s) * can be used for beaconing (i.e. is not a radar channel etc.) */ static inline bool cfg80211_reg_can_beacon(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype) { struct cfg80211_beaconing_check_config config = { .iftype = iftype, }; return cfg80211_reg_check_beaconing(wiphy, chandef, &config); } /** * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation * @wiphy: the wiphy * @chandef: the channel definition * @iftype: interface type * * Return: %true if there is no secondary channel or the secondary channel(s) * can be used for beaconing (i.e. is not a radar channel etc.). This version * also checks if IR-relaxation conditions apply, to allow beaconing under * more permissive conditions. * * Context: Requires the wiphy mutex to be held. */ static inline bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype) { struct cfg80211_beaconing_check_config config = { .iftype = iftype, .relax = true, }; return cfg80211_reg_check_beaconing(wiphy, chandef, &config); } /** * cfg80211_ch_switch_notify - update wdev channel and notify userspace * @dev: the device which switched channels * @chandef: the new channel definition * @link_id: the link ID for MLO, must be 0 for non-MLO * * The caller must hold the wiphy mutex; therefore this must only be called * from sleepable driver context! */ void cfg80211_ch_switch_notify(struct net_device *dev, struct cfg80211_chan_def *chandef, unsigned int link_id); /** * cfg80211_ch_switch_started_notify - notify channel switch start * @dev: the device on which the channel switch started * @chandef: the future channel definition * @link_id: the link ID for MLO, must be 0 for non-MLO * @count: the number of TBTTs until the channel switch happens * @quiet: whether or not immediate quiet was requested by the AP * * Inform userspace about the channel switch that has just * started, so that it can take appropriate actions (e.g. starting * a channel switch on other vifs), if necessary. */ void cfg80211_ch_switch_started_notify(struct net_device *dev, struct cfg80211_chan_def *chandef, unsigned int link_id, u8 count, bool quiet); /** * ieee80211_operating_class_to_band - convert operating class to band * * @operating_class: the operating class to convert * @band: band pointer to fill * * Return: %true if the conversion was successful, %false otherwise. */ bool ieee80211_operating_class_to_band(u8 operating_class, enum nl80211_band *band); /** * ieee80211_operating_class_to_chandef - convert operating class to chandef * * @operating_class: the operating class to convert * @chan: the ieee80211_channel to convert * @chandef: a pointer to the resulting chandef * * Return: %true if the conversion was successful, %false otherwise. */ bool ieee80211_operating_class_to_chandef(u8 operating_class, struct ieee80211_channel *chan, struct cfg80211_chan_def *chandef); /** * ieee80211_chandef_to_operating_class - convert chandef to operating class * * @chandef: the chandef to convert * @op_class: a pointer to the resulting operating class * * Return: %true if the conversion was successful, %false otherwise.
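 *
 * Typical use is filling a (Supported) Operating Classes element; a sketch:
 *
 *	u8 op_class;
 *
 *	if (!ieee80211_chandef_to_operating_class(chandef, &op_class))
 *		return -EINVAL;	// chandef has no matching operating class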
*/ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, u8 *op_class); /** * ieee80211_chandef_to_khz - convert chandef to frequency in KHz * * @chandef: the chandef to convert * * Return: the center frequency of chandef (1st segment) in KHz. */ static inline u32 ieee80211_chandef_to_khz(const struct cfg80211_chan_def *chandef) { return MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset; } /** * cfg80211_tdls_oper_request - request userspace to perform TDLS operation * @dev: the device on which the operation is requested * @peer: the MAC address of the peer device * @oper: the requested TDLS operation (NL80211_TDLS_SETUP or * NL80211_TDLS_TEARDOWN) * @reason_code: the reason code for teardown request * @gfp: allocation flags * * This function is used to request userspace to perform a TDLS operation that * requires knowledge of keys, i.e., link setup or teardown when the AP * connection uses encryption. This is an optional mechanism for the driver to * use if it can automatically determine when a TDLS link could be useful (e.g., * based on traffic and signal strength for a peer). */ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper, u16 reason_code, gfp_t gfp); /** * cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units) * @rate: given rate_info to calculate bitrate from * * Return: calculated bitrate */ u32 cfg80211_calculate_bitrate(struct rate_info *rate); /** * cfg80211_unregister_wdev - remove the given wdev * @wdev: struct wireless_dev to remove * * This function removes the device so it can no longer be used. It is necessary * to call this function even when cfg80211 requests the removal of the device * by calling the del_virtual_intf() callback. The function must also be called * when the driver wishes to unregister the wdev, e.g. when the hardware device * is unbound from the driver. * * Context: Requires the RTNL and wiphy mutex to be held. */ void cfg80211_unregister_wdev(struct wireless_dev *wdev); /** * cfg80211_register_netdevice - register the given netdev * @dev: the netdev to register * * Note: In contexts coming from cfg80211 callbacks, you must call this rather * than register_netdevice(); register_netdev() is impossible as the RTNL is * held. Otherwise, both register_netdevice() and register_netdev() are usable * instead as well. * * Context: Requires the RTNL and wiphy mutex to be held. * * Return: 0 on success. Non-zero on error. */ int cfg80211_register_netdevice(struct net_device *dev); /** * cfg80211_unregister_netdevice - unregister the given netdev * @dev: the netdev to unregister * * Note: In contexts coming from cfg80211 callbacks, you must call this rather * than unregister_netdevice(); unregister_netdev() is impossible as the RTNL * is held. Otherwise, both unregister_netdevice() and unregister_netdev() are * usable instead as well. * * Context: Requires the RTNL and wiphy mutex to be held.
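 *
 * Together with cfg80211_register_netdevice() this gives the usual pairing
 * in a driver's add/del_virtual_intf() callbacks (a sketch, error handling
 * elided):
 *
 *	// in add_virtual_intf():
 *	dev->ieee80211_ptr = wdev;
 *	err = cfg80211_register_netdevice(dev);
 *
 *	// in del_virtual_intf():
 *	cfg80211_unregister_netdevice(dev);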
*/ static inline void cfg80211_unregister_netdevice(struct net_device *dev) { #if IS_ENABLED(CONFIG_CFG80211) cfg80211_unregister_wdev(dev->ieee80211_ptr); #endif } /** * struct cfg80211_ft_event_params - FT Information Elements * @ies: FT IEs * @ies_len: length of the FT IEs in bytes * @target_ap: target AP's MAC address * @ric_ies: RIC IE * @ric_ies_len: length of the RIC IE in bytes */ struct cfg80211_ft_event_params { const u8 *ies; size_t ies_len; const u8 *target_ap; const u8 *ric_ies; size_t ric_ies_len; }; /** * cfg80211_ft_event - notify userspace about FT IE and RIC IE * @netdev: network device * @ft_event: IE information */ void cfg80211_ft_event(struct net_device *netdev, struct cfg80211_ft_event_params *ft_event); /** * cfg80211_get_p2p_attr - find and copy a P2P attribute from IE buffer * @ies: the input IE buffer * @len: the input length * @attr: the attribute ID to find * @buf: output buffer, can be %NULL if the data isn't needed, e.g. * if the function is only called to get the needed buffer size * @bufsize: size of the output buffer * * The function finds a given P2P attribute in the (vendor) IEs and * copies its contents to the given buffer. * * Return: A negative error code (-%EILSEQ or -%ENOENT) if the data is * malformed or the attribute can't be found (respectively), or the * length of the found attribute (which can be zero). */ int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len, enum ieee80211_p2p_attr_id attr, u8 *buf, unsigned int bufsize); /** * ieee80211_ie_split_ric - split an IE buffer according to ordering (with RIC) * @ies: the IE buffer * @ielen: the length of the IE buffer * @ids: an array with element IDs that are allowed before * the split. A WLAN_EID_EXTENSION value means that the next * EID in the list is a sub-element of the EXTENSION IE. * @n_ids: the size of the element ID array * @after_ric: array of IE types that come after the RIC element * @n_after_ric: size of the @after_ric array * @offset: offset where to start splitting in the buffer * * This function splits an IE buffer by updating the @offset * variable to point to the location where the buffer should be * split. * * It assumes that the given IE buffer is well-formed; this * has to be guaranteed by the caller! * * It also assumes that the IEs in the buffer are ordered * correctly; if not, the result of using this function will not * be ordered correctly either, i.e. it does no reordering. * * Return: The offset where the next part of the buffer starts, which * may be @ielen if the entire (remainder) of the buffer should be * used. */ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen, const u8 *ids, int n_ids, const u8 *after_ric, int n_after_ric, size_t offset); /** * ieee80211_ie_split - split an IE buffer according to ordering * @ies: the IE buffer * @ielen: the length of the IE buffer * @ids: an array with element IDs that are allowed before * the split. A WLAN_EID_EXTENSION value means that the next * EID in the list is a sub-element of the EXTENSION IE. * @n_ids: the size of the element ID array * @offset: offset where to start splitting in the buffer * * This function splits an IE buffer by updating the @offset * variable to point to the location where the buffer should be * split. * * It assumes that the given IE buffer is well-formed; this * has to be guaranteed by the caller! * * It also assumes that the IEs in the buffer are ordered * correctly; if not, the result of using this function will not * be ordered correctly either, i.e. it does no reordering.
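 *
 * For example, to insert a new element at the correct position in an
 * ordered buffer, one might split it first (a sketch; @ies and @ielen come
 * from the caller):
 *
 *	static const u8 before[] = { WLAN_EID_SSID, WLAN_EID_SUPP_RATES };
 *	size_t split = ieee80211_ie_split(ies, ielen, before,
 *					  ARRAY_SIZE(before), 0);
 *	// copy ies[0..split) first, then the new element, then the rest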
* * Return: The offset where the next part of the buffer starts, which * may be @ielen if the entire (remainder) of the buffer should be * used. */ static inline size_t ieee80211_ie_split(const u8 *ies, size_t ielen, const u8 *ids, int n_ids, size_t offset) { return ieee80211_ie_split_ric(ies, ielen, ids, n_ids, NULL, 0, offset); } /** * ieee80211_fragment_element - fragment the last element in skb * @skb: The skbuf that the element was added to * @len_pos: Pointer to length of the element to fragment * @frag_id: The element ID to use for fragments * * This function fragments all data after @len_pos, adding fragmentation * elements with the given ID as appropriate. The SKB will grow in size * accordingly. */ void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id); /** * cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN * @wdev: the wireless device reporting the wakeup * @wakeup: the wakeup report * @gfp: allocation flags * * This function reports that the given device woke up. If it * caused the wakeup, report the reason(s), otherwise you may * pass %NULL as the @wakeup parameter to advertise that something * else caused the wakeup. */ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, struct cfg80211_wowlan_wakeup *wakeup, gfp_t gfp); /** * cfg80211_crit_proto_stopped() - indicate critical protocol stopped by driver. * * @wdev: the wireless device for which critical protocol is stopped. * @gfp: allocation flags * * This function can be called by the driver to indicate it has reverted * operation back to normal. One reason could be that the duration given * by .crit_proto_start() has expired. */ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp); /** * ieee80211_get_num_supported_channels - get number of channels device has * @wiphy: the wiphy * * Return: the number of channels supported by the device. */ unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy); /** * cfg80211_check_combinations - check interface combinations * * @wiphy: the wiphy * @params: the interface combinations parameter * * This function can be called by the driver to check whether a * combination of interfaces and their types are allowed according to * the interface combinations. * * Return: 0 if combinations are allowed. Non-zero on error. */ int cfg80211_check_combinations(struct wiphy *wiphy, struct iface_combination_params *params); /** * cfg80211_iter_combinations - iterate over matching combinations * * @wiphy: the wiphy * @params: the interface combinations parameter * @iter: function to call for each matching combination * @data: pointer to pass to iter function * * This function can be called by the driver to check what possible * combinations it fits in at a given moment, e.g. for channel switching * purposes. * * Return: 0 on success. Non-zero on error. 
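 *
 * A sketch of an iterator that simply counts the matching combinations
 * (the counter is caller state passed via @data):
 *
 *	static void count_iter(const struct ieee80211_iface_combination *c,
 *			       void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int n_matches = 0;
 *	int err = cfg80211_iter_combinations(wiphy, params, count_iter,
 *					     &n_matches);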
*/ int cfg80211_iter_combinations(struct wiphy *wiphy, struct iface_combination_params *params, void (*iter)(const struct ieee80211_iface_combination *c, void *data), void *data); /** * cfg80211_get_radio_idx_by_chan - get the radio index by the channel * * @wiphy: the wiphy * @chan: channel for which the supported radio index is required * * Return: radio index on success or -EINVAL otherwise */ int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy, const struct ieee80211_channel *chan); /** * cfg80211_stop_iface - trigger interface disconnection * * @wiphy: the wiphy * @wdev: wireless device * @gfp: context flags * * Trigger interface to be stopped as if AP was stopped, IBSS/mesh left, STA * disconnected. * * Note: This doesn't need any locks and is asynchronous. */ void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev, gfp_t gfp); /** * cfg80211_shutdown_all_interfaces - shut down all interfaces for a wiphy * @wiphy: the wiphy to shut down * * This function shuts down all interfaces belonging to this wiphy by * calling dev_close() (and treating non-netdev interfaces as needed). * It shouldn't really be used unless there are some fatal device errors * that really can't be recovered in any other way. * * Callers must hold the RTNL and be able to deal with callbacks into * the driver while the function is running. */ void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy); /** * wiphy_ext_feature_set - set the extended feature flag * * @wiphy: the wiphy to modify. * @ftidx: extended feature bit index. * * The extended features are flagged in multiple bytes (see * &struct wiphy.@ext_features) */ static inline void wiphy_ext_feature_set(struct wiphy *wiphy, enum nl80211_ext_feature_index ftidx) { u8 *ft_byte; ft_byte = &wiphy->ext_features[ftidx / 8]; *ft_byte |= BIT(ftidx % 8); } /** * wiphy_ext_feature_isset - check the extended feature flag * * @wiphy: the wiphy to check. * @ftidx: extended feature bit index. * * The extended features are flagged in multiple bytes (see * &struct wiphy.@ext_features) * * Return: %true if extended feature flag is set, %false otherwise */ static inline bool wiphy_ext_feature_isset(struct wiphy *wiphy, enum nl80211_ext_feature_index ftidx) { u8 ft_byte; ft_byte = wiphy->ext_features[ftidx / 8]; return (ft_byte & BIT(ftidx % 8)) != 0; } /** * cfg80211_free_nan_func - free NAN function * @f: NAN function that should be freed * * Frees the NAN function and all of its allocated members. */ void cfg80211_free_nan_func(struct cfg80211_nan_func *f); /** * struct cfg80211_nan_match_params - NAN match parameters * @type: the type of the function that triggered a match. If it is * %NL80211_NAN_FUNC_SUBSCRIBE it means that we replied to a subscriber. * If it is %NL80211_NAN_FUNC_PUBLISH, it means that we got a discovery * result. * If it is %NL80211_NAN_FUNC_FOLLOW_UP, we received a follow up. * @inst_id: the local instance id * @peer_inst_id: the instance id of the peer's function * @addr: the MAC address of the peer * @info_len: the length of @info * @info: the Service Specific Info from the peer (if any) * @cookie: unique identifier of the corresponding function */ struct cfg80211_nan_match_params { enum nl80211_nan_function_type type; u8 inst_id; u8 peer_inst_id; const u8 *addr; u8 info_len; const u8 *info; u64 cookie; }; /** * cfg80211_nan_match - report a match for a NAN function.
* @wdev: the wireless device reporting the match * @match: match notification parameters * @gfp: allocation flags * * This function reports that a NAN function had a match. This * can be a subscribe that had a match or a solicited publish that * was sent. It can also be a follow up that was received. */ void cfg80211_nan_match(struct wireless_dev *wdev, struct cfg80211_nan_match_params *match, gfp_t gfp); /** * cfg80211_nan_func_terminated - notify about NAN function termination. * * @wdev: the wireless device reporting the termination * @inst_id: the local instance id * @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*) * @cookie: unique NAN function identifier * @gfp: allocation flags * * This function reports that a NAN function was terminated. */ void cfg80211_nan_func_terminated(struct wireless_dev *wdev, u8 inst_id, enum nl80211_nan_func_term_reason reason, u64 cookie, gfp_t gfp); /* ethtool helper */ void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info); /** * cfg80211_external_auth_request - userspace request for authentication * @netdev: network device * @params: External authentication parameters * @gfp: allocation flags * Return: 0 on success, < 0 on error */ int cfg80211_external_auth_request(struct net_device *netdev, struct cfg80211_external_auth_params *params, gfp_t gfp); /** * cfg80211_pmsr_report - report peer measurement result data * @wdev: the wireless device reporting the measurement * @req: the original measurement request * @result: the result data * @gfp: allocation flags */ void cfg80211_pmsr_report(struct wireless_dev *wdev, struct cfg80211_pmsr_request *req, struct cfg80211_pmsr_result *result, gfp_t gfp); /** * cfg80211_pmsr_complete - report peer measurement completed * @wdev: the wireless device reporting the measurement * @req: the original measurement request * @gfp: allocation flags * * Report that the entire measurement completed; after this * the request pointer will no longer be valid. */ void cfg80211_pmsr_complete(struct wireless_dev *wdev, struct cfg80211_pmsr_request *req, gfp_t gfp); /** * cfg80211_iftype_allowed - check whether the interface can be allowed * @wiphy: the wiphy * @iftype: interface type * @is_4addr: use_4addr flag, must be '0' when check_swif is '1' * @check_swif: check iftype against software interfaces * * Check whether the interface is allowed to operate; additionally, this API * can be used to check iftype against the software interfaces when * check_swif is '1'. * * Return: %true if allowed, %false otherwise */ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype, bool is_4addr, u8 check_swif); /** * cfg80211_assoc_comeback - notification of association that was * temporarily rejected with a comeback * @netdev: network device * @ap_addr: AP (MLD) address that rejected the association * @timeout: timeout interval value in TUs. * * This function may sleep. The caller must hold the corresponding wdev's mutex. */ void cfg80211_assoc_comeback(struct net_device *netdev, const u8 *ap_addr, u32 timeout); /* Logging, debugging and troubleshooting/diagnostic helpers. */ /* wiphy_printk helpers, similar to dev_printk */ #define wiphy_printk(level, wiphy, format, args...) \ dev_printk(level, &(wiphy)->dev, format, ##args) #define wiphy_emerg(wiphy, format, args...) \ dev_emerg(&(wiphy)->dev, format, ##args) #define wiphy_alert(wiphy, format, args...) \ dev_alert(&(wiphy)->dev, format, ##args) #define wiphy_crit(wiphy, format, args...)
\ dev_crit(&(wiphy)->dev, format, ##args) #define wiphy_err(wiphy, format, args...) \ dev_err(&(wiphy)->dev, format, ##args) #define wiphy_warn(wiphy, format, args...) \ dev_warn(&(wiphy)->dev, format, ##args) #define wiphy_notice(wiphy, format, args...) \ dev_notice(&(wiphy)->dev, format, ##args) #define wiphy_info(wiphy, format, args...) \ dev_info(&(wiphy)->dev, format, ##args) #define wiphy_info_once(wiphy, format, args...) \ dev_info_once(&(wiphy)->dev, format, ##args) #define wiphy_err_ratelimited(wiphy, format, args...) \ dev_err_ratelimited(&(wiphy)->dev, format, ##args) #define wiphy_warn_ratelimited(wiphy, format, args...) \ dev_warn_ratelimited(&(wiphy)->dev, format, ##args) #define wiphy_debug(wiphy, format, args...) \ wiphy_printk(KERN_DEBUG, wiphy, format, ##args) #define wiphy_dbg(wiphy, format, args...) \ dev_dbg(&(wiphy)->dev, format, ##args) #if defined(VERBOSE_DEBUG) #define wiphy_vdbg wiphy_dbg #else #define wiphy_vdbg(wiphy, format, args...) \ ({ \ if (0) \ wiphy_printk(KERN_DEBUG, wiphy, format, ##args); \ 0; \ }) #endif /* * wiphy_WARN() acts like wiphy_printk(), but with the key difference * of using a WARN/WARN_ON to get the message out, including the * file/line information and a backtrace. */ #define wiphy_WARN(wiphy, format, args...) \ WARN(1, "wiphy: %s\n" format, wiphy_name(wiphy), ##args); /** * cfg80211_update_owe_info_event - Notify the peer's OWE info to user space * @netdev: network device * @owe_info: peer's owe info * @gfp: allocation flags */ void cfg80211_update_owe_info_event(struct net_device *netdev, struct cfg80211_update_owe_info *owe_info, gfp_t gfp); /** * cfg80211_bss_flush - resets all the scan entries * @wiphy: the wiphy */ void cfg80211_bss_flush(struct wiphy *wiphy); /** * cfg80211_bss_color_notify - notify about bss color event * @dev: network device * @cmd: the actual event we want to notify * @count: the number of TBTTs until the color change happens * @color_bitmap: representations of the colors that the local BSS is aware of * @link_id: valid link_id in case of MLO or 0 for non-MLO. * * Return: 0 on success. Non-zero on error. */ int cfg80211_bss_color_notify(struct net_device *dev, enum nl80211_commands cmd, u8 count, u64 color_bitmap, u8 link_id); /** * cfg80211_obss_color_collision_notify - notify about bss color collision * @dev: network device * @color_bitmap: representations of the colors that the local BSS is aware of * @link_id: valid link_id in case of MLO or 0 for non-MLO. * * Return: 0 on success. Non-zero on error. */ static inline int cfg80211_obss_color_collision_notify(struct net_device *dev, u64 color_bitmap, u8 link_id) { return cfg80211_bss_color_notify(dev, NL80211_CMD_OBSS_COLOR_COLLISION, 0, color_bitmap, link_id); } /** * cfg80211_color_change_started_notify - notify color change start * @dev: the device on which the color is switched * @count: the number of TBTTs until the color change happens * @link_id: valid link_id in case of MLO or 0 for non-MLO. * * Inform userspace about the color change that has started. * * Return: 0 on success. Non-zero on error. */ static inline int cfg80211_color_change_started_notify(struct net_device *dev, u8 count, u8 link_id) { return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_STARTED, count, 0, link_id); } /** * cfg80211_color_change_aborted_notify - notify color change abort * @dev: the device on which the color is switched * @link_id: valid link_id in case of MLO or 0 for non-MLO. * * Inform userspace about the color change that was aborted.
* * Return: 0 on success. Non-zero on error. */ static inline int cfg80211_color_change_aborted_notify(struct net_device *dev, u8 link_id) { return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_ABORTED, 0, 0, link_id); } /** * cfg80211_color_change_notify - notify color change completion * @dev: the device on which the color was switched * @link_id: valid link_id in case of MLO or 0 for non-MLO. * * Inform userspace about the color change that has completed. * * Return: 0 on success. Non-zero on error. */ static inline int cfg80211_color_change_notify(struct net_device *dev, u8 link_id) { return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_COMPLETED, 0, 0, link_id); } /** * cfg80211_links_removed - Notify about removed STA MLD setup links. * @dev: network device. * @link_mask: BIT mask of removed STA MLD setup link IDs. * * Inform cfg80211 and userspace about removed STA MLD setup links due to * the AP MLD removing the corresponding affiliated APs with Multi-Link * reconfiguration. Note that it's not valid to remove all links; in this * case, disconnect instead. * Also note that the wdev mutex must be held. */ void cfg80211_links_removed(struct net_device *dev, u16 link_mask); /** * struct cfg80211_mlo_reconf_done_data - MLO reconfiguration data * @buf: MLO Reconfiguration Response frame (header + body) * @len: length of the frame data * @driver_initiated: Indicates whether the add-links request was initiated by * the driver. This is set to true when the link reconfiguration request * was initiated by the driver because handling of AP link recommendation * requests (e.g. BTM (BSS Transition Management) requests) is offloaded * to the driver. * @added_links: BIT mask of links successfully added to the association * @links: per-link information indexed by link ID * @links.bss: the BSS that MLO reconfiguration was requested for; ownership of * the pointer moves to cfg80211 in the call to * cfg80211_mlo_reconf_add_done(). * * The BSS pointer must be set for each link for which 'add' operation was * requested in the assoc_ml_reconf callback. */ struct cfg80211_mlo_reconf_done_data { const u8 *buf; size_t len; bool driver_initiated; u16 added_links; struct { struct cfg80211_bss *bss; u8 *addr; } links[IEEE80211_MLD_MAX_NUM_LINKS]; }; /** * cfg80211_mlo_reconf_add_done - Notify about MLO reconfiguration result * @dev: network device. * @data: MLO reconfiguration done data, &struct cfg80211_mlo_reconf_done_data * * Inform cfg80211 and userspace that processing of the ML reconfiguration * request to add links to the association is done. */ void cfg80211_mlo_reconf_add_done(struct net_device *dev, struct cfg80211_mlo_reconf_done_data *data); /** * cfg80211_schedule_channels_check - schedule regulatory check if needed * @wdev: the wireless device to check * * In case the device supports NO_IR or DFS relaxations, schedule a regulatory * channels check, as previous concurrent operation conditions may not * hold anymore. */ void cfg80211_schedule_channels_check(struct wireless_dev *wdev); /** * cfg80211_epcs_changed - Notify about a change in EPCS state * @netdev: the wireless device whose EPCS state changed * @enabled: set to true if EPCS was enabled, otherwise set to false.
*/ void cfg80211_epcs_changed(struct net_device *netdev, bool enabled); /** * cfg80211_next_nan_dw_notif - Notify about the next NAN Discovery Window (DW) * @wdev: Pointer to the wireless device structure * @chan: DW channel (6, 44 or 149) * @gfp: Memory allocation flags */ void cfg80211_next_nan_dw_notif(struct wireless_dev *wdev, struct ieee80211_channel *chan, gfp_t gfp); /** * cfg80211_nan_cluster_joined - Notify about NAN cluster join * @wdev: Pointer to the wireless device structure * @cluster_id: Cluster ID of the NAN cluster that was joined or started * @new_cluster: Indicates if this is a new cluster or an existing one * @gfp: Memory allocation flags * * This function is used to notify user space when a NAN cluster has been * joined, providing the cluster ID and a flag whether it is a new cluster. */ void cfg80211_nan_cluster_joined(struct wireless_dev *wdev, const u8 *cluster_id, bool new_cluster, gfp_t gfp); #ifdef CONFIG_CFG80211_DEBUGFS /** * wiphy_locked_debugfs_read - do a locked read in debugfs * @wiphy: the wiphy to use * @file: the file being read * @buf: the buffer to fill and then read from * @bufsize: size of the buffer * @userbuf: the user buffer to copy to * @count: read count * @ppos: read position * @handler: the read handler to call (under wiphy lock) * @data: additional data to pass to the read handler * * Return: the number of characters read, or a negative errno */ ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file, char *buf, size_t bufsize, char __user *userbuf, size_t count, loff_t *ppos, ssize_t (*handler)(struct wiphy *wiphy, struct file *file, char *buf, size_t bufsize, void *data), void *data); /** * wiphy_locked_debugfs_write - do a locked write in debugfs * @wiphy: the wiphy to use * @file: the file being written to * @buf: the buffer to copy the user data to * @bufsize: size of the buffer * @userbuf: the user buffer to copy from * @count: write count * @handler: the write handler to call (under wiphy lock) * @data: additional data to pass to the write handler * * Return: the number of characters written, or a negative errno */ ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file, char *buf, size_t bufsize, const char __user *userbuf, size_t count, ssize_t (*handler)(struct wiphy *wiphy, struct file *file, char *buf, size_t count, void *data), void *data); #endif /** * cfg80211_s1g_get_start_freq_khz - get S1G chandef start frequency * @chandef: the chandef to use * * Return: the chandef's starting frequency in KHz */ static inline u32 cfg80211_s1g_get_start_freq_khz(const struct cfg80211_chan_def *chandef) { u32 bw_mhz = cfg80211_chandef_get_width(chandef); u32 center_khz = MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset; return center_khz - bw_mhz * 500 + 500; } /** * cfg80211_s1g_get_end_freq_khz - get S1G chandef end frequency * @chandef: the chandef to use * * Return: the chandef's ending frequency in KHz */ static inline u32 cfg80211_s1g_get_end_freq_khz(const struct cfg80211_chan_def *chandef) { u32 bw_mhz = cfg80211_chandef_get_width(chandef); u32 center_khz = MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset; return center_khz + bw_mhz * 500 - 500; } /** * cfg80211_s1g_get_primary_sibling - retrieve the sibling 1MHz subchannel * for an S1G chandef using a 2MHz primary channel. * @wiphy: wiphy the channel belongs to * @chandef: the chandef to use * * When chandef::s1g_primary_2mhz is set to true, we are operating on a 2MHz * primary channel.
The 1MHz subchannel designated by the primary channel * location exists within chandef::chan, while the 'sibling' is the other * 1MHz subchannel that makes up the 2MHz primary channel. * * Returns: the sibling 1MHz &struct ieee80211_channel, or %NULL on failure. */ static inline struct ieee80211_channel * cfg80211_s1g_get_primary_sibling(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef) { int width_mhz = cfg80211_chandef_get_width(chandef); u32 pri_1mhz_khz, sibling_1mhz_khz, op_low_1mhz_khz, pri_index; if (!chandef->s1g_primary_2mhz || width_mhz < 2) return NULL; pri_1mhz_khz = ieee80211_channel_to_khz(chandef->chan); op_low_1mhz_khz = cfg80211_s1g_get_start_freq_khz(chandef); /* * Compute the index of the primary 1 MHz subchannel within the * operating channel, relative to the lowest 1 MHz center frequency. * Flip the least significant bit to select the even/odd sibling, * then translate that index back into a channel frequency. */ pri_index = (pri_1mhz_khz - op_low_1mhz_khz) / 1000; sibling_1mhz_khz = op_low_1mhz_khz + ((pri_index ^ 1) * 1000); return ieee80211_get_channel_khz(wiphy, sibling_1mhz_khz); } #endif /* __NET_CFG80211_H */
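/*
 * Worked example for the sibling-subchannel arithmetic above (values are
 * illustrative): for a 2 MHz S1G chandef centered at 902500 kHz, the start
 * frequency is 902500 - 2*500 + 500 = 902000 kHz, so the two 1 MHz subchannel
 * centers are 902000 and 903000 kHz. If the designated primary 1 MHz channel
 * is the one at 903000 kHz, pri_index = (903000 - 902000) / 1000 = 1, and
 * flipping the low bit selects index 0, i.e. the sibling at 902000 kHz.
 */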
// SPDX-License-Identifier: GPL-2.0-or-later /* * The ChaCha stream cipher (RFC7539) * * Copyright (C) 2015 Martin Willi */ #include <crypto/algapi.h> // for crypto_xor_cpy #include <crypto/chacha.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/module.h> static void __maybe_unused chacha_crypt_generic(struct chacha_state *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { /* aligned to potentially speed up crypto_xor() */ u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long)); while (bytes >= CHACHA_BLOCK_SIZE) { chacha_block_generic(state, stream, nrounds); crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE); bytes -= CHACHA_BLOCK_SIZE; dst += CHACHA_BLOCK_SIZE; src += CHACHA_BLOCK_SIZE; } if (bytes) { chacha_block_generic(state, stream, nrounds); crypto_xor_cpy(dst, src, stream, bytes); } } #ifdef CONFIG_CRYPTO_LIB_CHACHA_ARCH #include "chacha.h" /* $(SRCARCH)/chacha.h */ #else #define chacha_crypt_arch chacha_crypt_generic #define hchacha_block_arch hchacha_block_generic #endif void chacha_crypt(struct chacha_state *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { chacha_crypt_arch(state, dst, src, bytes, nrounds); } EXPORT_SYMBOL_GPL(chacha_crypt); void hchacha_block(const struct chacha_state *state, u32 out[HCHACHA_OUT_WORDS], int nrounds) { hchacha_block_arch(state, out, nrounds); } EXPORT_SYMBOL_GPL(hchacha_block); #ifdef chacha_mod_init_arch static int __init chacha_mod_init(void) { chacha_mod_init_arch(); return 0; } subsys_initcall(chacha_mod_init); static void __exit chacha_mod_exit(void) { } module_exit(chacha_mod_exit); #endif MODULE_DESCRIPTION("ChaCha stream cipher (RFC7539)"); MODULE_LICENSE("GPL");
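/*
 * Illustrative caller sketch (not part of chacha.c): encrypting a buffer in
 * place with ChaCha20 via the library interface above. This assumes the
 * chacha_init() helper from <crypto/chacha.h> (key is CHACHA_KEY_SIZE bytes
 * as eight u32 words, iv is CHACHA_IV_SIZE bytes carrying counter + nonce).
 */
#include <crypto/chacha.h>

static void example_chacha20_crypt(u8 *buf, unsigned int len,
				   const u32 key[CHACHA_KEY_SIZE / sizeof(u32)],
				   const u8 iv[CHACHA_IV_SIZE])
{
	struct chacha_state state;

	chacha_init(&state, key, iv);
	/* src == dst is fine: the keystream is XORed over the input */
	chacha_crypt(&state, buf, buf, len, 20);	/* 20 rounds = ChaCha20 */
}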
// SPDX-License-Identifier: GPL-2.0-only /* DVB USB framework compliant Linux driver for the * DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101, * TeVii S421, S480, S482, S600, S630, S632, S650, S660, S662, * Prof 1100, 7500, * Geniatech SU3000, T220, * TechnoTrend S2-4600, * Terratec Cinergy S2 cards * Copyright (C) 2008-2012 Igor M. Liplianin (liplianin@me.by) * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #include <media/dvb-usb-ids.h> #include "dw2102.h" #include "si21xx.h" #include "stv0299.h" #include "z0194a.h" #include "stv0288.h" #include "stb6000.h" #include "eds1547.h" #include "cx24116.h" #include "tda1002x.h" #include "mt312.h" #include "zl10039.h" #include "ts2020.h" #include "ds3000.h" #include "stv0900.h" #include "stv6110.h" #include "stb6100.h" #include "stb6100_proc.h" #include "m88rs2000.h" #include "tda18271.h" #include "cxd2820r.h" #include "m88ds3103.h" /* Max transfer size done by I2C transfer functions */ #define MAX_XFER_SIZE 64 #define DW210X_READ_MSG 0 #define DW210X_WRITE_MSG 1 #define REG_1F_SYMBOLRATE_BYTE0 0x1f #define REG_20_SYMBOLRATE_BYTE1 0x20 #define REG_21_SYMBOLRATE_BYTE2 0x21 /* on my own */ #define DW2102_VOLTAGE_CTRL (0x1800) #define SU3000_STREAM_CTRL (0x1900) #define DW2102_RC_QUERY (0x1a00) #define DW2102_LED_CTRL (0x1b00) #define DW2101_FIRMWARE "dvb-usb-dw2101.fw" #define DW2102_FIRMWARE "dvb-usb-dw2102.fw" #define DW2104_FIRMWARE "dvb-usb-dw2104.fw" #define DW3101_FIRMWARE "dvb-usb-dw3101.fw" #define S630_FIRMWARE "dvb-usb-s630.fw" #define S660_FIRMWARE "dvb-usb-s660.fw" #define P1100_FIRMWARE "dvb-usb-p1100.fw" #define P7500_FIRMWARE "dvb-usb-p7500.fw" #define err_str "did not find the firmware file '%s'. 
You can use <kernel_dir>/scripts/get_dvb_firmware to get the firmware" struct dw2102_state { u8 initialized; u8 last_lock; u8 data[MAX_XFER_SIZE + 4]; struct i2c_client *i2c_client_demod; struct i2c_client *i2c_client_tuner; /* fe hook functions*/ int (*old_set_voltage)(struct dvb_frontend *f, enum fe_sec_voltage v); int (*fe_read_status)(struct dvb_frontend *fe, enum fe_status *status); }; /* debug */ static int dvb_usb_dw2102_debug; module_param_named(debug, dvb_usb_dw2102_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))." DVB_USB_DEBUG_STATUS); /* demod probe */ static int demod_probe = 1; module_param_named(demod, demod_probe, int, 0644); MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 4=stv0903+stb6100(or-able))."); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value, u16 index, u8 *data, u16 len, int flags) { int ret; u8 *u8buf; unsigned int pipe = (flags == DW210X_READ_MSG) ? usb_rcvctrlpipe(dev, 0) : usb_sndctrlpipe(dev, 0); u8 request_type = (flags == DW210X_READ_MSG) ? USB_DIR_IN : USB_DIR_OUT; u8buf = kmalloc(len, GFP_KERNEL); if (!u8buf) return -ENOMEM; if (flags == DW210X_WRITE_MSG) memcpy(u8buf, data, len); ret = usb_control_msg(dev, pipe, request, request_type | USB_TYPE_VENDOR, value, index, u8buf, len, 2000); if (flags == DW210X_READ_MSG) memcpy(data, u8buf, len); kfree(u8buf); return ret; } /* I2C */ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i = 0; u8 buf6[] = {0x2c, 0x05, 0xc0, 0, 0, 0, 0}; u16 value; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: if (msg[0].len < 1) { num = -EOPNOTSUPP; break; } /* read stv0299 register */ value = msg[0].buf[0];/* register */ for (i = 0; i < msg[1].len; i++) { dw210x_op_rw(d->udev, 0xb5, value + i, 0, buf6, 2, DW210X_READ_MSG); msg[1].buf[i] = buf6[0]; } break; case 1: switch (msg[0].addr) { case 0x68: if (msg[0].len < 2) { num = -EOPNOTSUPP; break; } /* write to stv0299 register */ buf6[0] = 0x2a; buf6[1] = msg[0].buf[0]; buf6[2] = msg[0].buf[1]; dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 3, DW210X_WRITE_MSG); break; case 0x60: if (msg[0].flags == 0) { if (msg[0].len < 4) { num = -EOPNOTSUPP; break; } /* write to tuner pll */ buf6[0] = 0x2c; buf6[1] = 5; buf6[2] = 0xc0; buf6[3] = msg[0].buf[0]; buf6[4] = msg[0].buf[1]; buf6[5] = msg[0].buf[2]; buf6[6] = msg[0].buf[3]; dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 7, DW210X_WRITE_MSG); } else { if (msg[0].len < 1) { num = -EOPNOTSUPP; break; } /* read from tuner */ dw210x_op_rw(d->udev, 0xb5, 0, 0, buf6, 1, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; } break; case (DW2102_RC_QUERY): if (msg[0].len < 2) { num = -EOPNOTSUPP; break; } dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case (DW2102_VOLTAGE_CTRL): if (msg[0].len < 1) { num = -EOPNOTSUPP; break; } buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 2, DW210X_WRITE_MSG); break; } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); u8 buf6[] = {0, 0, 0, 0, 0, 0, 0}; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: if (msg[0].len != 1) { warn("i2c 
rd: len=%d is not 1!\n", msg[0].len); num = -EOPNOTSUPP; break; } if (2 + msg[1].len > sizeof(buf6)) { warn("i2c rd: len=%d is too big!\n", msg[1].len); num = -EOPNOTSUPP; break; } /* read si2109 register by number */ buf6[0] = msg[0].addr << 1; buf6[1] = msg[0].len; buf6[2] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6, msg[0].len + 2, DW210X_WRITE_MSG); /* read si2109 register */ dw210x_op_rw(d->udev, 0xc3, 0xd0, 0, buf6, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, buf6 + 2, msg[1].len); break; case 1: switch (msg[0].addr) { case 0x68: if (2 + msg[0].len > sizeof(buf6)) { warn("i2c wr: len=%d is too big!\n", msg[0].len); num = -EOPNOTSUPP; break; } /* write to si2109 register */ buf6[0] = msg[0].addr << 1; buf6[1] = msg[0].len; memcpy(buf6 + 2, msg[0].buf, msg[0].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6, msg[0].len + 2, DW210X_WRITE_MSG); break; case(DW2102_RC_QUERY): dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case(DW2102_VOLTAGE_CTRL): buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 2, DW210X_WRITE_MSG); break; } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: { /* read */ /* first write first register number */ u8 ibuf[MAX_XFER_SIZE], obuf[3]; if (2 + msg[0].len != sizeof(obuf)) { warn("i2c rd: len=%d is not 1!\n", msg[0].len); ret = -EOPNOTSUPP; goto unlock; } if (2 + msg[1].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[1].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); /* second read registers */ dw210x_op_rw(d->udev, 0xc3, 0xd1, 0, ibuf, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, ibuf + 2, msg[1].len); break; } case 1: switch (msg[0].addr) { case 0x68: { /* write to register */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[0].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[0].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case 0x61: { /* write to tuner */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[0].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[0].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case(DW2102_RC_QUERY): { u8 ibuf[2]; dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[0].buf, ibuf, 2); break; } case(DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 0x30; obuf[1] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } } break; } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int len, i, j, ret; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (j = 0; j < num; j++) { switch (msg[j].addr) { case(DW2102_RC_QUERY): { u8 
ibuf[2]; dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf, 2); break; } case(DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 0x30; obuf[1] = msg[j].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } /* case 0x55: cx24116 * case 0x6a: stv0903 * case 0x68: ds3000, stv0903 * case 0x60: ts2020, stv6110, stb6100 */ default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ u8 ibuf[MAX_XFER_SIZE]; if (2 + msg[j].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } dw210x_op_rw(d->udev, 0xc3, (msg[j].addr << 1) + 1, 0, ibuf, msg[j].len + 2, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf + 2, msg[j].len); mdelay(10); } else if (((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) || ((msg[j].buf[0] == 0xf7) && (msg[j].addr == 0x55))) { /* write firmware */ u8 obuf[19]; obuf[0] = msg[j].addr << 1; obuf[1] = (msg[j].len > 15 ? 17 : msg[j].len); obuf[2] = msg[j].buf[0]; len = msg[j].len - 1; i = 1; do { memcpy(obuf + 3, msg[j].buf + i, (len > 16 ? 16 : len)); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, (len > 16 ? 16 : len) + 3, DW210X_WRITE_MSG); i += 16; len -= 16; } while (len > 0); } else { /* write registers */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[j].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[j].addr << 1; obuf[1] = msg[j].len; memcpy(obuf + 2, msg[j].buf, msg[j].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); } break; } } } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret; int i; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: { /* read */ /* first write first register number */ u8 ibuf[MAX_XFER_SIZE], obuf[3]; if (2 + msg[0].len != sizeof(obuf)) { warn("i2c rd: len=%d is not 1!\n", msg[0].len); ret = -EOPNOTSUPP; goto unlock; } if (2 + msg[1].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[1].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); /* second read registers */ dw210x_op_rw(d->udev, 0xc3, 0x19, 0, ibuf, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, ibuf + 2, msg[1].len); break; } case 1: switch (msg[0].addr) { case 0x60: case 0x0c: { /* write to register */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[0].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[0].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case(DW2102_RC_QUERY): { u8 ibuf[2]; dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[0].buf, ibuf, 2); break; } } break; } for (i = 0; i < num; i++) { deb_xfer("%02x:%02x: %s ", i, msg[i].addr, msg[i].flags == 0 ? 
">>>" : "<<<"); debug_dump(msg[i].buf, msg[i].len, deb_xfer); } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct usb_device *udev; int len, i, j, ret; if (!d) return -ENODEV; udev = d->udev; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (j = 0; j < num; j++) { switch (msg[j].addr) { case (DW2102_RC_QUERY): { u8 ibuf[5]; dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 5, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf + 3, 2); break; } case (DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 1; obuf[1] = msg[j].buf[1];/* off-on */ dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); obuf[0] = 3; obuf[1] = msg[j].buf[0];/* 13v-18v */ dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } case (DW2102_LED_CTRL): { u8 obuf[2]; obuf[0] = 5; obuf[1] = msg[j].buf[0]; dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } /* case 0x55: cx24116 * case 0x6a: stv0903 * case 0x68: ds3000, stv0903, rs2000 * case 0x60: ts2020, stv6110, stb6100 * case 0xa0: eeprom */ default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ u8 ibuf[MAX_XFER_SIZE]; if (msg[j].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } dw210x_op_rw(d->udev, 0x91, 0, 0, ibuf, msg[j].len, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf, msg[j].len); break; } else if ((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) { /* write firmware */ u8 obuf[19]; obuf[0] = (msg[j].len > 16 ? 18 : msg[j].len + 1); obuf[1] = msg[j].addr << 1; obuf[2] = msg[j].buf[0]; len = msg[j].len - 1; i = 1; do { memcpy(obuf + 3, msg[j].buf + i, (len > 16 ? 16 : len)); dw210x_op_rw(d->udev, 0x80, 0, 0, obuf, (len > 16 ? 16 : len) + 3, DW210X_WRITE_MSG); i += 16; len -= 16; } while (len > 0); } else if (j < (num - 1)) { /* write register addr before read */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[j].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[j + 1].len; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); dw210x_op_rw(d->udev, le16_to_cpu(udev->descriptor.idProduct) == 0x7500 ? 
0x92 : 0x90, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); break; } else { /* write registers */ u8 obuf[MAX_XFER_SIZE]; if (2 + msg[j].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[j].len + 1; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); dw210x_op_rw(d->udev, 0x80, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); break; } break; } } } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct dw2102_state *state; int j; if (!d) return -ENODEV; state = d->priv; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; if (mutex_lock_interruptible(&d->data_mutex) < 0) { mutex_unlock(&d->i2c_mutex); return -EAGAIN; } j = 0; while (j < num) { switch (msg[j].addr) { case SU3000_STREAM_CTRL: state->data[0] = msg[j].buf[0] + 0x36; state->data[1] = 3; state->data[2] = 0; if (dvb_usb_generic_rw(d, state->data, 3, state->data, 0, 0) < 0) err("i2c transfer failed."); break; case DW2102_RC_QUERY: state->data[0] = 0x10; if (dvb_usb_generic_rw(d, state->data, 1, state->data, 2, 0) < 0) err("i2c transfer failed."); msg[j].buf[1] = state->data[0]; msg[j].buf[0] = state->data[1]; break; default: /* if the current write msg is followed by a another * read msg to/from the same address */ if ((j + 1 < num) && (msg[j + 1].flags & I2C_M_RD) && (msg[j].addr == msg[j + 1].addr)) { /* join both i2c msgs to one usb read command */ if (4 + msg[j].len > sizeof(state->data)) { warn("i2c combined wr/rd: write len=%d is too big!\n", msg[j].len); num = -EOPNOTSUPP; break; } if (1 + msg[j + 1].len > sizeof(state->data)) { warn("i2c combined wr/rd: read len=%d is too big!\n", msg[j + 1].len); num = -EOPNOTSUPP; break; } state->data[0] = 0x09; state->data[1] = msg[j].len; state->data[2] = msg[j + 1].len; state->data[3] = msg[j].addr; memcpy(&state->data[4], msg[j].buf, msg[j].len); if (dvb_usb_generic_rw(d, state->data, msg[j].len + 4, state->data, msg[j + 1].len + 1, 0) < 0) err("i2c transfer failed."); memcpy(msg[j + 1].buf, &state->data[1], msg[j + 1].len); j++; break; } if (msg[j].flags & I2C_M_RD) { /* single read */ if (4 + msg[j].len > sizeof(state->data)) { warn("i2c rd: len=%d is too big!\n", msg[j].len); num = -EOPNOTSUPP; break; } state->data[0] = 0x09; state->data[1] = 0; state->data[2] = msg[j].len; state->data[3] = msg[j].addr; memcpy(&state->data[4], msg[j].buf, msg[j].len); if (dvb_usb_generic_rw(d, state->data, 4, state->data, msg[j].len + 1, 0) < 0) err("i2c transfer failed."); memcpy(msg[j].buf, &state->data[1], msg[j].len); break; } /* single write */ if (3 + msg[j].len > sizeof(state->data)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); num = -EOPNOTSUPP; break; } state->data[0] = 0x08; state->data[1] = msg[j].addr; state->data[2] = msg[j].len; memcpy(&state->data[3], msg[j].buf, msg[j].len); if (dvb_usb_generic_rw(d, state->data, msg[j].len + 3, state->data, 1, 0) < 0) err("i2c transfer failed."); } // switch j++; } // while mutex_unlock(&d->data_mutex); mutex_unlock(&d->i2c_mutex); return num; } static u32 dw210x_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static const struct i2c_algorithm dw2102_i2c_algo = { .master_xfer = dw2102_i2c_transfer, .functionality = dw210x_i2c_func, }; static const struct i2c_algorithm dw2102_serit_i2c_algo = { .master_xfer = dw2102_serit_i2c_transfer, .functionality = dw210x_i2c_func, }; 
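/*
 * Note on the SU3000 transport, as inferred from su3000_i2c_transfer()
 * below rather than from vendor documentation: I2C access is tunnelled
 * through dvb_usb_generic_rw() bulk commands whose first byte selects the
 * operation:
 *
 *	0x08  single write:  0x08, addr, len, data...
 *	0x09  read, or a combined write-then-read when a write message is
 *	      immediately followed by a read to the same address:
 *	      0x09, wlen, rlen, addr, wdata...
 *	0x10  poll the remote control, returning two key bytes
 *
 * Replies apparently start with a status byte followed by the payload,
 * which is why reads copy from &state->data[1].
 */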
static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
			       int num)
{
	struct dvb_usb_device *d = i2c_get_adapdata(adap);
	struct dw2102_state *state;
	int j;

	if (!d)
		return -ENODEV;

	state = d->priv;

	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
		return -EAGAIN;
	if (mutex_lock_interruptible(&d->data_mutex) < 0) {
		mutex_unlock(&d->i2c_mutex);
		return -EAGAIN;
	}
	j = 0;
	while (j < num) {
		switch (msg[j].addr) {
		case SU3000_STREAM_CTRL:
			state->data[0] = msg[j].buf[0] + 0x36;
			state->data[1] = 3;
			state->data[2] = 0;
			if (dvb_usb_generic_rw(d, state->data, 3,
					state->data, 0, 0) < 0)
				err("i2c transfer failed.");
			break;
		case DW2102_RC_QUERY:
			state->data[0] = 0x10;
			if (dvb_usb_generic_rw(d, state->data, 1,
					state->data, 2, 0) < 0)
				err("i2c transfer failed.");
			msg[j].buf[1] = state->data[0];
			msg[j].buf[0] = state->data[1];
			break;
		default:
			/* if the current write msg is followed by another
			 * read msg to/from the same address
			 */
			if ((j + 1 < num) && (msg[j + 1].flags & I2C_M_RD) &&
			    (msg[j].addr == msg[j + 1].addr)) {
				/* join both i2c msgs to one usb read command */
				if (4 + msg[j].len > sizeof(state->data)) {
					warn("i2c combined wr/rd: write len=%d is too big!\n",
					     msg[j].len);
					num = -EOPNOTSUPP;
					break;
				}
				if (1 + msg[j + 1].len > sizeof(state->data)) {
					warn("i2c combined wr/rd: read len=%d is too big!\n",
					     msg[j + 1].len);
					num = -EOPNOTSUPP;
					break;
				}

				state->data[0] = 0x09;
				state->data[1] = msg[j].len;
				state->data[2] = msg[j + 1].len;
				state->data[3] = msg[j].addr;
				memcpy(&state->data[4], msg[j].buf, msg[j].len);

				if (dvb_usb_generic_rw(d, state->data,
						msg[j].len + 4, state->data,
						msg[j + 1].len + 1, 0) < 0)
					err("i2c transfer failed.");

				memcpy(msg[j + 1].buf, &state->data[1],
						msg[j + 1].len);
				j++;
				break;
			}

			if (msg[j].flags & I2C_M_RD) {
				/* single read */
				if (4 + msg[j].len > sizeof(state->data)) {
					warn("i2c rd: len=%d is too big!\n",
					     msg[j].len);
					num = -EOPNOTSUPP;
					break;
				}

				state->data[0] = 0x09;
				state->data[1] = 0;
				state->data[2] = msg[j].len;
				state->data[3] = msg[j].addr;
				memcpy(&state->data[4], msg[j].buf, msg[j].len);

				if (dvb_usb_generic_rw(d, state->data, 4,
						state->data, msg[j].len + 1, 0) < 0)
					err("i2c transfer failed.");

				memcpy(msg[j].buf, &state->data[1], msg[j].len);
				break;
			}

			/* single write */
			if (3 + msg[j].len > sizeof(state->data)) {
				warn("i2c wr: len=%d is too big!\n", msg[j].len);
				num = -EOPNOTSUPP;
				break;
			}

			state->data[0] = 0x08;
			state->data[1] = msg[j].addr;
			state->data[2] = msg[j].len;
			memcpy(&state->data[3], msg[j].buf, msg[j].len);

			if (dvb_usb_generic_rw(d, state->data, msg[j].len + 3,
						state->data, 1, 0) < 0)
				err("i2c transfer failed.");
		} /* switch */
		j++;
	} /* while */

	mutex_unlock(&d->data_mutex);
	mutex_unlock(&d->i2c_mutex);
	return num;
}

static u32 dw210x_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C;
}

static const struct i2c_algorithm dw2102_i2c_algo = {
	.master_xfer = dw2102_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

static const struct i2c_algorithm dw2102_serit_i2c_algo = {
	.master_xfer = dw2102_serit_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

static const struct i2c_algorithm dw2102_earda_i2c_algo = {
	.master_xfer = dw2102_earda_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

static const struct i2c_algorithm dw2104_i2c_algo = {
	.master_xfer = dw2104_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

static const struct i2c_algorithm dw3101_i2c_algo = {
	.master_xfer = dw3101_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

static const struct i2c_algorithm s6x0_i2c_algo = {
	.master_xfer = s6x0_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

static const struct i2c_algorithm su3000_i2c_algo = {
	.master_xfer = su3000_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
{
	int i;
	u8 ibuf[] = {0, 0};
	u8 eeprom[256], eepromline[16];

	for (i = 0; i < 256; i++) {
		if (dw210x_op_rw(d->udev, 0xb6, 0xa0, i, ibuf, 2,
				DW210X_READ_MSG) < 0) {
			err("read eeprom failed.");
			return -EIO;
		} else {
			eepromline[i % 16] = ibuf[0];
			eeprom[i] = ibuf[0];
		}
		if ((i % 16) == 15) {
			deb_xfer("%02x: ", i - 15);
			debug_dump(eepromline, 16, deb_xfer);
		}
	}

	memcpy(mac, eeprom + 8, 6);
	return 0;
};

static int s6x0_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
{
	int i, ret;
	u8 ibuf[] = { 0 }, obuf[] = { 0 };
	u8 eeprom[256], eepromline[16];
	struct i2c_msg msg[] = {
		{
			.addr = 0xa0 >> 1,
			.flags = 0,
			.buf = obuf,
			.len = 1,
		}, {
			.addr = 0xa0 >> 1,
			.flags = I2C_M_RD,
			.buf = ibuf,
			.len = 1,
		}
	};

	for (i = 0; i < 256; i++) {
		obuf[0] = i;
		ret = s6x0_i2c_transfer(&d->i2c_adap, msg, 2);
		if (ret != 2) {
			err("read eeprom failed.");
			return -EIO;
		} else {
			eepromline[i % 16] = ibuf[0];
			eeprom[i] = ibuf[0];
		}

		if ((i % 16) == 15) {
			deb_xfer("%02x: ", i - 15);
			debug_dump(eepromline, 16, deb_xfer);
		}
	}

	memcpy(mac, eeprom + 16, 6);
	return 0;
};

static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
	static u8 command_start[] = {0x00};
	static u8 command_stop[] = {0x01};
	struct i2c_msg msg = {
		.addr = SU3000_STREAM_CTRL,
		.flags = 0,
		.buf = onoff ? command_start : command_stop,
		.len = 1
	};

	i2c_transfer(&adap->dev->i2c_adap, &msg, 1);

	return 0;
}

static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
{
	struct dw2102_state *state = d->priv;
	int ret = 0;

	info("%s: %d, initialized %d", __func__, i, state->initialized);

	if (i && !state->initialized) {
		mutex_lock(&d->data_mutex);

		state->data[0] = 0xde;
		state->data[1] = 0;

		state->initialized = 1;
		/* reset board */
		ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0);
		mutex_unlock(&d->data_mutex);
	}

	return ret;
}

static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
{
	int i;
	u8 obuf[] = { 0x1f, 0xf0 };
	u8 ibuf[] = { 0 };
	struct i2c_msg msg[] = {
		{
			.addr = 0x51,
			.flags = 0,
			.buf = obuf,
			.len = 2,
		}, {
			.addr = 0x51,
			.flags = I2C_M_RD,
			.buf = ibuf,
			.len = 1,
		}
	};

	for (i = 0; i < 6; i++) {
		obuf[1] = 0xf0 + i;
		if (i2c_transfer(&d->i2c_adap, msg, 2) != 2)
			return -EIO;
		else
			mac[i] = ibuf[0];
	}

	return 0;
}

static int su3000_identify_state(struct usb_device *udev,
				 const struct dvb_usb_device_properties *props,
				 const struct dvb_usb_device_description **desc,
				 int *cold)
{
	*cold = 0;
	return 0;
}
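/*
 * DW2102_RC_QUERY, DW2102_VOLTAGE_CTRL, DW2102_LED_CTRL and
 * SU3000_STREAM_CTRL are not real I2C slave addresses: they are magic
 * addresses that the master_xfer implementations above intercept and turn
 * into vendor USB requests.  The helpers below simply build i2c_msg
 * structures with those addresses, so the same frontend-facing code works
 * across all the board variants.
 */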
static int dw210x_set_voltage(struct dvb_frontend *fe,
			      enum fe_sec_voltage voltage)
{
	static u8 command_13v[] = {0x00, 0x01};
	static u8 command_18v[] = {0x01, 0x01};
	static u8 command_off[] = {0x00, 0x00};
	struct i2c_msg msg = {
		.addr = DW2102_VOLTAGE_CTRL,
		.flags = 0,
		.buf = command_off,
		.len = 2,
	};
	struct dvb_usb_adapter *udev_adap = fe->dvb->priv;

	if (voltage == SEC_VOLTAGE_18)
		msg.buf = command_18v;
	else if (voltage == SEC_VOLTAGE_13)
		msg.buf = command_13v;

	i2c_transfer(&udev_adap->dev->i2c_adap, &msg, 1);

	return 0;
}

static int s660_set_voltage(struct dvb_frontend *fe,
			    enum fe_sec_voltage voltage)
{
	struct dvb_usb_adapter *d = fe->dvb->priv;
	struct dw2102_state *st = d->dev->priv;

	dw210x_set_voltage(fe, voltage);
	if (st->old_set_voltage)
		st->old_set_voltage(fe, voltage);

	return 0;
}

static void dw210x_led_ctrl(struct dvb_frontend *fe, int offon)
{
	static u8 led_off[] = { 0 };
	static u8 led_on[] = { 1 };
	struct i2c_msg msg = {
		.addr = DW2102_LED_CTRL,
		.flags = 0,
		.buf = led_off,
		.len = 1
	};
	struct dvb_usb_adapter *udev_adap = fe->dvb->priv;

	if (offon)
		msg.buf = led_on;
	i2c_transfer(&udev_adap->dev->i2c_adap, &msg, 1);
}

static int tt_s2_4600_read_status(struct dvb_frontend *fe,
				  enum fe_status *status)
{
	struct dvb_usb_adapter *d = fe->dvb->priv;
	struct dw2102_state *st = d->dev->priv;
	int ret;

	ret = st->fe_read_status(fe, status);

	/* resync the slave fifo when the signal changes from unlock to lock */
	if ((*status & FE_HAS_LOCK) && (!st->last_lock))
		su3000_streaming_ctrl(d, 1);

	st->last_lock = (*status & FE_HAS_LOCK) ? 1 : 0;

	return ret;
}

static struct stv0299_config sharp_z0194a_config = {
	.demod_address = 0x68,
	.inittab = sharp_z0194a_inittab,
	.mclk = 88000000UL,
	.invert = 1,
	.skip_reinit = 0,
	.lock_output = STV0299_LOCKOUTPUT_1,
	.volt13_op0_op1 = STV0299_VOLT13_OP1,
	.min_delay_ms = 100,
	.set_symbol_rate = sharp_z0194a_set_symbol_rate,
};

static struct cx24116_config dw2104_config = {
	.demod_address = 0x55,
	.mpg_clk_pos_pol = 0x01,
};

static struct si21xx_config serit_sp1511lhb_config = {
	.demod_address = 0x68,
	.min_delay_ms = 100,
};

static struct tda10023_config dw3101_tda10023_config = {
	.demod_address = 0x0c,
	.invert = 1,
};

static struct mt312_config zl313_config = {
	.demod_address = 0x0e,
};

static struct ds3000_config dw2104_ds3000_config = {
	.demod_address = 0x68,
};

static struct ts2020_config dw2104_ts2020_config = {
	.tuner_address = 0x60,
	.clk_out_div = 1,
	.frequency_div = 1060000,
};

static struct ds3000_config s660_ds3000_config = {
	.demod_address = 0x68,
	.ci_mode = 1,
	.set_lock_led = dw210x_led_ctrl,
};

static struct ts2020_config s660_ts2020_config = {
	.tuner_address = 0x60,
	.clk_out_div = 1,
	.frequency_div = 1146000,
};

static struct stv0900_config dw2104a_stv0900_config = {
	.demod_address = 0x6a,
	.demod_mode = 0,
	.xtal = 27000000,
	.clkmode = 3, /* 0-CLKI, 2-XTALI, else AUTO */
	.diseqc_mode = 2, /* 2/3 PWM */
	.tun1_maddress = 0, /* 0x60 */
	.tun1_adc = 0, /* 2 Vpp */
	.path1_mode = 3,
};

static struct stb6100_config dw2104a_stb6100_config = {
	.tuner_address = 0x60,
	.refclock = 27000000,
};

static struct stv0900_config dw2104_stv0900_config = {
	.demod_address = 0x68,
	.demod_mode = 0,
	.xtal = 8000000,
	.clkmode = 3,
	.diseqc_mode = 2,
	.tun1_maddress = 0,
	.tun1_adc = 1, /* 1 Vpp */
	.path1_mode = 3,
};

static struct stv6110_config dw2104_stv6110_config = {
	.i2c_address = 0x60,
	.mclk = 16000000,
	.clk_div = 1,
};

static struct stv0900_config prof_7500_stv0900_config = {
	.demod_address = 0x6a,
	.demod_mode = 0,
	.xtal = 27000000,
	.clkmode = 3, /* 0-CLKI, 2-XTALI, else AUTO */
	.diseqc_mode = 2, /* 2/3 PWM */
	.tun1_maddress = 0, /* 0x60 */
	.tun1_adc = 0, /* 2 Vpp */
	.path1_mode = 3,
	.tun1_type = 3,
	.set_lock_led = dw210x_led_ctrl,
};

static struct ds3000_config su3000_ds3000_config = {
	.demod_address = 0x68,
	.ci_mode = 1,
	.set_lock_led = dw210x_led_ctrl,
};

static struct cxd2820r_config cxd2820r_config = {
	.i2c_address = 0x6c, /* (0xd8 >> 1) */
	.ts_mode = 0x38,
	.ts_clock_inv = 1,
};

static struct tda18271_config tda18271_config = {
	.output_opt = TDA18271_OUTPUT_LT_OFF,
	.gate = TDA18271_GATE_DIGITAL,
};

static u8 m88rs2000_inittab[] = {
	DEMOD_WRITE, 0x9a, 0x30,
	DEMOD_WRITE, 0x00, 0x01,
	WRITE_DELAY, 0x19, 0x00,
	DEMOD_WRITE, 0x00, 0x00,
	DEMOD_WRITE, 0x9a, 0xb0,
	DEMOD_WRITE, 0x81, 0xc1,
	DEMOD_WRITE, 0x81, 0x81,
	DEMOD_WRITE, 0x86, 0xc6,
	DEMOD_WRITE, 0x9a, 0x30,
	DEMOD_WRITE, 0xf0, 0x80,
	DEMOD_WRITE, 0xf1, 0xbf,
	DEMOD_WRITE, 0xb0, 0x45,
	DEMOD_WRITE, 0xb2, 0x01,
	DEMOD_WRITE, 0x9a, 0xb0,
	0xff, 0xaa, 0xff
};

static struct m88rs2000_config s421_m88rs2000_config = {
	.demod_addr = 0x68,
	.inittab = m88rs2000_inittab,
};

static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
{
	struct dvb_tuner_ops *tuner_ops = NULL;

	if (demod_probe & 4) {
		d->fe_adap[0].fe = dvb_attach(stv0900_attach,
				&dw2104a_stv0900_config,
				&d->dev->i2c_adap, 0);
		if (d->fe_adap[0].fe) {
			if (dvb_attach(stb6100_attach, d->fe_adap[0].fe,
					&dw2104a_stb6100_config,
					&d->dev->i2c_adap)) {
				tuner_ops = &d->fe_adap[0].fe->ops.tuner_ops;
				tuner_ops->set_frequency = stb6100_set_freq;
				tuner_ops->get_frequency = stb6100_get_freq;
				tuner_ops->set_bandwidth = stb6100_set_bandw;
				tuner_ops->get_bandwidth = stb6100_get_bandw;
				d->fe_adap[0].fe->ops.set_voltage =
							dw210x_set_voltage;
				info("Attached STV0900+STB6100!");
				return 0;
			}
		}
	}

	if (demod_probe & 2) {
		d->fe_adap[0].fe = dvb_attach(stv0900_attach,
				&dw2104_stv0900_config,
				&d->dev->i2c_adap, 0);
		if (d->fe_adap[0].fe) {
			if (dvb_attach(stv6110_attach, d->fe_adap[0].fe,
					&dw2104_stv6110_config,
					&d->dev->i2c_adap)) {
				d->fe_adap[0].fe->ops.set_voltage =
							dw210x_set_voltage;
				info("Attached STV0900+STV6110A!");
				return 0;
			}
		}
	}

	if (demod_probe & 1) {
		d->fe_adap[0].fe = dvb_attach(cx24116_attach,
				&dw2104_config, &d->dev->i2c_adap);
		if (d->fe_adap[0].fe) {
			d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
			info("Attached cx24116!");
			return 0;
		}
	}

	d->fe_adap[0].fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
			&d->dev->i2c_adap);
	if (d->fe_adap[0].fe) {
		dvb_attach(ts2020_attach, d->fe_adap[0].fe,
			&dw2104_ts2020_config, &d->dev->i2c_adap);
		d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
		info("Attached DS3000!");
		return 0;
	}

	return -EIO;
}
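/*
 * The attach order above follows the "demod" module parameter bit mask:
 * bit 2 (4) tries STV0900+STB6100, bit 1 (2) tries STV0900+STV6110A and
 * bit 0 (1) tries the CX24116; if none of the requested combinations is
 * found, a DS3000/TS2020 pair is probed as a last resort.  For example
 * (hypothetical invocation):
 *
 *	modprobe dvb-usb-dw2102 demod=6
 *
 * would probe both STV0900 tuner combinations but skip the CX24116 probe.
 */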
static struct dvb_usb_device_properties dw2102_properties;
static struct dvb_usb_device_properties dw2104_properties;
static struct dvb_usb_device_properties s6x0_properties;

static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
{
	if (dw2102_properties.i2c_algo == &dw2102_serit_i2c_algo) {
		/*dw2102_properties.adapter->tuner_attach = NULL;*/
		d->fe_adap[0].fe = dvb_attach(si21xx_attach,
				&serit_sp1511lhb_config, &d->dev->i2c_adap);
		if (d->fe_adap[0].fe) {
			d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
			info("Attached si21xx!");
			return 0;
		}
	}

	if (dw2102_properties.i2c_algo == &dw2102_earda_i2c_algo) {
		d->fe_adap[0].fe = dvb_attach(stv0288_attach, &earda_config,
				&d->dev->i2c_adap);
		if (d->fe_adap[0].fe) {
			if (dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61,
					&d->dev->i2c_adap)) {
				d->fe_adap[0].fe->ops.set_voltage =
							dw210x_set_voltage;
				info("Attached stv0288!");
				return 0;
			}
		}
	}

	if (dw2102_properties.i2c_algo == &dw2102_i2c_algo) {
		/*dw2102_properties.adapter->tuner_attach = dw2102_tuner_attach;*/
		d->fe_adap[0].fe = dvb_attach(stv0299_attach,
				&sharp_z0194a_config, &d->dev->i2c_adap);
		if (d->fe_adap[0].fe) {
			d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
			info("Attached stv0299!");
			return 0;
		}
	}
	return -EIO;
}

static int dw3101_frontend_attach(struct dvb_usb_adapter *d)
{
	d->fe_adap[0].fe = dvb_attach(tda10023_attach,
			&dw3101_tda10023_config,
			&d->dev->i2c_adap, 0x48);
	if (d->fe_adap[0].fe) {
		info("Attached tda10023!");
		return 0;
	}
	return -EIO;
}

static int zl100313_frontend_attach(struct dvb_usb_adapter *d)
{
	d->fe_adap[0].fe = dvb_attach(mt312_attach, &zl313_config,
			&d->dev->i2c_adap);
	if (d->fe_adap[0].fe) {
		if (dvb_attach(zl10039_attach, d->fe_adap[0].fe, 0x60,
				&d->dev->i2c_adap)) {
			d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
			info("Attached zl100313+zl10039!");
			return 0;
		}
	}

	return -EIO;
}

static int stv0288_frontend_attach(struct dvb_usb_adapter *d)
{
	u8 obuf[] = {7, 1};

	d->fe_adap[0].fe = dvb_attach(stv0288_attach, &earda_config,
			&d->dev->i2c_adap);

	if (!d->fe_adap[0].fe)
		return -EIO;

	if (dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61,
			&d->dev->i2c_adap) == NULL)
		return -EIO;

	d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;

	dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG);

	info("Attached stv0288+stb6000!");

	return 0;
}

static int ds3000_frontend_attach(struct dvb_usb_adapter *d)
{
	struct dw2102_state *st = d->dev->priv;
	u8 obuf[] = {7, 1};

	d->fe_adap[0].fe = dvb_attach(ds3000_attach, &s660_ds3000_config,
			&d->dev->i2c_adap);

	if (!d->fe_adap[0].fe)
		return -EIO;

	dvb_attach(ts2020_attach, d->fe_adap[0].fe, &s660_ts2020_config,
		&d->dev->i2c_adap);

	st->old_set_voltage = d->fe_adap[0].fe->ops.set_voltage;
	d->fe_adap[0].fe->ops.set_voltage = s660_set_voltage;

	dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG);

	info("Attached ds3000+ts2020!");

	return 0;
}
static int prof_7500_frontend_attach(struct dvb_usb_adapter *d)
{
	u8 obuf[] = {7, 1};

	d->fe_adap[0].fe = dvb_attach(stv0900_attach, &prof_7500_stv0900_config,
					&d->dev->i2c_adap, 0);
	if (!d->fe_adap[0].fe)
		return -EIO;

	d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;

	dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG);

	info("Attached STV0900+STB6100A!");

	return 0;
}

static int su3000_frontend_attach(struct dvb_usb_adapter *adap)
{
	struct dvb_usb_device *d = adap->dev;
	struct dw2102_state *state = d->priv;

	mutex_lock(&d->data_mutex);

	state->data[0] = 0xe;
	state->data[1] = 0x80;
	state->data[2] = 0;

	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
		err("command 0x0e transfer failed.");

	state->data[0] = 0xe;
	state->data[1] = 0x02;
	state->data[2] = 1;

	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
		err("command 0x0e transfer failed.");
	msleep(300);

	state->data[0] = 0xe;
	state->data[1] = 0x83;
	state->data[2] = 0;

	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
		err("command 0x0e transfer failed.");

	state->data[0] = 0xe;
	state->data[1] = 0x83;
	state->data[2] = 1;

	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
		err("command 0x0e transfer failed.");

	state->data[0] = 0x51;

	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
		err("command 0x51 transfer failed.");

	mutex_unlock(&d->data_mutex);

	adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
					&d->i2c_adap);
	if (!adap->fe_adap[0].fe)
		return -EIO;

	if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
				&dw2104_ts2020_config,
				&d->i2c_adap)) {
		info("Attached DS3000/TS2020!");
		return 0;
	}

	info("Failed to attach DS3000/TS2020!");
	return -EIO;
}

static int t220_frontend_attach(struct dvb_usb_adapter *adap)
{
	struct dvb_usb_device *d = adap->dev;
	struct dw2102_state *state = d->priv;

	mutex_lock(&d->data_mutex);

	state->data[0] = 0xe;
	state->data[1] = 0x87;
	state->data[2] = 0x0;

	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
		err("command 0x0e transfer failed.");

	state->data[0] = 0xe;
	state->data[1] = 0x86;
	state->data[2] = 1;

	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
		err("command 0x0e transfer failed.");

	state->data[0] = 0xe;
	state->data[1] = 0x80;
	state->data[2] = 0;

	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
		err("command 0x0e transfer failed.");

	msleep(50);

	state->data[0] = 0xe;
	state->data[1] = 0x80;
	state->data[2] = 1;

	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
		err("command 0x0e transfer failed.");

	state->data[0] = 0x51;

	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
		err("command 0x51 transfer failed.");

	mutex_unlock(&d->data_mutex);

	adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
					&d->i2c_adap, NULL);
	if (adap->fe_adap[0].fe) {
		if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60,
					&d->i2c_adap, &tda18271_config)) {
			info("Attached TDA18271HD/CXD2820R!");
			return 0;
		}
	}

	info("Failed to attach TDA18271HD/CXD2820R!");
	return -EIO;
}

static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap)
{
	struct dvb_usb_device *d = adap->dev;
	struct dw2102_state *state = d->priv;

	mutex_lock(&d->data_mutex);

	state->data[0] = 0x51;

	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
		err("command 0x51 transfer failed.");

	mutex_unlock(&d->data_mutex);

	adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach,
					&s421_m88rs2000_config,
					&d->i2c_adap);

	if (!adap->fe_adap[0].fe)
		return -EIO;

	if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
				&dw2104_ts2020_config,
				&d->i2c_adap)) {
		info("Attached RS2000/TS2020!");
		return 0;
	}

	info("Failed to attach RS2000/TS2020!");
	return -EIO;
}
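/*
 * The 0x0e command bursts in su3000_frontend_attach() and
 * t220_frontend_attach() above look like GPIO writes (a pin selector
 * followed by a level), powering the demodulator up and releasing it from
 * reset with a settling delay in between.  This reading is an inference
 * from the access pattern only; no vendor documentation is available.
 * Command 0x51 returns a single status byte whose exact purpose is not
 * documented here.
 */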
static int tt_s2_4600_frontend_attach_probe_demod(struct dvb_usb_device *d,
						  const int probe_addr)
{
	struct dw2102_state *state = d->priv;

	state->data[0] = 0x9;
	state->data[1] = 0x1;
	state->data[2] = 0x1;
	state->data[3] = probe_addr;
	state->data[4] = 0x0;

	if (dvb_usb_generic_rw(d, state->data, 5, state->data, 2, 0) < 0) {
		err("i2c probe for address 0x%x failed.", probe_addr);
		return 0;
	}

	if (state->data[0] != 8) /* fail(7) or error, no device at address */
		return 0;

	/* probing successful */
	return 1;
}

static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
{
	struct dvb_usb_device *d = adap->dev;
	struct dw2102_state *state = d->priv;
	struct i2c_adapter *i2c_adapter;
	struct i2c_client *client;
	struct i2c_board_info board_info;
	struct m88ds3103_platform_data m88ds3103_pdata = {};
	struct ts2020_config ts2020_config = {};
	int demod_addr;

	mutex_lock(&d->data_mutex);

	state->data[0] = 0xe;
	state->data[1] = 0x80;
	state->data[2] = 0x0;

	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
		err("command 0x0e transfer failed.");

	state->data[0] = 0xe;
	state->data[1] = 0x02;
	state->data[2] = 1;

	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
		err("command 0x0e transfer failed.");
	msleep(300);

	state->data[0] = 0xe;
	state->data[1] = 0x83;
	state->data[2] = 0;

	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
		err("command 0x0e transfer failed.");

	state->data[0] = 0xe;
	state->data[1] = 0x83;
	state->data[2] = 1;

	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
		err("command 0x0e transfer failed.");

	state->data[0] = 0x51;

	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
		err("command 0x51 transfer failed.");

	/* probe for demodulator i2c address */
	demod_addr = -1;
	if (tt_s2_4600_frontend_attach_probe_demod(d, 0x68))
		demod_addr = 0x68;
	else if (tt_s2_4600_frontend_attach_probe_demod(d, 0x69))
		demod_addr = 0x69;
	else if (tt_s2_4600_frontend_attach_probe_demod(d, 0x6a))
		demod_addr = 0x6a;

	mutex_unlock(&d->data_mutex);

	if (demod_addr < 0) {
		err("probing for demodulator failed. Is the external power switched on?");
		return -ENODEV;
	}

	/* attach demod */
	m88ds3103_pdata.clk = 27000000;
	m88ds3103_pdata.i2c_wr_max = 33;
	m88ds3103_pdata.ts_mode = M88DS3103_TS_CI;
	m88ds3103_pdata.ts_clk = 16000;
	m88ds3103_pdata.ts_clk_pol = 0;
	m88ds3103_pdata.spec_inv = 0;
	m88ds3103_pdata.agc = 0x99;
	m88ds3103_pdata.agc_inv = 0;
	m88ds3103_pdata.clk_out = M88DS3103_CLOCK_OUT_ENABLED;
	m88ds3103_pdata.envelope_mode = 0;
	m88ds3103_pdata.lnb_hv_pol = 1;
	m88ds3103_pdata.lnb_en_pol = 0;
	memset(&board_info, 0, sizeof(board_info));
	if (demod_addr == 0x6a)
		strscpy(board_info.type, "m88ds3103b", I2C_NAME_SIZE);
	else
		strscpy(board_info.type, "m88ds3103", I2C_NAME_SIZE);
	board_info.addr = demod_addr;
	board_info.platform_data = &m88ds3103_pdata;
	request_module("m88ds3103");
	client = i2c_new_client_device(&d->i2c_adap, &board_info);
	if (!i2c_client_has_driver(client))
		return -ENODEV;
	if (!try_module_get(client->dev.driver->owner)) {
		i2c_unregister_device(client);
		return -ENODEV;
	}
	adap->fe_adap[0].fe = m88ds3103_pdata.get_dvb_frontend(client);
	i2c_adapter = m88ds3103_pdata.get_i2c_adapter(client);

	state->i2c_client_demod = client;

	/* attach tuner */
	ts2020_config.fe = adap->fe_adap[0].fe;
	memset(&board_info, 0, sizeof(board_info));
	strscpy(board_info.type, "ts2022", I2C_NAME_SIZE);
	board_info.addr = 0x60;
	board_info.platform_data = &ts2020_config;
	request_module("ts2020");
	client = i2c_new_client_device(i2c_adapter, &board_info);

	if (!i2c_client_has_driver(client)) {
		dvb_frontend_detach(adap->fe_adap[0].fe);
		return -ENODEV;
	}

	if (!try_module_get(client->dev.driver->owner)) {
		i2c_unregister_device(client);
		dvb_frontend_detach(adap->fe_adap[0].fe);
		return -ENODEV;
	}

	/* delegate signal strength measurement to tuner */
	adap->fe_adap[0].fe->ops.read_signal_strength =
			adap->fe_adap[0].fe->ops.tuner_ops.get_rf_strength;

	state->i2c_client_tuner = client;

	/* hook fe: need to resync the slave fifo when signal locks */
	state->fe_read_status = adap->fe_adap[0].fe->ops.read_status;
	adap->fe_adap[0].fe->ops.read_status = tt_s2_4600_read_status;

	state->last_lock = 0;

	return 0;
}
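/*
 * On the TT S2-4600 family the demodulator answers on 0x68, 0x69 or 0x6a
 * depending on the board revision.  As the code above shows, an address of
 * 0x6a selects the "m88ds3103b" variant of the demod driver while 0x68 and
 * 0x69 load plain "m88ds3103"; the probe helper issues a one-byte tunnelled
 * I2C read and treats a leading status byte of 8 as an ACK.
 */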
static int dw2102_tuner_attach(struct dvb_usb_adapter *adap)
{
	dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60,
		&adap->dev->i2c_adap, DVB_PLL_OPERA1);
	return 0;
}

static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
{
	dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60,
		&adap->dev->i2c_adap, DVB_PLL_TUA6034);
	return 0;
}

static int dw2102_rc_query(struct dvb_usb_device *d)
{
	u8 key[2];
	struct i2c_msg msg = {
		.addr = DW2102_RC_QUERY,
		.flags = I2C_M_RD,
		.buf = key,
		.len = 2
	};

	if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
		if (msg.buf[0] != 0xff) {
			deb_rc("%s: rc code: %x, %x\n",
					__func__, key[0], key[1]);
			rc_keydown(d->rc_dev, RC_PROTO_UNKNOWN, key[0], 0);
		}
	}

	return 0;
}

static int prof_rc_query(struct dvb_usb_device *d)
{
	u8 key[2];
	struct i2c_msg msg = {
		.addr = DW2102_RC_QUERY,
		.flags = I2C_M_RD,
		.buf = key,
		.len = 2
	};

	if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
		if (msg.buf[0] != 0xff) {
			deb_rc("%s: rc code: %x, %x\n",
					__func__, key[0], key[1]);
			rc_keydown(d->rc_dev, RC_PROTO_UNKNOWN, key[0] ^ 0xff, 0);
		}
	}

	return 0;
}

static int su3000_rc_query(struct dvb_usb_device *d)
{
	u8 key[2];
	struct i2c_msg msg = {
		.addr = DW2102_RC_QUERY,
		.flags = I2C_M_RD,
		.buf = key,
		.len = 2
	};

	if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
		if (msg.buf[0] != 0xff) {
			deb_rc("%s: rc code: %x, %x\n",
					__func__, key[0], key[1]);
			rc_keydown(d->rc_dev, RC_PROTO_RC5,
				   RC_SCANCODE_RC5(key[1], key[0]), 0);
		}
	}

	return 0;
}
enum dw2102_table_entry {
	CYPRESS_DW2102,
	CYPRESS_DW2101,
	CYPRESS_DW2104,
	TEVII_S650,
	TERRATEC_CINERGY_S,
	CYPRESS_DW3101,
	TEVII_S630,
	PROF_1100,
	TEVII_S660,
	PROF_7500,
	GENIATECH_SU3000,
	HAUPPAUGE_MAX_S2,
	TERRATEC_CINERGY_S2_R1,
	TEVII_S480_1,
	TEVII_S480_2,
	GENIATECH_X3M_SPC1400HD,
	TEVII_S421,
	TEVII_S632,
	TERRATEC_CINERGY_S2_R2,
	TERRATEC_CINERGY_S2_R3,
	TERRATEC_CINERGY_S2_R4,
	TERRATEC_CINERGY_S2_1,
	TERRATEC_CINERGY_S2_2,
	GOTVIEW_SAT_HD,
	GENIATECH_T220,
	TECHNOTREND_CONNECT_S2_4600,
	TEVII_S482_1,
	TEVII_S482_2,
	TEVII_S662
};

static const struct usb_device_id dw2102_table[] = {
	DVB_USB_DEV(CYPRESS, CYPRESS_DW2102),
	DVB_USB_DEV(CYPRESS, CYPRESS_DW2101),
	DVB_USB_DEV(CYPRESS, CYPRESS_DW2104),
	DVB_USB_DEV(TEVII, TEVII_S650),
	DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_S),
	DVB_USB_DEV(CYPRESS, CYPRESS_DW3101),
	DVB_USB_DEV(TEVII, TEVII_S630),
	DVB_USB_DEV(PROF_1, PROF_1100),
	DVB_USB_DEV(TEVII, TEVII_S660),
	DVB_USB_DEV(PROF_2, PROF_7500),
	DVB_USB_DEV(GTEK, GENIATECH_SU3000),
	DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_MAX_S2),
	DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_S2_R1),
	DVB_USB_DEV(TEVII, TEVII_S480_1),
	DVB_USB_DEV(TEVII, TEVII_S480_2),
	DVB_USB_DEV(GTEK, GENIATECH_X3M_SPC1400HD),
	DVB_USB_DEV(TEVII, TEVII_S421),
	DVB_USB_DEV(TEVII, TEVII_S632),
	DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_S2_R2),
	DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_S2_R3),
	DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_S2_R4),
	DVB_USB_DEV(TERRATEC_2, TERRATEC_CINERGY_S2_1),
	DVB_USB_DEV(TERRATEC_2, TERRATEC_CINERGY_S2_2),
	DVB_USB_DEV(GOTVIEW, GOTVIEW_SAT_HD),
	DVB_USB_DEV(GTEK, GENIATECH_T220),
	DVB_USB_DEV(TECHNOTREND, TECHNOTREND_CONNECT_S2_4600),
	DVB_USB_DEV(TEVII, TEVII_S482_1),
	DVB_USB_DEV(TEVII, TEVII_S482_2),
	DVB_USB_DEV(TEVII, TEVII_S662),
	{ }
};

MODULE_DEVICE_TABLE(usb, dw2102_table);
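/*
 * dw2102_load_firmware() below follows the usual Cypress EZ-USB download
 * procedure: write 1 to the CPUCS register to hold the 8051 core in reset,
 * upload the firmware in 0x40-byte chunks with vendor request 0xa0, then
 * write 0 to CPUCS to restart the core.  Both CPUCS locations are poked
 * (0x7f92 for the AN21xx parts, 0xe600 for the FX2), presumably so the
 * same code serves either controller.
 */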
static int dw2102_load_firmware(struct usb_device *dev,
				const struct firmware *frmwr)
{
	u8 *b, *p;
	int ret = 0, i;
	u8 reset;
	u8 reset16[] = {0, 0, 0, 0, 0, 0, 0};
	const struct firmware *fw;

	switch (le16_to_cpu(dev->descriptor.idProduct)) {
	case 0x2101:
		ret = request_firmware(&fw, DW2101_FIRMWARE, &dev->dev);
		if (ret != 0) {
			err(err_str, DW2101_FIRMWARE);
			return ret;
		}
		break;
	default:
		fw = frmwr;
		break;
	}
	info("start downloading DW210X firmware");
	p = kmalloc(fw->size, GFP_KERNEL);
	reset = 1;
	/* stop the CPU */
	dw210x_op_rw(dev, 0xa0, 0x7f92, 0, &reset, 1, DW210X_WRITE_MSG);
	dw210x_op_rw(dev, 0xa0, 0xe600, 0, &reset, 1, DW210X_WRITE_MSG);

	if (p) {
		memcpy(p, fw->data, fw->size);
		for (i = 0; i < fw->size; i += 0x40) {
			b = (u8 *)p + i;
			if (dw210x_op_rw(dev, 0xa0, i, 0, b, 0x40,
					DW210X_WRITE_MSG) != 0x40) {
				err("error while transferring firmware");
				ret = -EINVAL;
				break;
			}
		}
		/* restart the CPU */
		reset = 0;
		if (ret || dw210x_op_rw(dev, 0xa0, 0x7f92, 0, &reset, 1,
				DW210X_WRITE_MSG) != 1) {
			err("could not restart the USB controller CPU.");
			ret = -EINVAL;
		}
		if (ret || dw210x_op_rw(dev, 0xa0, 0xe600, 0, &reset, 1,
				DW210X_WRITE_MSG) != 1) {
			err("could not restart the USB controller CPU.");
			ret = -EINVAL;
		}
		/* init registers */
		switch (le16_to_cpu(dev->descriptor.idProduct)) {
		case USB_PID_TEVII_S650:
			dw2104_properties.rc.core.rc_codes = RC_MAP_TEVII_NEC;
			fallthrough;
		case USB_PID_CYPRESS_DW2104:
			reset = 1;
			dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1,
					DW210X_WRITE_MSG);
			fallthrough;
		case USB_PID_CYPRESS_DW3101:
			reset = 0;
			dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
					DW210X_WRITE_MSG);
			break;
		case USB_PID_TERRATEC_CINERGY_S:
		case USB_PID_CYPRESS_DW2102:
			dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
					DW210X_WRITE_MSG);
			dw210x_op_rw(dev, 0xb9, 0x0000, 0, &reset16[0], 2,
					DW210X_READ_MSG);
			/* check STV0299 frontend */
			dw210x_op_rw(dev, 0xb5, 0, 0, &reset16[0], 2,
					DW210X_READ_MSG);
			if ((reset16[0] == 0xa1) || (reset16[0] == 0x80)) {
				dw2102_properties.i2c_algo = &dw2102_i2c_algo;
				dw2102_properties.adapter->fe[0].tuner_attach =
					&dw2102_tuner_attach;
				break;
			}
			/* check STV0288 frontend */
			reset16[0] = 0xd0;
			reset16[1] = 1;
			reset16[2] = 0;
			dw210x_op_rw(dev, 0xc2, 0, 0, &reset16[0], 3,
					DW210X_WRITE_MSG);
			dw210x_op_rw(dev, 0xc3, 0xd1, 0, &reset16[0], 3,
					DW210X_READ_MSG);
			if (reset16[2] == 0x11) {
				dw2102_properties.i2c_algo =
					&dw2102_earda_i2c_algo;
				break;
			}
			fallthrough;
		case 0x2101:
			dw210x_op_rw(dev, 0xbc, 0x0030, 0, &reset16[0], 2,
					DW210X_READ_MSG);
			dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7,
					DW210X_READ_MSG);
			dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7,
					DW210X_READ_MSG);
			dw210x_op_rw(dev, 0xb9, 0x0000, 0, &reset16[0], 2,
					DW210X_READ_MSG);
			break;
		}

		msleep(100);
		kfree(p);
	}

	if (le16_to_cpu(dev->descriptor.idProduct) == 0x2101)
		release_firmware(fw);
	return ret;
}

static struct dvb_usb_device_properties dw2102_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,
	.firmware = DW2102_FIRMWARE,
	.no_reconnect = 1,

	.i2c_algo = &dw2102_serit_i2c_algo,

	.rc.core = {
		.rc_interval = 150,
		.rc_codes = RC_MAP_DM1105_NEC,
		.module_name = "dw2102",
		.allowed_protos = RC_PROTO_BIT_NEC,
		.rc_query = dw2102_rc_query,
	},

	.generic_bulk_ctrl_endpoint = 0x81,
	/* parameter for the MPEG2-data transfer */
	.num_adapters = 1,
	.download_firmware = dw2102_load_firmware,
	.read_mac_address = dw210x_read_mac_address,
	.adapter = {
		{
		.num_frontends = 1,
		.fe = {{
			.frontend_attach = dw2102_frontend_attach,
			.stream = {
				.type = USB_BULK,
				.count = 8,
				.endpoint = 0x82,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			},
		}},
		}
	},
	.num_device_descs = 3,
	.devices = {
		{"DVBWorld DVB-S 2102 USB2.0",
			{&dw2102_table[CYPRESS_DW2102], NULL},
			{NULL},
		},
		{"DVBWorld DVB-S 2101 USB2.0",
			{&dw2102_table[CYPRESS_DW2101], NULL},
			{NULL},
		},
		{"TerraTec Cinergy S USB",
			{&dw2102_table[TERRATEC_CINERGY_S], NULL},
			{NULL},
		},
	}
};

static struct dvb_usb_device_properties dw2104_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,
	.firmware = DW2104_FIRMWARE,
	.no_reconnect = 1,

	.i2c_algo = &dw2104_i2c_algo,
	.rc.core = {
		.rc_interval = 150,
		.rc_codes = RC_MAP_DM1105_NEC,
		.module_name = "dw2102",
		.allowed_protos = RC_PROTO_BIT_NEC,
		.rc_query = dw2102_rc_query,
	},

	.generic_bulk_ctrl_endpoint = 0x81,
	/* parameter for the MPEG2-data transfer */
	.num_adapters = 1,
	.download_firmware = dw2102_load_firmware,
	.read_mac_address = dw210x_read_mac_address,
	.adapter = {
		{
		.num_frontends = 1,
		.fe = {{
			.frontend_attach = dw2104_frontend_attach,
			.stream = {
				.type = USB_BULK,
				.count = 8,
				.endpoint = 0x82,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			},
		}},
		}
	},
	.num_device_descs = 2,
	.devices = {
		{ "DVBWorld DW2104 USB2.0",
			{&dw2102_table[CYPRESS_DW2104], NULL},
			{NULL},
		},
		{ "TeVii S650 USB2.0",
			{&dw2102_table[TEVII_S650], NULL},
			{NULL},
		},
	}
};

static struct dvb_usb_device_properties dw3101_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,
	.firmware = DW3101_FIRMWARE,
	.no_reconnect = 1,

	.i2c_algo = &dw3101_i2c_algo,
	.rc.core = {
		.rc_interval = 150,
		.rc_codes = RC_MAP_DM1105_NEC,
		.module_name = "dw2102",
		.allowed_protos = RC_PROTO_BIT_NEC,
		.rc_query = dw2102_rc_query,
	},

	.generic_bulk_ctrl_endpoint = 0x81,
	/* parameter for the MPEG2-data transfer */
	.num_adapters = 1,
	.download_firmware = dw2102_load_firmware,
	.read_mac_address = dw210x_read_mac_address,
	.adapter = {
		{
		.num_frontends = 1,
		.fe = {{
			.frontend_attach = dw3101_frontend_attach,
			.tuner_attach = dw3101_tuner_attach,
			.stream = {
				.type = USB_BULK,
				.count = 8,
				.endpoint = 0x82,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			},
		}},
		}
	},
	.num_device_descs = 1,
	.devices = {
		{ "DVBWorld DVB-C 3101 USB2.0",
			{&dw2102_table[CYPRESS_DW3101], NULL},
			{NULL},
		},
	}
};
static struct dvb_usb_device_properties s6x0_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,
	.size_of_priv = sizeof(struct dw2102_state),
	.firmware = S630_FIRMWARE,
	.no_reconnect = 1,

	.i2c_algo = &s6x0_i2c_algo,
	.rc.core = {
		.rc_interval = 150,
		.rc_codes = RC_MAP_TEVII_NEC,
		.module_name = "dw2102",
		.allowed_protos = RC_PROTO_BIT_NEC,
		.rc_query = dw2102_rc_query,
	},

	.generic_bulk_ctrl_endpoint = 0x81,
	.num_adapters = 1,
	.download_firmware = dw2102_load_firmware,
	.read_mac_address = s6x0_read_mac_address,
	.adapter = {
		{
		.num_frontends = 1,
		.fe = {{
			.frontend_attach = zl100313_frontend_attach,
			.stream = {
				.type = USB_BULK,
				.count = 8,
				.endpoint = 0x82,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			},
		}},
		}
	},
	.num_device_descs = 1,
	.devices = {
		{"TeVii S630 USB",
			{&dw2102_table[TEVII_S630], NULL},
			{NULL},
		},
	}
};

static struct dvb_usb_device_properties p1100_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,
	.size_of_priv = sizeof(struct dw2102_state),
	.firmware = P1100_FIRMWARE,
	.no_reconnect = 1,

	.i2c_algo = &s6x0_i2c_algo,
	.rc.core = {
		.rc_interval = 150,
		.rc_codes = RC_MAP_TBS_NEC,
		.module_name = "dw2102",
		.allowed_protos = RC_PROTO_BIT_NEC,
		.rc_query = prof_rc_query,
	},

	.generic_bulk_ctrl_endpoint = 0x81,
	.num_adapters = 1,
	.download_firmware = dw2102_load_firmware,
	.read_mac_address = s6x0_read_mac_address,
	.adapter = {
		{
		.num_frontends = 1,
		.fe = {{
			.frontend_attach = stv0288_frontend_attach,
			.stream = {
				.type = USB_BULK,
				.count = 8,
				.endpoint = 0x82,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			},
		}},
		}
	},
	.num_device_descs = 1,
	.devices = {
		{"Prof 1100 USB ",
			{&dw2102_table[PROF_1100], NULL},
			{NULL},
		},
	}
};

static struct dvb_usb_device_properties s660_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,
	.size_of_priv = sizeof(struct dw2102_state),
	.firmware = S660_FIRMWARE,
	.no_reconnect = 1,

	.i2c_algo = &s6x0_i2c_algo,
	.rc.core = {
		.rc_interval = 150,
		.rc_codes = RC_MAP_TEVII_NEC,
		.module_name = "dw2102",
		.allowed_protos = RC_PROTO_BIT_NEC,
		.rc_query = dw2102_rc_query,
	},

	.generic_bulk_ctrl_endpoint = 0x81,
	.num_adapters = 1,
	.download_firmware = dw2102_load_firmware,
	.read_mac_address = s6x0_read_mac_address,
	.adapter = {
		{
		.num_frontends = 1,
		.fe = {{
			.frontend_attach = ds3000_frontend_attach,
			.stream = {
				.type = USB_BULK,
				.count = 8,
				.endpoint = 0x82,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			},
		}},
		}
	},
	.num_device_descs = 3,
	.devices = {
		{"TeVii S660 USB",
			{&dw2102_table[TEVII_S660], NULL},
			{NULL},
		},
		{"TeVii S480.1 USB",
			{&dw2102_table[TEVII_S480_1], NULL},
			{NULL},
		},
		{"TeVii S480.2 USB",
			{&dw2102_table[TEVII_S480_2], NULL},
			{NULL},
		},
	}
};

static struct dvb_usb_device_properties p7500_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,
	.size_of_priv = sizeof(struct dw2102_state),
	.firmware = P7500_FIRMWARE,
	.no_reconnect = 1,

	.i2c_algo = &s6x0_i2c_algo,
	.rc.core = {
		.rc_interval = 150,
		.rc_codes = RC_MAP_TBS_NEC,
		.module_name = "dw2102",
		.allowed_protos = RC_PROTO_BIT_NEC,
		.rc_query = prof_rc_query,
	},

	.generic_bulk_ctrl_endpoint = 0x81,
	.num_adapters = 1,
	.download_firmware = dw2102_load_firmware,
	.read_mac_address = s6x0_read_mac_address,
	.adapter = {
		{
		.num_frontends = 1,
		.fe = {{
			.frontend_attach = prof_7500_frontend_attach,
			.stream = {
				.type = USB_BULK,
				.count = 8,
				.endpoint = 0x82,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			},
		}},
		}
	},
	.num_device_descs = 1,
	.devices = {
		{"Prof 7500 USB DVB-S2",
			{&dw2102_table[PROF_7500], NULL},
			{NULL},
		},
	}
};
static struct dvb_usb_device_properties su3000_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,
	.size_of_priv = sizeof(struct dw2102_state),
	.power_ctrl = su3000_power_ctrl,
	.num_adapters = 1,
	.identify_state = su3000_identify_state,
	.i2c_algo = &su3000_i2c_algo,

	.rc.core = {
		.rc_interval = 150,
		.rc_codes = RC_MAP_SU3000,
		.module_name = "dw2102",
		.allowed_protos = RC_PROTO_BIT_RC5,
		.rc_query = su3000_rc_query,
	},

	.read_mac_address = su3000_read_mac_address,

	.generic_bulk_ctrl_endpoint = 0x01,

	.adapter = {
		{
		.num_frontends = 1,
		.fe = {{
			.streaming_ctrl = su3000_streaming_ctrl,
			.frontend_attach = su3000_frontend_attach,
			.stream = {
				.type = USB_BULK,
				.count = 8,
				.endpoint = 0x82,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			}
		}},
		}
	},
	.num_device_descs = 9,
	.devices = {
		{ "SU3000HD DVB-S USB2.0",
			{ &dw2102_table[GENIATECH_SU3000], NULL },
			{ NULL },
		},
		{ "Hauppauge MAX S2 or WinTV NOVA HD USB2.0",
			{ &dw2102_table[HAUPPAUGE_MAX_S2], NULL },
			{ NULL },
		},
		{ "Terratec Cinergy S2 USB HD",
			{ &dw2102_table[TERRATEC_CINERGY_S2_R1], NULL },
			{ NULL },
		},
		{ "X3M TV SPC1400HD PCI",
			{ &dw2102_table[GENIATECH_X3M_SPC1400HD], NULL },
			{ NULL },
		},
		{ "Terratec Cinergy S2 USB HD Rev.2",
			{ &dw2102_table[TERRATEC_CINERGY_S2_R2], NULL },
			{ NULL },
		},
		{ "Terratec Cinergy S2 USB HD Rev.3",
			{ &dw2102_table[TERRATEC_CINERGY_S2_R3], NULL },
			{ NULL },
		},
		{ "Terratec Cinergy S2 PCIe Dual Port 1",
			{ &dw2102_table[TERRATEC_CINERGY_S2_1], NULL },
			{ NULL },
		},
		{ "Terratec Cinergy S2 PCIe Dual Port 2",
			{ &dw2102_table[TERRATEC_CINERGY_S2_2], NULL },
			{ NULL },
		},
		{ "GOTVIEW Satellite HD",
			{ &dw2102_table[GOTVIEW_SAT_HD], NULL },
			{ NULL },
		},
	}
};

static struct dvb_usb_device_properties s421_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,
	.size_of_priv = sizeof(struct dw2102_state),
	.power_ctrl = su3000_power_ctrl,
	.num_adapters = 1,
	.identify_state = su3000_identify_state,
	.i2c_algo = &su3000_i2c_algo,

	.rc.core = {
		.rc_interval = 150,
		.rc_codes = RC_MAP_SU3000,
		.module_name = "dw2102",
		.allowed_protos = RC_PROTO_BIT_RC5,
		.rc_query = su3000_rc_query,
	},

	.read_mac_address = su3000_read_mac_address,

	.generic_bulk_ctrl_endpoint = 0x01,

	.adapter = {
		{
		.num_frontends = 1,
		.fe = {{
			.streaming_ctrl = su3000_streaming_ctrl,
			.frontend_attach = m88rs2000_frontend_attach,
			.stream = {
				.type = USB_BULK,
				.count = 8,
				.endpoint = 0x82,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			}
		}},
		}
	},
	.num_device_descs = 2,
	.devices = {
		{ "TeVii S421 PCI",
			{ &dw2102_table[TEVII_S421], NULL },
			{ NULL },
		},
		{ "TeVii S632 USB",
			{ &dw2102_table[TEVII_S632], NULL },
			{ NULL },
		},
	}
};

static struct dvb_usb_device_properties t220_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,
	.size_of_priv = sizeof(struct dw2102_state),
	.power_ctrl = su3000_power_ctrl,
	.num_adapters = 1,
	.identify_state = su3000_identify_state,
	.i2c_algo = &su3000_i2c_algo,

	.rc.core = {
		.rc_interval = 150,
		.rc_codes = RC_MAP_SU3000,
		.module_name = "dw2102",
		.allowed_protos = RC_PROTO_BIT_RC5,
		.rc_query = su3000_rc_query,
	},

	.read_mac_address = su3000_read_mac_address,

	.generic_bulk_ctrl_endpoint = 0x01,

	.adapter = {
		{
		.num_frontends = 1,
		.fe = {{
			.streaming_ctrl = su3000_streaming_ctrl,
			.frontend_attach = t220_frontend_attach,
			.stream = {
				.type = USB_BULK,
				.count = 8,
				.endpoint = 0x82,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			}
		}},
		}
	},
	.num_device_descs = 1,
	.devices = {
		{ "Geniatech T220 DVB-T/T2 USB2.0",
			{ &dw2102_table[GENIATECH_T220], NULL },
			{ NULL },
		},
	}
};
static struct dvb_usb_device_properties tt_s2_4600_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,
	.size_of_priv = sizeof(struct dw2102_state),
	.power_ctrl = su3000_power_ctrl,
	.num_adapters = 1,
	.identify_state = su3000_identify_state,
	.i2c_algo = &su3000_i2c_algo,

	.rc.core = {
		.rc_interval = 250,
		.rc_codes = RC_MAP_TT_1500,
		.module_name = "dw2102",
		.allowed_protos = RC_PROTO_BIT_RC5,
		.rc_query = su3000_rc_query,
	},

	.read_mac_address = su3000_read_mac_address,

	.generic_bulk_ctrl_endpoint = 0x01,

	.adapter = {
		{
		.num_frontends = 1,
		.fe = {{
			.streaming_ctrl = su3000_streaming_ctrl,
			.frontend_attach = tt_s2_4600_frontend_attach,
			.stream = {
				.type = USB_BULK,
				.count = 8,
				.endpoint = 0x82,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			}
		}},
		}
	},
	.num_device_descs = 5,
	.devices = {
		{ "TechnoTrend TT-connect S2-4600",
			{ &dw2102_table[TECHNOTREND_CONNECT_S2_4600], NULL },
			{ NULL },
		},
		{ "TeVii S482 (tuner 1)",
			{ &dw2102_table[TEVII_S482_1], NULL },
			{ NULL },
		},
		{ "TeVii S482 (tuner 2)",
			{ &dw2102_table[TEVII_S482_2], NULL },
			{ NULL },
		},
		{ "Terratec Cinergy S2 USB BOX",
			{ &dw2102_table[TERRATEC_CINERGY_S2_R4], NULL },
			{ NULL },
		},
		{ "TeVii S662",
			{ &dw2102_table[TEVII_S662], NULL },
			{ NULL },
		},
	}
};
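/*
 * There is no single device table per properties structure, so probing
 * simply offers the interface to every dvb_usb_device_properties in turn:
 * dvb_usb_device_init() succeeds (returns 0) only for the properties set
 * whose .devices list contains the interface's USB ID, and dw2102_probe()
 * reports -ENODEV once every variant has refused the device.
 */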
static int dw2102_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	if (!(dvb_usb_device_init(intf, &dw2102_properties,
			THIS_MODULE, NULL, adapter_nr) &&
	      dvb_usb_device_init(intf, &dw2104_properties,
			THIS_MODULE, NULL, adapter_nr) &&
	      dvb_usb_device_init(intf, &dw3101_properties,
			THIS_MODULE, NULL, adapter_nr) &&
	      dvb_usb_device_init(intf, &s6x0_properties,
			THIS_MODULE, NULL, adapter_nr) &&
	      dvb_usb_device_init(intf, &p1100_properties,
			THIS_MODULE, NULL, adapter_nr) &&
	      dvb_usb_device_init(intf, &s660_properties,
			THIS_MODULE, NULL, adapter_nr) &&
	      dvb_usb_device_init(intf, &p7500_properties,
			THIS_MODULE, NULL, adapter_nr) &&
	      dvb_usb_device_init(intf, &s421_properties,
			THIS_MODULE, NULL, adapter_nr) &&
	      dvb_usb_device_init(intf, &su3000_properties,
			THIS_MODULE, NULL, adapter_nr) &&
	      dvb_usb_device_init(intf, &t220_properties,
			THIS_MODULE, NULL, adapter_nr) &&
	      dvb_usb_device_init(intf, &tt_s2_4600_properties,
			THIS_MODULE, NULL, adapter_nr))) {
		return 0;
	}

	return -ENODEV;
}

static void dw2102_disconnect(struct usb_interface *intf)
{
	struct dvb_usb_device *d = usb_get_intfdata(intf);
	struct dw2102_state *st = d->priv;
	struct i2c_client *client;

	/* remove I2C client for tuner */
	client = st->i2c_client_tuner;
	if (client) {
		module_put(client->dev.driver->owner);
		i2c_unregister_device(client);
	}

	/* remove I2C client for demodulator */
	client = st->i2c_client_demod;
	if (client) {
		module_put(client->dev.driver->owner);
		i2c_unregister_device(client);
	}

	dvb_usb_device_exit(intf);
}

static struct usb_driver dw2102_driver = {
	.name = "dw2102",
	.probe = dw2102_probe,
	.disconnect = dw2102_disconnect,
	.id_table = dw2102_table,
};

module_usb_driver(dw2102_driver);

MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101 USB2.0, TeVii S421, S480, S482, S600, S630, S632, S650, TeVii S660, S662, Prof 1100, 7500 USB2.0, Geniatech SU3000, T220, TechnoTrend S2-4600, Terratec Cinergy S2 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DW2101_FIRMWARE);
MODULE_FIRMWARE(DW2102_FIRMWARE);
MODULE_FIRMWARE(DW2104_FIRMWARE);
MODULE_FIRMWARE(DW3101_FIRMWARE);
MODULE_FIRMWARE(S630_FIRMWARE);
MODULE_FIRMWARE(S660_FIRMWARE);
MODULE_FIRMWARE(P1100_FIRMWARE);
MODULE_FIRMWARE(P7500_FIRMWARE);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * keyspan_remote: USB driver for the Keyspan DMR
 *
 * Copyright (C) 2005 Zymeta Corporation - Michael Downey (downey@zymeta.com)
 *
 * This driver has been put together with the support of Innosys, Inc.
 * and Keyspan, Inc the manufacturers of the Keyspan USB DMR product.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/input.h>

/* Parameters that can be passed to the driver. */
static int debug;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Enable extra debug messages and information");

/* Vendor and product ids */
#define USB_KEYSPAN_VENDOR_ID		0x06CD
#define USB_KEYSPAN_PRODUCT_UIA11	0x0202

/* Defines for converting the data from the remote. */
#define ZERO		0x18
#define ZERO_MASK	0x1F	/* 5 bits for a 0 */
#define ONE		0x3C
#define ONE_MASK	0x3F	/* 6 bits for a 1 */
#define SYNC		0x3F80
#define SYNC_MASK	0x3FFF	/* 14 bits for a SYNC sequence */
#define STOP		0x00
#define STOP_MASK	0x1F	/* 5 bits for the STOP sequence */
#define GAP		0xFF

#define RECV_SIZE	8	/* The UIA-11 type has an 8 byte limit. */
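/*
 * The remote uses a pulse-width style encoding, as the masks above imply:
 * a 0 occupies 5 bit-times (ZERO/ZERO_MASK), a 1 occupies 6 (ONE/ONE_MASK),
 * a message starts with a 14 bit-time SYNC pattern and ends with a 5
 * bit-time STOP, and idle line time arrives as 0xFF filler (GAP) bytes.
 */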
/*
 * Table that maps the 31 possible keycodes to input keys.
 * Currently there are 15 and 17 button models so RESERVED codes
 * are blank areas in the mapping.
 */
static const unsigned short keyspan_key_table[] = {
	KEY_RESERVED,		/* 0 is just a place holder. */
	KEY_RESERVED,
	KEY_STOP,
	KEY_PLAYCD,
	KEY_RESERVED,
	KEY_PREVIOUSSONG,
	KEY_REWIND,
	KEY_FORWARD,
	KEY_NEXTSONG,
	KEY_RESERVED,
	KEY_RESERVED,
	KEY_RESERVED,
	KEY_PAUSE,
	KEY_VOLUMEUP,
	KEY_RESERVED,
	KEY_RESERVED,
	KEY_RESERVED,
	KEY_VOLUMEDOWN,
	KEY_RESERVED,
	KEY_UP,
	KEY_RESERVED,
	KEY_MUTE,
	KEY_LEFT,
	KEY_ENTER,
	KEY_RIGHT,
	KEY_RESERVED,
	KEY_RESERVED,
	KEY_DOWN,
	KEY_RESERVED,
	KEY_KPASTERISK,
	KEY_RESERVED,
	KEY_MENU
};

/* table of devices that work with this driver */
static const struct usb_device_id keyspan_table[] = {
	{ USB_DEVICE(USB_KEYSPAN_VENDOR_ID, USB_KEYSPAN_PRODUCT_UIA11) },
	{ }			/* Terminating entry */
};

/* Structure to store all the real stuff that a remote sends to us. */
struct keyspan_message {
	u16 system;
	u8 button;
	u8 toggle;
};

/* Structure used for all the bit testing magic needed to be done. */
struct bit_tester {
	u32 tester;
	int len;
	int pos;
	int bits_left;
	u8 buffer[32];
};

/* Structure to hold all of our driver specific stuff */
struct usb_keyspan {
	char name[128];
	char phys[64];
	unsigned short keymap[ARRAY_SIZE(keyspan_key_table)];
	struct usb_device *udev;
	struct input_dev *input;
	struct usb_interface *interface;
	struct usb_endpoint_descriptor *in_endpoint;
	struct urb *irq_urb;
	int open;
	dma_addr_t in_dma;
	unsigned char *in_buffer;

	/* variables used to parse messages from remote. */
	struct bit_tester data;
	int stage;
	int toggle;
};

static struct usb_driver keyspan_driver;

/*
 * Debug routine that prints out what we've received from the remote.
 */
static void keyspan_print(struct usb_keyspan *dev) /*unsigned char* data)*/
{
	char codes[4 * RECV_SIZE];
	int i;

	for (i = 0; i < RECV_SIZE; i++)
		snprintf(codes + i * 3, 4, "%02x ", dev->in_buffer[i]);

	dev_info(&dev->udev->dev, "%s\n", codes);
}

/*
 * Routine that manages the bit_tester structure.  It makes sure that there
 * are at least bits_needed bits loaded into the tester.
 */
static int keyspan_load_tester(struct usb_keyspan *dev, int bits_needed)
{
	if (dev->data.bits_left >= bits_needed)
		return 0;

	/*
	 * Somehow we've missed the last message.  The message will be
	 * repeated though, so it's not too big a deal.
	 */
	if (dev->data.pos >= dev->data.len) {
		dev_dbg(&dev->interface->dev,
			"%s - Error ran out of data. pos: %d, len: %d\n",
			__func__, dev->data.pos, dev->data.len);
		return -1;
	}

	/* Load as much as we can into the tester. */
	while ((dev->data.bits_left + 7 < (sizeof(dev->data.tester) * 8)) &&
	       (dev->data.pos < dev->data.len)) {
		dev->data.tester +=
			(dev->data.buffer[dev->data.pos++] << dev->data.bits_left);
		dev->data.bits_left += 8;
	}

	return 0;
}

static void keyspan_report_button(struct usb_keyspan *remote, int button,
				  int press)
{
	struct input_dev *input = remote->input;

	input_event(input, EV_MSC, MSC_SCAN, button);
	input_report_key(input, remote->keymap[button], press);
	input_sync(input);
}
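/*
 * keyspan_check_data() below consumes the URB payload through the
 * bit_tester: bytes are appended at offset bits_left (so bits are examined
 * LSB-first), matched against the ZERO/ONE/SYNC/STOP masks defined above,
 * and shifted out of the tester as each symbol is recognised.
 */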
/*
 * Routine that handles all the logic needed to parse out the message from
 * the remote.
 */
static void keyspan_check_data(struct usb_keyspan *remote)
{
	int i;
	int found = 0;
	struct keyspan_message message;

	switch (remote->stage) {
	case 0:
		/*
		 * In stage 0 we want to find the start of a message.  The
		 * remote sends a 0xFF as filler, so the first byte that
		 * isn't a FF should be the start of a new message.
		 */
		for (i = 0; i < RECV_SIZE && remote->in_buffer[i] == GAP; ++i);

		if (i < RECV_SIZE) {
			memcpy(remote->data.buffer, remote->in_buffer, RECV_SIZE);
			remote->data.len = RECV_SIZE;
			remote->data.pos = 0;
			remote->data.tester = 0;
			remote->data.bits_left = 0;
			remote->stage = 1;
		}
		break;

	case 1:
		/*
		 * In stage 1 we should have 16 bytes and should be able to
		 * detect a SYNC.  The SYNC is 14 bits: 7 0's and then 7 1's.
		 */
		memcpy(remote->data.buffer + remote->data.len,
		       remote->in_buffer, RECV_SIZE);
		remote->data.len += RECV_SIZE;

		found = 0;
		while ((remote->data.bits_left >= 14 ||
			remote->data.pos < remote->data.len) && !found) {
			for (i = 0; i < 8; ++i) {
				if (keyspan_load_tester(remote, 14) != 0) {
					remote->stage = 0;
					return;
				}

				if ((remote->data.tester & SYNC_MASK) == SYNC) {
					remote->data.tester =
						remote->data.tester >> 14;
					remote->data.bits_left -= 14;
					found = 1;
					break;
				} else {
					remote->data.tester =
						remote->data.tester >> 1;
					--remote->data.bits_left;
				}
			}
		}

		if (!found) {
			remote->stage = 0;
			remote->data.len = 0;
		} else {
			remote->stage = 2;
		}
		break;

	case 2:
		/*
		 * In stage 2 we should have 24 bytes, which will be enough
		 * for a full message.  We need to parse out the system code,
		 * button code, toggle code, and stop.
		 */
		memcpy(remote->data.buffer + remote->data.len,
		       remote->in_buffer, RECV_SIZE);
		remote->data.len += RECV_SIZE;

		message.system = 0;
		for (i = 0; i < 9; i++) {
			keyspan_load_tester(remote, 6);

			if ((remote->data.tester & ZERO_MASK) == ZERO) {
				message.system = message.system << 1;
				remote->data.tester = remote->data.tester >> 5;
				remote->data.bits_left -= 5;
			} else if ((remote->data.tester & ONE_MASK) == ONE) {
				message.system = (message.system << 1) + 1;
				remote->data.tester = remote->data.tester >> 6;
				remote->data.bits_left -= 6;
			} else {
				dev_err(&remote->interface->dev,
					"%s - Unknown sequence found in system data.\n",
					__func__);
				remote->stage = 0;
				return;
			}
		}

		message.button = 0;
		for (i = 0; i < 5; i++) {
			keyspan_load_tester(remote, 6);

			if ((remote->data.tester & ZERO_MASK) == ZERO) {
				message.button = message.button << 1;
				remote->data.tester = remote->data.tester >> 5;
				remote->data.bits_left -= 5;
			} else if ((remote->data.tester & ONE_MASK) == ONE) {
				message.button = (message.button << 1) + 1;
				remote->data.tester = remote->data.tester >> 6;
				remote->data.bits_left -= 6;
			} else {
				dev_err(&remote->interface->dev,
					"%s - Unknown sequence found in button data.\n",
					__func__);
				remote->stage = 0;
				return;
			}
		}

		keyspan_load_tester(remote, 6);
		if ((remote->data.tester & ZERO_MASK) == ZERO) {
			message.toggle = 0;
			remote->data.tester = remote->data.tester >> 5;
			remote->data.bits_left -= 5;
		} else if ((remote->data.tester & ONE_MASK) == ONE) {
			message.toggle = 1;
			remote->data.tester = remote->data.tester >> 6;
			remote->data.bits_left -= 6;
		} else {
			dev_err(&remote->interface->dev,
				"%s - Error in message, invalid toggle.\n",
				__func__);
			remote->stage = 0;
			return;
		}

		keyspan_load_tester(remote, 5);
		if ((remote->data.tester & STOP_MASK) == STOP) {
			remote->data.tester = remote->data.tester >> 5;
			remote->data.bits_left -= 5;
		} else {
			dev_err(&remote->interface->dev,
				"Bad message received, no stop bit found.\n");
		}

		dev_dbg(&remote->interface->dev,
			"%s found valid message: system: %d, button: %d, toggle: %d\n",
			__func__, message.system, message.button, message.toggle);

		if (message.toggle != remote->toggle) {
			keyspan_report_button(remote, message.button, 1);
			keyspan_report_button(remote, message.button, 0);
			remote->toggle = message.toggle;
		}

		remote->stage = 0;
		break;
	}
}
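/*
 * The remote appears to repeat its message while a key is held, flipping
 * the toggle bit only on a new key press.  That is why keyspan_check_data()
 * reports a press/release pair only when message.toggle differs from the
 * stored remote->toggle, and why probe initialises remote->toggle to -1 so
 * the very first message always counts as a new press.
 */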
remote. */ static int keyspan_setup(struct usb_device* dev) { int retval = 0; retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x11, 0x40, 0x5601, 0x0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n", __func__, retval); return(retval); } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x44, 0x40, 0x0, 0x0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n", __func__, retval); return(retval); } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x22, 0x40, 0x0, 0x0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n", __func__, retval); return(retval); } dev_dbg(&dev->dev, "%s - Setup complete.\n", __func__); return(retval); } /* * Routine used to handle a new message that has come in. */ static void keyspan_irq_recv(struct urb *urb) { struct usb_keyspan *dev = urb->context; int retval; /* Check our status in case we need to bail out early. */ switch (urb->status) { case 0: break; /* Device went away so don't keep trying to read from it. */ case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: return; default: goto resubmit; } if (debug) keyspan_print(dev); keyspan_check_data(dev); resubmit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&dev->interface->dev, "%s - usb_submit_urb failed with result: %d\n", __func__, retval); } static int keyspan_open(struct input_dev *dev) { struct usb_keyspan *remote = input_get_drvdata(dev); remote->irq_urb->dev = remote->udev; if (usb_submit_urb(remote->irq_urb, GFP_KERNEL)) return -EIO; return 0; } static void keyspan_close(struct input_dev *dev) { struct usb_keyspan *remote = input_get_drvdata(dev); usb_kill_urb(remote->irq_urb); } static struct usb_endpoint_descriptor *keyspan_get_in_endpoint(struct usb_host_interface *iface) { struct usb_endpoint_descriptor *endpoint; int i; for (i = 0; i < iface->desc.bNumEndpoints; ++i) { endpoint = &iface->endpoint[i].desc; if (usb_endpoint_is_int_in(endpoint)) { /* we found our interrupt in endpoint */ return endpoint; } } return NULL; } /* * Routine that sets up the driver to handle a specific USB device detected on the bus. */ static int keyspan_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct usb_endpoint_descriptor *endpoint; struct usb_keyspan *remote; struct input_dev *input_dev; int i, error; endpoint = keyspan_get_in_endpoint(interface->cur_altsetting); if (!endpoint) return -ENODEV; remote = kzalloc(sizeof(*remote), GFP_KERNEL); input_dev = input_allocate_device(); if (!remote || !input_dev) { error = -ENOMEM; goto fail1; } remote->udev = udev; remote->input = input_dev; remote->interface = interface; remote->in_endpoint = endpoint; remote->toggle = -1; /* Set to -1 so we will always not match the toggle from the first remote message. 
*/ remote->in_buffer = usb_alloc_coherent(udev, RECV_SIZE, GFP_KERNEL, &remote->in_dma); if (!remote->in_buffer) { error = -ENOMEM; goto fail1; } remote->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if (!remote->irq_urb) { error = -ENOMEM; goto fail2; } error = keyspan_setup(udev); if (error) { error = -ENODEV; goto fail3; } if (udev->manufacturer) strscpy(remote->name, udev->manufacturer, sizeof(remote->name)); if (udev->product) { if (udev->manufacturer) strlcat(remote->name, " ", sizeof(remote->name)); strlcat(remote->name, udev->product, sizeof(remote->name)); } if (!strlen(remote->name)) snprintf(remote->name, sizeof(remote->name), "USB Keyspan Remote %04x:%04x", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); usb_make_path(udev, remote->phys, sizeof(remote->phys)); strlcat(remote->phys, "/input0", sizeof(remote->phys)); memcpy(remote->keymap, keyspan_key_table, sizeof(remote->keymap)); input_dev->name = remote->name; input_dev->phys = remote->phys; usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &interface->dev; input_dev->keycode = remote->keymap; input_dev->keycodesize = sizeof(unsigned short); input_dev->keycodemax = ARRAY_SIZE(remote->keymap); input_set_capability(input_dev, EV_MSC, MSC_SCAN); __set_bit(EV_KEY, input_dev->evbit); for (i = 0; i < ARRAY_SIZE(keyspan_key_table); i++) __set_bit(keyspan_key_table[i], input_dev->keybit); __clear_bit(KEY_RESERVED, input_dev->keybit); input_set_drvdata(input_dev, remote); input_dev->open = keyspan_open; input_dev->close = keyspan_close; /* * Initialize the URB to access the device. * The urb gets sent to the device in keyspan_open() */ usb_fill_int_urb(remote->irq_urb, remote->udev, usb_rcvintpipe(remote->udev, endpoint->bEndpointAddress), remote->in_buffer, RECV_SIZE, keyspan_irq_recv, remote, endpoint->bInterval); remote->irq_urb->transfer_dma = remote->in_dma; remote->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* we can register the device now, as it is ready */ error = input_register_device(remote->input); if (error) goto fail3; /* save our data pointer in this interface device */ usb_set_intfdata(interface, remote); return 0; fail3: usb_free_urb(remote->irq_urb); fail2: usb_free_coherent(udev, RECV_SIZE, remote->in_buffer, remote->in_dma); fail1: kfree(remote); input_free_device(input_dev); return error; } /* * Routine called when a device is disconnected from the USB. */ static void keyspan_disconnect(struct usb_interface *interface) { struct usb_keyspan *remote; remote = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); if (remote) { /* We have a valid driver structure so clean up everything we allocated. */ input_unregister_device(remote->input); usb_kill_urb(remote->irq_urb); usb_free_urb(remote->irq_urb); usb_free_coherent(remote->udev, RECV_SIZE, remote->in_buffer, remote->in_dma); kfree(remote); } } /* * Standard driver set up sections */ static struct usb_driver keyspan_driver = { .name = "keyspan_remote", .probe = keyspan_probe, .disconnect = keyspan_disconnect, .id_table = keyspan_table }; module_usb_driver(keyspan_driver); MODULE_DEVICE_TABLE(usb, keyspan_table); MODULE_AUTHOR("Michael Downey <downey@zymeta.com>"); MODULE_DESCRIPTION("Driver for the USB Keyspan remote control."); MODULE_LICENSE("GPL");
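The parser above treats the remote's signal as a bit stream: keyspan_load_tester() tops up a 32-bit shift register LSB-first from the captured bytes, and stage 1 of keyspan_check_data() slides that window one bit at a time until the 14-bit SYNC (seven 0s followed by seven 1s) lines up under SYNC_MASK. Below is a standalone sketch of that scan; the SYNC pattern/width constants are assumptions modeled on the driver's comments, not values copied from its headers.

/*
 * Standalone sketch of the bit-stream scan in keyspan_check_data() stage 1.
 * Compiles with any C99 compiler; it does not touch the kernel. The 14-bit
 * SYNC value below (seven 0s then seven 1s, loaded LSB-first) is an assumed
 * encoding for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define SYNC_BITS	14
#define SYNC_MASK	((1u << SYNC_BITS) - 1)
#define SYNC		0x3F80u	/* assumed: bits 0-6 zero, bits 7-13 one */

struct bit_tester {
	uint32_t tester;	/* shift register; bit 0 is the oldest bit */
	int bits_left;		/* valid bits currently held in tester */
	int pos, len;		/* read cursor into buffer */
	uint8_t buffer[32];
};

/* Mirrors keyspan_load_tester(): top the shift register up, LSB-first. */
static int load_tester(struct bit_tester *t, int bits_needed)
{
	if (t->bits_left >= bits_needed)
		return 0;
	if (t->pos >= t->len)
		return -1;	/* capture exhausted */
	while (t->bits_left + 7 < 32 && t->pos < t->len) {
		t->tester += (uint32_t)t->buffer[t->pos++] << t->bits_left;
		t->bits_left += 8;
	}
	return 0;
}

/* Slide one bit at a time until SYNC lines up, as stage 1 does. */
static int find_sync(struct bit_tester *t)
{
	while (load_tester(t, SYNC_BITS) == 0) {
		if ((t->tester & SYNC_MASK) == SYNC) {
			t->tester >>= SYNC_BITS;
			t->bits_left -= SYNC_BITS;
			return 0;
		}
		t->tester >>= 1;
		t->bits_left--;
	}
	return -1;
}

int main(void)
{
	/* 0x80 0x3F carries the assumed SYNC aligned to bit 0. */
	struct bit_tester t = { .len = 3, .buffer = { 0x80, 0x3F, 0x18 } };

	if (find_sync(&t) == 0)
		printf("SYNC found, %d payload bits still buffered\n",
		       t.bits_left);
	else
		printf("no SYNC in capture\n");
	return 0;
}

Once the window is aligned, stage 2's ZERO/ONE/STOP matching is the same mask-compare-shift step with different widths, which is why a single load_tester() helper serves the whole parser.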
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Extension Header handling for IPv6
 * Linux INET6 implementation
 *
 * Authors:
 *	Pedro Roque <roque@di.fc.ul.pt>
 *	Andi Kleen <ak@muc.de>
 *	Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 */
/* Changes:
 *	yoshfuji		: ensure not to overrun while parsing
 *				  tlv options.
 *	Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
 *	YOSHIFUJI Hideaki @USAGI  Register inbound extension header
 *				  handlers as inet6_protocol{}.
*/ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/icmpv6.h> #include <linux/slab.h> #include <linux/export.h> #include <net/dst.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/rawv6.h> #include <net/ndisc.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/calipso.h> #if IS_ENABLED(CONFIG_IPV6_MIP6) #include <net/xfrm.h> #endif #include <linux/seg6.h> #include <net/seg6.h> #ifdef CONFIG_IPV6_SEG6_HMAC #include <net/seg6_hmac.h> #endif #include <net/rpl.h> #include <linux/ioam6.h> #include <linux/ioam6_genl.h> #include <net/ioam6.h> #include <net/dst_metadata.h> #include <linux/uaccess.h> /********************* Generic functions *********************/ /* An unknown option is detected, decide what to do */ static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff, bool disallow_unknowns) { if (disallow_unknowns) { /* If unknown TLVs are disallowed by configuration * then always silently drop packet. Note this also * means no ICMP parameter problem is sent which * could be a good property to mitigate a reflection DOS * attack. */ goto drop; } switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { case 0: /* ignore */ return true; case 1: /* drop packet */ break; case 3: /* Send ICMP if not a multicast address and drop packet */ /* Actually, it is redundant check. icmp_send will recheck in any case. */ if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) break; fallthrough; case 2: /* send ICMP PARM PROB regardless and drop packet */ icmpv6_param_prob_reason(skb, ICMPV6_UNK_OPTION, optoff, SKB_DROP_REASON_UNHANDLED_PROTO); return false; } drop: kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO); return false; } static bool ipv6_hop_ra(struct sk_buff *skb, int optoff); static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff); static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff); static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff); #if IS_ENABLED(CONFIG_IPV6_MIP6) static bool ipv6_dest_hao(struct sk_buff *skb, int optoff); #endif /* Parse tlv encoded option header (hop-by-hop or destination) */ static bool ip6_parse_tlv(bool hopbyhop, struct sk_buff *skb, int max_count) { int len = (skb_transport_header(skb)[1] + 1) << 3; const unsigned char *nh = skb_network_header(skb); int off = skb_network_header_len(skb); bool disallow_unknowns = false; int tlv_count = 0; int padlen = 0; if (unlikely(max_count < 0)) { disallow_unknowns = true; max_count = -max_count; } off += 2; len -= 2; while (len > 0) { int optlen, i; if (nh[off] == IPV6_TLV_PAD1) { padlen++; if (padlen > 7) goto bad; off++; len--; continue; } if (len < 2) goto bad; optlen = nh[off + 1] + 2; if (optlen > len) goto bad; if (nh[off] == IPV6_TLV_PADN) { /* RFC 2460 states that the purpose of PadN is * to align the containing header to multiples * of 8. 7 is therefore the highest valid value. * See also RFC 4942, Section 2.1.9.5. */ padlen += optlen; if (padlen > 7) goto bad; /* RFC 4942 recommends receiving hosts to * actively check PadN payload to contain * only zeroes. 
*/ for (i = 2; i < optlen; i++) { if (nh[off + i] != 0) goto bad; } } else { tlv_count++; if (tlv_count > max_count) goto bad; if (hopbyhop) { switch (nh[off]) { case IPV6_TLV_ROUTERALERT: if (!ipv6_hop_ra(skb, off)) return false; break; case IPV6_TLV_IOAM: if (!ipv6_hop_ioam(skb, off)) return false; nh = skb_network_header(skb); break; case IPV6_TLV_JUMBO: if (!ipv6_hop_jumbo(skb, off)) return false; break; case IPV6_TLV_CALIPSO: if (!ipv6_hop_calipso(skb, off)) return false; break; default: if (!ip6_tlvopt_unknown(skb, off, disallow_unknowns)) return false; break; } } else { switch (nh[off]) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_TLV_HAO: if (!ipv6_dest_hao(skb, off)) return false; break; #endif default: if (!ip6_tlvopt_unknown(skb, off, disallow_unknowns)) return false; break; } } padlen = 0; } off += optlen; len -= optlen; } if (len == 0) return true; bad: kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR); return false; } /***************************** Destination options header. *****************************/ #if IS_ENABLED(CONFIG_IPV6_MIP6) static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) { struct ipv6_destopt_hao *hao; struct inet6_skb_parm *opt = IP6CB(skb); struct ipv6hdr *ipv6h = ipv6_hdr(skb); SKB_DR(reason); int ret; if (opt->dsthao) { net_dbg_ratelimited("hao duplicated\n"); goto discard; } opt->dsthao = opt->dst1; opt->dst1 = 0; hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff); if (hao->length != 16) { net_dbg_ratelimited("hao invalid option length = %d\n", hao->length); SKB_DR_SET(reason, IP_INHDR); goto discard; } if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) { net_dbg_ratelimited("hao is not an unicast addr: %pI6\n", &hao->addr); SKB_DR_SET(reason, INVALID_PROTO); goto discard; } ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr, (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS); if (unlikely(ret < 0)) { SKB_DR_SET(reason, XFRM_POLICY); goto discard; } if (skb_cloned(skb)) { if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) goto discard; /* update all variable using below by copied skbuff */ hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff); ipv6h = ipv6_hdr(skb); } if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; swap(ipv6h->saddr, hao->addr); if (skb->tstamp == 0) __net_timestamp(skb); return true; discard: kfree_skb_reason(skb, reason); return false; } #endif static int ipv6_destopt_rcv(struct sk_buff *skb) { struct inet6_dev *idev = __in6_dev_get(skb->dev); struct inet6_skb_parm *opt = IP6CB(skb); #if IS_ENABLED(CONFIG_IPV6_MIP6) __u16 dstbuf; #endif struct dst_entry *dst = skb_dst(skb); struct net *net = dev_net(skb->dev); int extlen; if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + ((skb_transport_header(skb)[1] + 1) << 3)))) { __IP6_INC_STATS(dev_net(dst_dev(dst)), idev, IPSTATS_MIB_INHDRERRORS); fail_and_free: kfree_skb(skb); return -1; } extlen = (skb_transport_header(skb)[1] + 1) << 3; if (extlen > net->ipv6.sysctl.max_dst_opts_len) goto fail_and_free; opt->lastopt = opt->dst1 = skb_network_header_len(skb); #if IS_ENABLED(CONFIG_IPV6_MIP6) dstbuf = opt->dst1; #endif if (ip6_parse_tlv(false, skb, net->ipv6.sysctl.max_dst_opts_cnt)) { skb->transport_header += extlen; opt = IP6CB(skb); #if IS_ENABLED(CONFIG_IPV6_MIP6) opt->nhoff = dstbuf; #else opt->nhoff = opt->dst1; #endif return 1; } __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); return -1; } static void seg6_update_csum(struct sk_buff *skb) { struct ipv6_sr_hdr 
*hdr; struct in6_addr *addr; __be32 from, to; /* srh is at transport offset and seg_left is already decremented * but daddr is not yet updated with next segment */ hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb); addr = hdr->segments + hdr->segments_left; hdr->segments_left++; from = *(__be32 *)hdr; hdr->segments_left--; to = *(__be32 *)hdr; /* update skb csum with diff resulting from seg_left decrement */ update_csum_diff4(skb, from, to); /* compute csum diff between current and next segment and update */ update_csum_diff16(skb, (__be32 *)(&ipv6_hdr(skb)->daddr), (__be32 *)addr); } static int ipv6_srh_rcv(struct sk_buff *skb) { struct inet6_skb_parm *opt = IP6CB(skb); struct net *net = dev_net(skb->dev); struct ipv6_sr_hdr *hdr; struct inet6_dev *idev; struct in6_addr *addr; int accept_seg6; hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb); idev = __in6_dev_get(skb->dev); accept_seg6 = min(READ_ONCE(net->ipv6.devconf_all->seg6_enabled), READ_ONCE(idev->cnf.seg6_enabled)); if (!accept_seg6) { kfree_skb(skb); return -1; } #ifdef CONFIG_IPV6_SEG6_HMAC if (!seg6_hmac_validate_skb(skb)) { kfree_skb(skb); return -1; } #endif looped_back: if (hdr->segments_left == 0) { if (hdr->nexthdr == NEXTHDR_IPV6 || hdr->nexthdr == NEXTHDR_IPV4) { int offset = (hdr->hdrlen + 1) << 3; skb_postpull_rcsum(skb, skb_network_header(skb), skb_network_header_len(skb)); skb_pull(skb, offset); skb_postpull_rcsum(skb, skb_transport_header(skb), offset); skb_reset_network_header(skb); skb_reset_transport_header(skb); skb->encapsulation = 0; if (hdr->nexthdr == NEXTHDR_IPV4) skb->protocol = htons(ETH_P_IP); __skb_tunnel_rx(skb, skb->dev, net); netif_rx(skb); return -1; } opt->srcrt = skb_network_header_len(skb); opt->lastopt = opt->srcrt; skb->transport_header += (hdr->hdrlen + 1) << 3; opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb); return 1; } if (hdr->segments_left >= (hdr->hdrlen >> 1)) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ((&hdr->segments_left) - skb_network_header(skb))); return -1; } if (skb_cloned(skb)) { if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return -1; } hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb); } hdr->segments_left--; addr = hdr->segments + hdr->segments_left; skb_push(skb, sizeof(struct ipv6hdr)); if (skb->ip_summed == CHECKSUM_COMPLETE) seg6_update_csum(skb); ipv6_hdr(skb)->daddr = *addr; ip6_route_input(skb); if (skb_dst(skb)->error) { dst_input(skb); return -1; } if (skb_dst_dev(skb)->flags & IFF_LOOPBACK) { if (ipv6_hdr(skb)->hop_limit <= 1) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); kfree_skb(skb); return -1; } ipv6_hdr(skb)->hop_limit--; skb_pull(skb, sizeof(struct ipv6hdr)); goto looped_back; } dst_input(skb); return -1; } static int ipv6_rpl_srh_rcv(struct sk_buff *skb) { struct ipv6_rpl_sr_hdr *hdr, *ohdr, *chdr; struct inet6_skb_parm *opt = IP6CB(skb); struct net *net = dev_net(skb->dev); struct inet6_dev *idev; struct ipv6hdr *oldhdr; unsigned char *buf; int accept_rpl_seg; int i, err; u64 n = 0; u32 r; idev = __in6_dev_get(skb->dev); accept_rpl_seg = min(READ_ONCE(net->ipv6.devconf_all->rpl_seg_enabled), READ_ONCE(idev->cnf.rpl_seg_enabled)); if (!accept_rpl_seg) { kfree_skb(skb); return -1; } looped_back: hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb); if (hdr->segments_left == 0) { if (hdr->nexthdr == NEXTHDR_IPV6) { int 
offset = (hdr->hdrlen + 1) << 3; skb_postpull_rcsum(skb, skb_network_header(skb), skb_network_header_len(skb)); skb_pull(skb, offset); skb_postpull_rcsum(skb, skb_transport_header(skb), offset); skb_reset_network_header(skb); skb_reset_transport_header(skb); skb->encapsulation = 0; __skb_tunnel_rx(skb, skb->dev, net); netif_rx(skb); return -1; } opt->srcrt = skb_network_header_len(skb); opt->lastopt = opt->srcrt; skb->transport_header += (hdr->hdrlen + 1) << 3; opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb); return 1; } n = (hdr->hdrlen << 3) - hdr->pad - (16 - hdr->cmpre); r = do_div(n, (16 - hdr->cmpri)); /* checks if calculation was without remainder and n fits into * unsigned char which is segments_left field. Should not be * higher than that. */ if (r || (n + 1) > 255) { kfree_skb(skb); return -1; } if (hdr->segments_left > n + 1) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ((&hdr->segments_left) - skb_network_header(skb))); return -1; } hdr->segments_left--; i = n - hdr->segments_left; buf = kcalloc(struct_size(hdr, segments.addr, n + 2), 2, GFP_ATOMIC); if (unlikely(!buf)) { kfree_skb(skb); return -1; } ohdr = (struct ipv6_rpl_sr_hdr *)buf; ipv6_rpl_srh_decompress(ohdr, hdr, &ipv6_hdr(skb)->daddr, n); chdr = (struct ipv6_rpl_sr_hdr *)(buf + ((ohdr->hdrlen + 1) << 3)); if (ipv6_addr_is_multicast(&ohdr->rpl_segaddr[i])) { kfree_skb(skb); kfree(buf); return -1; } err = ipv6_chk_rpl_srh_loop(net, ohdr->rpl_segaddr, n + 1); if (err) { icmpv6_send(skb, ICMPV6_PARAMPROB, 0, 0); kfree_skb(skb); kfree(buf); return -1; } swap(ipv6_hdr(skb)->daddr, ohdr->rpl_segaddr[i]); ipv6_rpl_srh_compress(chdr, ohdr, &ipv6_hdr(skb)->daddr, n); oldhdr = ipv6_hdr(skb); skb_pull(skb, ((hdr->hdrlen + 1) << 3)); skb_postpull_rcsum(skb, oldhdr, sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3)); if (unlikely(!hdr->segments_left)) { if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0, GFP_ATOMIC)) { __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); kfree(buf); return -1; } oldhdr = ipv6_hdr(skb); } skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr)); skb_reset_network_header(skb); skb_mac_header_rebuild(skb); skb_set_transport_header(skb, sizeof(struct ipv6hdr)); memmove(ipv6_hdr(skb), oldhdr, sizeof(struct ipv6hdr)); memcpy(skb_transport_header(skb), chdr, (chdr->hdrlen + 1) << 3); ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); skb_postpush_rcsum(skb, ipv6_hdr(skb), sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3)); kfree(buf); ip6_route_input(skb); if (skb_dst(skb)->error) { dst_input(skb); return -1; } if (skb_dst_dev(skb)->flags & IFF_LOOPBACK) { if (ipv6_hdr(skb)->hop_limit <= 1) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); kfree_skb(skb); return -1; } ipv6_hdr(skb)->hop_limit--; skb_pull(skb, sizeof(struct ipv6hdr)); goto looped_back; } dst_input(skb); return -1; } /******************************** Routing header. 
********************************/ /* called with rcu_read_lock() */ static int ipv6_rthdr_rcv(struct sk_buff *skb) { struct inet6_dev *idev = __in6_dev_get(skb->dev); struct inet6_skb_parm *opt = IP6CB(skb); struct in6_addr *addr = NULL; int n, i; struct ipv6_rt_hdr *hdr; struct rt0_hdr *rthdr; struct net *net = dev_net(skb->dev); int accept_source_route; accept_source_route = READ_ONCE(net->ipv6.devconf_all->accept_source_route); if (idev) accept_source_route = min(accept_source_route, READ_ONCE(idev->cnf.accept_source_route)); if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + ((skb_transport_header(skb)[1] + 1) << 3)))) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb); if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) || skb->pkt_type != PACKET_HOST) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } switch (hdr->type) { case IPV6_SRCRT_TYPE_4: /* segment routing */ return ipv6_srh_rcv(skb); case IPV6_SRCRT_TYPE_3: /* rpl segment routing */ return ipv6_rpl_srh_rcv(skb); default: break; } looped_back: if (hdr->segments_left == 0) { switch (hdr->type) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_SRCRT_TYPE_2: /* Silently discard type 2 header unless it was * processed by own */ if (!addr) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } break; #endif default: break; } opt->lastopt = opt->srcrt = skb_network_header_len(skb); skb->transport_header += (hdr->hdrlen + 1) << 3; opt->dst0 = opt->dst1; opt->dst1 = 0; opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb); return 1; } switch (hdr->type) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_SRCRT_TYPE_2: if (accept_source_route < 0) goto unknown_rh; /* Silently discard invalid RTH type 2 */ if (hdr->hdrlen != 2 || hdr->segments_left != 1) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } break; #endif default: goto unknown_rh; } /* * This is the routing header forwarding algorithm from * RFC 2460, page 16. */ n = hdr->hdrlen >> 1; if (hdr->segments_left > n) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ((&hdr->segments_left) - skb_network_header(skb))); return -1; } /* We are about to mangle packet header. Be careful! Do not damage packets queued somewhere. 
*/ if (skb_cloned(skb)) { /* the copy is a forwarded packet */ if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return -1; } hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb); } if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; i = n - --hdr->segments_left; rthdr = (struct rt0_hdr *) hdr; addr = rthdr->addr; addr += i - 1; switch (hdr->type) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_SRCRT_TYPE_2: if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, (xfrm_address_t *)&ipv6_hdr(skb)->saddr, IPPROTO_ROUTING) < 0) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } if (!ipv6_chk_home_addr(skb_dst_dev_net(skb), addr)) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } break; #endif default: break; } if (ipv6_addr_is_multicast(addr)) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } swap(*addr, ipv6_hdr(skb)->daddr); ip6_route_input(skb); if (skb_dst(skb)->error) { skb_push(skb, -skb_network_offset(skb)); dst_input(skb); return -1; } if (skb_dst_dev(skb)->flags & IFF_LOOPBACK) { if (ipv6_hdr(skb)->hop_limit <= 1) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); kfree_skb(skb); return -1; } ipv6_hdr(skb)->hop_limit--; goto looped_back; } skb_push(skb, -skb_network_offset(skb)); dst_input(skb); return -1; unknown_rh: __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb_network_header(skb)); return -1; } static const struct inet6_protocol rthdr_protocol = { .handler = ipv6_rthdr_rcv, .flags = INET6_PROTO_NOPOLICY, }; static const struct inet6_protocol destopt_protocol = { .handler = ipv6_destopt_rcv, .flags = INET6_PROTO_NOPOLICY, }; static const struct inet6_protocol nodata_protocol = { .handler = dst_discard, .flags = INET6_PROTO_NOPOLICY, }; int __init ipv6_exthdrs_init(void) { int ret; ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING); if (ret) goto out; ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS); if (ret) goto out_rthdr; ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE); if (ret) goto out_destopt; out: return ret; out_destopt: inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS); out_rthdr: inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING); goto out; }; void ipv6_exthdrs_exit(void) { inet6_del_protocol(&nodata_protocol, IPPROTO_NONE); inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS); inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING); } /********************************** Hop-by-hop options. 
**********************************/ /* Router Alert as of RFC 2711 */ static bool ipv6_hop_ra(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); if (nh[optoff + 1] == 2) { IP6CB(skb)->flags |= IP6SKB_ROUTERALERT; memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra)); return true; } net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n", nh[optoff + 1]); kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR); return false; } /* IOAM */ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff) { struct ioam6_trace_hdr *trace; struct ioam6_namespace *ns; struct ioam6_hdr *hdr; /* Bad alignment (must be 4n-aligned) */ if (optoff & 3) goto drop; /* Ignore if IOAM is not enabled on ingress */ if (!READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_enabled)) goto ignore; /* Truncated Option header */ hdr = (struct ioam6_hdr *)(skb_network_header(skb) + optoff); if (hdr->opt_len < 2) goto drop; switch (hdr->type) { case IOAM6_TYPE_PREALLOC: /* Truncated Pre-allocated Trace header */ if (hdr->opt_len < 2 + sizeof(*trace)) goto drop; /* Malformed Pre-allocated Trace header */ trace = (struct ioam6_trace_hdr *)((u8 *)hdr + sizeof(*hdr)); if (hdr->opt_len < 2 + sizeof(*trace) + trace->remlen * 4) goto drop; /* Ignore if the IOAM namespace is unknown */ ns = ioam6_namespace(dev_net(skb->dev), trace->namespace_id); if (!ns) goto ignore; if (!skb_valid_dst(skb)) ip6_route_input(skb); /* About to mangle packet header */ if (skb_ensure_writable(skb, optoff + 2 + hdr->opt_len)) goto drop; /* Trace pointer may have changed */ trace = (struct ioam6_trace_hdr *)(skb_network_header(skb) + optoff + sizeof(*hdr)); ioam6_fill_trace_data(skb, ns, trace, true); ioam6_event(IOAM6_EVENT_TRACE, dev_net(skb->dev), GFP_ATOMIC, (void *)trace, hdr->opt_len - 2); break; default: break; } ignore: return true; drop: kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR); return false; } /* Jumbo payload */ static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); SKB_DR(reason); u32 pkt_len; if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", nh[optoff+1]); SKB_DR_SET(reason, IP_INHDR); goto drop; } pkt_len = ntohl(*(__be32 *)(nh + optoff + 2)); if (pkt_len <= IPV6_MAXPLEN) { icmpv6_param_prob_reason(skb, ICMPV6_HDR_FIELD, optoff + 2, SKB_DROP_REASON_IP_INHDR); return false; } if (ipv6_hdr(skb)->payload_len) { icmpv6_param_prob_reason(skb, ICMPV6_HDR_FIELD, optoff, SKB_DROP_REASON_IP_INHDR); return false; } if (pkt_len > skb->len - sizeof(struct ipv6hdr)) { SKB_DR_SET(reason, PKT_TOO_SMALL); goto drop; } if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) goto drop; IP6CB(skb)->flags |= IP6SKB_JUMBOGRAM; return true; drop: kfree_skb_reason(skb, reason); return false; } /* CALIPSO RFC 5570 */ static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); if (nh[optoff + 1] < 8) goto drop; if (nh[optoff + 6] * 4 + 8 > nh[optoff + 1]) goto drop; if (!calipso_validate(skb, nh + optoff)) goto drop; return true; drop: kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR); return false; } int ipv6_parse_hopopts(struct sk_buff *skb) { struct inet6_skb_parm *opt = IP6CB(skb); struct net *net = dev_net(skb->dev); int extlen; /* * skb_network_header(skb) is equal to skb->data, and * skb_network_header_len(skb) is always equal to * sizeof(struct ipv6hdr) by definition of * hop-by-hop options. 
*/ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) || !pskb_may_pull(skb, (sizeof(struct ipv6hdr) + ((skb_transport_header(skb)[1] + 1) << 3)))) { fail_and_free: kfree_skb(skb); return -1; } extlen = (skb_transport_header(skb)[1] + 1) << 3; if (extlen > net->ipv6.sysctl.max_hbh_opts_len) goto fail_and_free; opt->flags |= IP6SKB_HOPBYHOP; if (ip6_parse_tlv(true, skb, net->ipv6.sysctl.max_hbh_opts_cnt)) { skb->transport_header += extlen; opt = IP6CB(skb); opt->nhoff = sizeof(struct ipv6hdr); return 1; } return -1; } /* * Creating outbound headers. * * "build" functions work when skb is filled from head to tail (datagram) * "push" functions work when headers are added from tail to head (tcp) * * In both cases we assume, that caller reserved enough room * for headers. */ static void ipv6_push_rthdr0(struct sk_buff *skb, u8 *proto, struct ipv6_rt_hdr *opt, struct in6_addr **addr_p, struct in6_addr *saddr) { struct rt0_hdr *phdr, *ihdr; int hops; ihdr = (struct rt0_hdr *) opt; phdr = skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3); memcpy(phdr, ihdr, sizeof(struct rt0_hdr)); hops = ihdr->rt_hdr.hdrlen >> 1; if (hops > 1) memcpy(phdr->addr, ihdr->addr + 1, (hops - 1) * sizeof(struct in6_addr)); phdr->addr[hops - 1] = **addr_p; *addr_p = ihdr->addr; phdr->rt_hdr.nexthdr = *proto; *proto = NEXTHDR_ROUTING; } static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto, struct ipv6_rt_hdr *opt, struct in6_addr **addr_p, struct in6_addr *saddr) { struct ipv6_sr_hdr *sr_phdr, *sr_ihdr; int plen, hops; sr_ihdr = (struct ipv6_sr_hdr *)opt; plen = (sr_ihdr->hdrlen + 1) << 3; sr_phdr = skb_push(skb, plen); memcpy(sr_phdr, sr_ihdr, sizeof(struct ipv6_sr_hdr)); hops = sr_ihdr->first_segment + 1; memcpy(sr_phdr->segments + 1, sr_ihdr->segments + 1, (hops - 1) * sizeof(struct in6_addr)); sr_phdr->segments[0] = **addr_p; *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left]; if (sr_ihdr->hdrlen > hops * 2) { int tlvs_offset, tlvs_length; tlvs_offset = (1 + hops * 2) << 3; tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3; memcpy((char *)sr_phdr + tlvs_offset, (char *)sr_ihdr + tlvs_offset, tlvs_length); } #ifdef CONFIG_IPV6_SEG6_HMAC if (sr_has_hmac(sr_phdr)) { struct net *net = NULL; if (skb->dev) net = dev_net(skb->dev); else if (skb->sk) net = sock_net(skb->sk); WARN_ON(!net); if (net) seg6_push_hmac(net, saddr, sr_phdr); } #endif sr_phdr->nexthdr = *proto; *proto = NEXTHDR_ROUTING; } static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto, struct ipv6_rt_hdr *opt, struct in6_addr **addr_p, struct in6_addr *saddr) { switch (opt->type) { case IPV6_SRCRT_TYPE_0: case IPV6_SRCRT_STRICT: case IPV6_SRCRT_TYPE_2: ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr); break; case IPV6_SRCRT_TYPE_4: ipv6_push_rthdr4(skb, proto, opt, addr_p, saddr); break; default: break; } } static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt) { struct ipv6_opt_hdr *h = skb_push(skb, ipv6_optlen(opt)); memcpy(h, opt, ipv6_optlen(opt)); h->nexthdr = *proto; *proto = type; } void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto, struct in6_addr **daddr, struct in6_addr *saddr) { if (opt->srcrt) { ipv6_push_rthdr(skb, proto, opt->srcrt, daddr, saddr); /* * IPV6_RTHDRDSTOPTS is ignored * unless IPV6_RTHDR is set (RFC3542). 
*/ if (opt->dst0opt) ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt); } if (opt->hopopt) ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt); } void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto) { if (opt->dst1opt) ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt); } EXPORT_SYMBOL(ipv6_push_frag_opts); struct ipv6_txoptions * ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt) { struct ipv6_txoptions *opt2; opt2 = sock_kmemdup(sk, opt, opt->tot_len, GFP_ATOMIC); if (opt2) { long dif = (char *)opt2 - (char *)opt; if (opt2->hopopt) *((char **)&opt2->hopopt) += dif; if (opt2->dst0opt) *((char **)&opt2->dst0opt) += dif; if (opt2->dst1opt) *((char **)&opt2->dst1opt) += dif; if (opt2->srcrt) *((char **)&opt2->srcrt) += dif; refcount_set(&opt2->refcnt, 1); } return opt2; } EXPORT_SYMBOL_GPL(ipv6_dup_options); static void ipv6_renew_option(int renewtype, struct ipv6_opt_hdr **dest, struct ipv6_opt_hdr *old, struct ipv6_opt_hdr *new, int newtype, char **p) { struct ipv6_opt_hdr *src; src = (renewtype == newtype ? new : old); if (!src) return; memcpy(*p, src, ipv6_optlen(src)); *dest = (struct ipv6_opt_hdr *)*p; *p += CMSG_ALIGN(ipv6_optlen(*dest)); } /** * ipv6_renew_options - replace a specific ext hdr with a new one. * * @sk: sock from which to allocate memory * @opt: original options * @newtype: option type to replace in @opt * @newopt: new option of type @newtype to replace (user-mem) * * Returns a new set of options which is a copy of @opt with the * option type @newtype replaced with @newopt. * * @opt may be NULL, in which case a new set of options is returned * containing just @newopt. * * @newopt may be NULL, in which case the specified option type is * not copied into the new set of options. * * The new set of options is allocated from the socket option memory * buffer of @sk. */ struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, int newtype, struct ipv6_opt_hdr *newopt) { int tot_len = 0; char *p; struct ipv6_txoptions *opt2; if (opt) { if (newtype != IPV6_HOPOPTS && opt->hopopt) tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt)); if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt) tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt)); if (newtype != IPV6_RTHDR && opt->srcrt) tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt)); if (newtype != IPV6_DSTOPTS && opt->dst1opt) tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt)); } if (newopt) tot_len += CMSG_ALIGN(ipv6_optlen(newopt)); if (!tot_len) return NULL; tot_len += sizeof(*opt2); opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC); if (!opt2) return ERR_PTR(-ENOBUFS); memset(opt2, 0, tot_len); refcount_set(&opt2->refcnt, 1); opt2->tot_len = tot_len; p = (char *)(opt2 + 1); ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt, (opt ? opt->hopopt : NULL), newopt, newtype, &p); ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt, (opt ? opt->dst0opt : NULL), newopt, newtype, &p); ipv6_renew_option(IPV6_RTHDR, (struct ipv6_opt_hdr **)&opt2->srcrt, (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL), newopt, newtype, &p); ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt, (opt ? opt->dst1opt : NULL), newopt, newtype, &p); opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) + (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) + (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0); opt2->opt_flen = (opt2->dst1opt ? 
ipv6_optlen(opt2->dst1opt) : 0); return opt2; } struct ipv6_txoptions *__ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt) { /* * ignore the dest before srcrt unless srcrt is being included. * --yoshfuji */ if (opt->dst0opt && !opt->srcrt) { if (opt_space != opt) { memcpy(opt_space, opt, sizeof(*opt_space)); opt = opt_space; } opt->opt_nflen -= ipv6_optlen(opt->dst0opt); opt->dst0opt = NULL; } return opt; } EXPORT_SYMBOL_GPL(__ipv6_fixup_options); /** * fl6_update_dst - update flowi destination address with info given * by srcrt option, if any. * * @fl6: flowi6 for which daddr is to be updated * @opt: struct ipv6_txoptions in which to look for srcrt opt * @orig: copy of original daddr address if modified * * Returns NULL if no txoptions or no srcrt, otherwise returns orig * and initial value of fl6->daddr set in orig */ struct in6_addr *fl6_update_dst(struct flowi6 *fl6, const struct ipv6_txoptions *opt, struct in6_addr *orig) { if (!opt || !opt->srcrt) return NULL; *orig = fl6->daddr; switch (opt->srcrt->type) { case IPV6_SRCRT_TYPE_0: case IPV6_SRCRT_STRICT: case IPV6_SRCRT_TYPE_2: fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr; break; case IPV6_SRCRT_TYPE_4: { struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)opt->srcrt; fl6->daddr = srh->segments[srh->segments_left]; break; } default: return NULL; } return orig; } EXPORT_SYMBOL_GPL(fl6_update_dst);
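The walk in ip6_parse_tlv() above enforces three bounds: Pad1/PadN padding may not accumulate past 7 bytes and PadN bodies must be all zeroes, each option's length byte must fit inside the remaining extension-header length, and the count of real (non-padding) options is capped by max_count. A minimal userspace sketch of the same discipline over a raw option area follows; the option type 0x1e in the demo buffer is made up for illustration.

/*
 * Minimal sketch of a hop-by-hop/destination TLV walk with the same bounds
 * checks as ip6_parse_tlv(). The buffer is just the option area (the two
 * extension-header bytes are assumed already skipped). Illustrative, not
 * the kernel function.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TLV_PAD1	0
#define TLV_PADN	1

static bool walk_tlv(const uint8_t *opt, int len, int max_count)
{
	int off = 0, padlen = 0, count = 0;

	while (off < len) {
		if (opt[off] == TLV_PAD1) {
			if (++padlen > 7)
				return false;	/* RFC 4942: over-padding */
			off++;
			continue;
		}
		if (len - off < 2)
			return false;		/* truncated TLV header */
		int optlen = opt[off + 1] + 2;
		if (optlen > len - off)
			return false;		/* body overruns the header */
		if (opt[off] == TLV_PADN) {
			padlen += optlen;
			if (padlen > 7)
				return false;
			for (int i = 2; i < optlen; i++)
				if (opt[off + i] != 0)
					return false;	/* PadN must be zero */
		} else {
			if (++count > max_count)
				return false;	/* option-count cap */
			printf("option type %u, %u data bytes\n",
			       opt[off], optlen - 2);
			padlen = 0;	/* padding run resets at a real option */
		}
		off += optlen;
	}
	return true;
}

int main(void)
{
	/* PadN(2) followed by a 2-byte option of (made-up) type 0x1e. */
	const uint8_t opts[] = { TLV_PADN, 0, 0x1e, 2, 0xab, 0xcd };

	return walk_tlv(opts, sizeof(opts), 8) ? 0 : 1;
}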
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/proc/root.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *
 *  proc root directory handling functions
 */
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <linux/mount.h>
#include <linux/pid_namespace.h>
#include <linux/fs_parser.h>
#include <linux/cred.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include "internal.h"
struct proc_fs_context { struct pid_namespace *pid_ns; unsigned int mask; enum proc_hidepid hidepid; int gid; enum proc_pidonly pidonly; }; enum proc_param { Opt_gid, Opt_hidepid, Opt_subset, Opt_pidns, }; static const struct fs_parameter_spec proc_fs_parameters[] = { fsparam_u32("gid", Opt_gid), fsparam_string("hidepid", Opt_hidepid), fsparam_string("subset", Opt_subset), fsparam_file_or_string("pidns", Opt_pidns), {} }; static inline int valid_hidepid(unsigned int value) { return (value == HIDEPID_OFF || value == HIDEPID_NO_ACCESS || value == HIDEPID_INVISIBLE || value == HIDEPID_NOT_PTRACEABLE); } static int proc_parse_hidepid_param(struct fs_context *fc, struct fs_parameter *param) { struct proc_fs_context *ctx = fc->fs_private; struct fs_parameter_spec hidepid_u32_spec = fsparam_u32("hidepid", Opt_hidepid); struct fs_parse_result result; int base = (unsigned long)hidepid_u32_spec.data; if (param->type != fs_value_is_string) return invalf(fc, "proc: unexpected type of hidepid value\n"); if (!kstrtouint(param->string, base, &result.uint_32)) { if (!valid_hidepid(result.uint_32)) return invalf(fc,
"proc: unknown value of hidepid - %s\n", param->string); ctx->hidepid = result.uint_32; return 0; } if (!strcmp(param->string, "off")) ctx->hidepid = HIDEPID_OFF; else if (!strcmp(param->string, "noaccess")) ctx->hidepid = HIDEPID_NO_ACCESS; else if (!strcmp(param->string, "invisible")) ctx->hidepid = HIDEPID_INVISIBLE; else if (!strcmp(param->string, "ptraceable")) ctx->hidepid = HIDEPID_NOT_PTRACEABLE; else return invalf(fc, "proc: unknown value of hidepid - %s\n", param->string); return 0; } static int proc_parse_subset_param(struct fs_context *fc, char *value) { struct proc_fs_context *ctx = fc->fs_private; while (value) { char *ptr = strchr(value, ','); if (ptr != NULL) *ptr++ = '\0'; if (*value != '\0') { if (!strcmp(value, "pid")) { ctx->pidonly = PROC_PIDONLY_ON; } else { return invalf(fc, "proc: unsupported subset option - %s\n", value); } } value = ptr; } return 0; } #ifdef CONFIG_PID_NS static int proc_parse_pidns_param(struct fs_context *fc, struct fs_parameter *param, struct fs_parse_result *result) { struct proc_fs_context *ctx = fc->fs_private; struct pid_namespace *target, *active = task_active_pid_ns(current); struct ns_common *ns; struct file *ns_filp __free(fput) = NULL; switch (param->type) { case fs_value_is_file: /* came through fsconfig, steal the file reference */ ns_filp = no_free_ptr(param->file); break; case fs_value_is_string: ns_filp = filp_open(param->string, O_RDONLY, 0); break; default: WARN_ON_ONCE(true); break; } if (!ns_filp) ns_filp = ERR_PTR(-EBADF); if (IS_ERR(ns_filp)) { errorfc(fc, "could not get file from pidns argument"); return PTR_ERR(ns_filp); } if (!proc_ns_file(ns_filp)) return invalfc(fc, "pidns argument is not an nsfs file"); ns = get_proc_ns(file_inode(ns_filp)); if (ns->ns_type != CLONE_NEWPID) return invalfc(fc, "pidns argument is not a pidns file"); target = container_of(ns, struct pid_namespace, ns); /* * pidns= is shorthand for joining the pidns to get a fsopen fd, so the * permission model should be the same as pidns_install(). */ if (!ns_capable(target->user_ns, CAP_SYS_ADMIN)) { errorfc(fc, "insufficient permissions to set pidns"); return -EPERM; } if (!pidns_is_ancestor(target, active)) return invalfc(fc, "cannot set pidns to non-descendant pidns"); put_pid_ns(ctx->pid_ns); ctx->pid_ns = get_pid_ns(target); put_user_ns(fc->user_ns); fc->user_ns = get_user_ns(ctx->pid_ns->user_ns); return 0; } #endif /* CONFIG_PID_NS */ static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct proc_fs_context *ctx = fc->fs_private; struct fs_parse_result result; int opt, err; opt = fs_parse(fc, proc_fs_parameters, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_gid: ctx->gid = result.uint_32; break; case Opt_hidepid: err = proc_parse_hidepid_param(fc, param); if (err) return err; break; case Opt_subset: err = proc_parse_subset_param(fc, param->string); if (err) return err; break; case Opt_pidns: #ifdef CONFIG_PID_NS /* * We would have to RCU-protect every proc_pid_ns() or * proc_sb_info() access if we allowed this to be reconfigured * for an existing procfs instance. Luckily, procfs instances * are cheap to create, and mount-beneath would let you * atomically replace an instance even with overmounts. 
*/ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { errorfc(fc, "cannot reconfigure pidns for existing procfs"); return -EBUSY; } err = proc_parse_pidns_param(fc, param, &result); if (err) return err; break; #else errorfc(fc, "pidns mount flag not supported on this system"); return -EOPNOTSUPP; #endif default: return -EINVAL; } ctx->mask |= 1 << opt; return 0; } static void proc_apply_options(struct proc_fs_info *fs_info, struct fs_context *fc, struct user_namespace *user_ns) { struct proc_fs_context *ctx = fc->fs_private; if (ctx->mask & (1 << Opt_gid)) fs_info->pid_gid = make_kgid(user_ns, ctx->gid); if (ctx->mask & (1 << Opt_hidepid)) fs_info->hide_pid = ctx->hidepid; if (ctx->mask & (1 << Opt_subset)) fs_info->pidonly = ctx->pidonly; if (ctx->mask & (1 << Opt_pidns) && !WARN_ON_ONCE(fc->purpose == FS_CONTEXT_FOR_RECONFIGURE)) { put_pid_ns(fs_info->pid_ns); fs_info->pid_ns = get_pid_ns(ctx->pid_ns); } } static int proc_fill_super(struct super_block *s, struct fs_context *fc) { struct proc_fs_context *ctx = fc->fs_private; struct inode *root_inode; struct proc_fs_info *fs_info; int ret; fs_info = kzalloc(sizeof(*fs_info), GFP_KERNEL); if (!fs_info) return -ENOMEM; fs_info->pid_ns = get_pid_ns(ctx->pid_ns); proc_apply_options(fs_info, fc, current_user_ns()); /* User space would break if executables or devices appear on proc */ s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV; s->s_flags |= SB_NODIRATIME | SB_NOSUID | SB_NOEXEC; s->s_blocksize = 1024; s->s_blocksize_bits = 10; s->s_magic = PROC_SUPER_MAGIC; s->s_op = &proc_sops; s->s_time_gran = 1; s->s_fs_info = fs_info; /* * procfs isn't actually a stacking filesystem; however, there is * too much magic going on inside it to permit stacking things on * top of it */ s->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; /* procfs dentries and inodes don't require IO to create */ s->s_shrink->seeks = 0; pde_get(&proc_root); root_inode = proc_get_inode(s, &proc_root); if (!root_inode) { pr_err("proc_fill_super: get root inode failed\n"); return -ENOMEM; } s->s_root = d_make_root(root_inode); if (!s->s_root) { pr_err("proc_fill_super: allocate dentry failed\n"); return -ENOMEM; } ret = proc_setup_self(s); if (ret) { return ret; } return proc_setup_thread_self(s); } static int proc_reconfigure(struct fs_context *fc) { struct super_block *sb = fc->root->d_sb; struct proc_fs_info *fs_info = proc_sb_info(sb); sync_filesystem(sb); proc_apply_options(fs_info, fc, current_user_ns()); return 0; } static int proc_get_tree(struct fs_context *fc) { return get_tree_nodev(fc, proc_fill_super); } static void proc_fs_context_free(struct fs_context *fc) { struct proc_fs_context *ctx = fc->fs_private; put_pid_ns(ctx->pid_ns); kfree(ctx); } static const struct fs_context_operations proc_fs_context_ops = { .free = proc_fs_context_free, .parse_param = proc_parse_param, .get_tree = proc_get_tree, .reconfigure = proc_reconfigure, }; static int proc_init_fs_context(struct fs_context *fc) { struct proc_fs_context *ctx; ctx = kzalloc(sizeof(struct proc_fs_context), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->pid_ns = get_pid_ns(task_active_pid_ns(current)); put_user_ns(fc->user_ns); fc->user_ns = get_user_ns(ctx->pid_ns->user_ns); fc->fs_private = ctx; fc->ops = &proc_fs_context_ops; return 0; } static void proc_kill_sb(struct super_block *sb) { struct proc_fs_info *fs_info = proc_sb_info(sb); if (!fs_info) { kill_anon_super(sb); return; } dput(fs_info->proc_self); dput(fs_info->proc_thread_self); kill_anon_super(sb); put_pid_ns(fs_info->pid_ns); 
kfree_rcu(fs_info, rcu); } static struct file_system_type proc_fs_type = { .name = "proc", .init_fs_context = proc_init_fs_context, .parameters = proc_fs_parameters, .kill_sb = proc_kill_sb, .fs_flags = FS_USERNS_MOUNT | FS_DISALLOW_NOTIFY_PERM, }; void __init proc_root_init(void) { proc_init_kmemcache(); set_proc_pid_nlink(); proc_self_init(); proc_thread_self_init(); proc_symlink("mounts", NULL, "self/mounts"); proc_net_init(); proc_mkdir("fs", NULL); proc_mkdir("driver", NULL); proc_create_mount_point("fs/nfsd"); /* somewhere for the nfsd filesystem to be mounted */ #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE) /* just give it a mountpoint */ proc_create_mount_point("openprom"); #endif proc_tty_init(); proc_mkdir("bus", NULL); proc_sys_init(); /* * Last things last. It is not like userspace processes eager * to open /proc files exist at this point but register last * anyway. */ register_filesystem(&proc_fs_type); } static int proc_root_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(path->dentry), stat); stat->nlink = proc_root.nlink + nr_processes(); return 0; } static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, unsigned int flags) { if (!proc_pid_lookup(dentry, flags)) return NULL; return proc_lookup(dir, dentry, flags); } static int proc_root_readdir(struct file *file, struct dir_context *ctx) { if (ctx->pos < FIRST_PROCESS_ENTRY) { int error = proc_readdir(file, ctx); if (unlikely(error <= 0)) return error; ctx->pos = FIRST_PROCESS_ENTRY; } return proc_pid_readdir(file, ctx); } /* * The root /proc directory is special, as it has the * <pid> directories. Thus we don't use the generic * directory handling functions for that.. */ static const struct file_operations proc_root_operations = { .read = generic_read_dir, .iterate_shared = proc_root_readdir, .llseek = generic_file_llseek, }; /* * proc root can do almost nothing.. */ static const struct inode_operations proc_root_inode_operations = { .lookup = proc_root_lookup, .getattr = proc_root_getattr, }; /* * This is the root "inode" in the /proc tree.. */ struct proc_dir_entry proc_root = { .low_ino = PROCFS_ROOT_INO, .namelen = 5, .mode = S_IFDIR | S_IRUGO | S_IXUGO, .nlink = 2, .refcnt = REFCOUNT_INIT(1), .proc_iops = &proc_root_inode_operations, .proc_dir_ops = &proc_root_operations, .parent = &proc_root, .subdir = RB_ROOT, .name = "/proc", };
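The fs_context plumbing above is driven from userspace through the new mount API: fsopen() creates the context via proc_init_fs_context(), each fsconfig(FSCONFIG_SET_STRING) lands in proc_parse_param(), and FSCONFIG_CMD_CREATE ends up in proc_fill_super(). Below is a hedged sketch of that path, assuming a kernel new enough to expose these syscalls and the pidns= parameter, plus CAP_SYS_ADMIN over the target pid namespace; /tmp/proc and the raw syscall() wrappers are illustrative, since libc wrapper availability varies.

/*
 * Userspace sketch: mount a fresh procfs instance with hidepid=invisible
 * and pidns= using the new mount API implemented by the fs_context hooks
 * above. Assumes __NR_fsopen/fsconfig/fsmount/move_mount exist and that
 * the running kernel accepts the pidns parameter; /tmp/proc is an
 * illustrative target directory that must already exist.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/mount.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int fsopen_(const char *fs, unsigned int flags)
{ return syscall(__NR_fsopen, fs, flags); }
static int fsconfig_(int fd, unsigned int cmd, const char *key,
		     const char *val, int aux)
{ return syscall(__NR_fsconfig, fd, cmd, key, val, aux); }
static int fsmount_(int fd, unsigned int flags, unsigned int attrs)
{ return syscall(__NR_fsmount, fd, flags, attrs); }
static int move_mount_(int ffd, const char *fpath, int tfd,
		       const char *tpath, unsigned int flags)
{ return syscall(__NR_move_mount, ffd, fpath, tfd, tpath, flags); }

int main(void)
{
	int fsfd = fsopen_("proc", FSOPEN_CLOEXEC);
	if (fsfd < 0) { perror("fsopen"); return 1; }

	/* Each string parameter is handled by proc_parse_param(). */
	fsconfig_(fsfd, FSCONFIG_SET_STRING, "hidepid", "invisible", 0);
	fsconfig_(fsfd, FSCONFIG_SET_STRING, "pidns", "/proc/self/ns/pid", 0);

	/* CMD_CREATE triggers proc_get_tree() -> proc_fill_super(). */
	if (fsconfig_(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0) {
		perror("fsconfig(create)");
		return 1;
	}

	int mfd = fsmount_(fsfd, FSMOUNT_CLOEXEC, 0);
	if (mfd < 0) { perror("fsmount"); return 1; }

	if (move_mount_(mfd, "", AT_FDCWD, "/tmp/proc",
			MOVE_MOUNT_F_EMPTY_PATH) < 0)
		perror("move_mount");
	return 0;
}

Passing a path string for pidns= relies on the fs_value_is_string branch of proc_parse_pidns_param(), which filp_open()s the path; through fsconfig(FSCONFIG_SET_FD) the same parameter can take an nsfs file descriptor directly.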
/* SPDX-License-Identifier: GPL-2.0
 *
 *	page_pool/helpers.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */
/**
 * DOC: page_pool allocator
 *
 * The page_pool allocator is optimized for recycling pages or page fragments
 * used by skb packets and xdp frames.
 *
 * Basic use involves replacing any alloc_pages() calls with page_pool_alloc(),
 * which allocates memory with or without page splitting depending on the
 * requested memory size.
 *
 * If the driver knows that it always requires full pages or its allocations are
 * always smaller than half a page, it can use one of the more specific API
 * calls:
 *
 * 1. page_pool_alloc_pages(): allocate memory without page splitting when the
 * driver knows that the memory it needs is always bigger than half of the page
 * allocated from the page pool. There is no cache line dirtying for 'struct page'
 * when a page is recycled back to the page pool.
 *
 * 2. page_pool_alloc_frag(): allocate memory with page splitting when the driver
 * knows that the memory it needs is always smaller than or equal to half of the
 * page allocated from the page pool. Page splitting enables memory saving and thus
 * avoids TLB/cache misses for data access, but there is also some cost to
 * implementing page splitting, mainly some cache line dirtying/bouncing for
 * 'struct page' and atomic operations for page->pp_ref_count.
#ifndef _NET_PAGE_POOL_HELPERS_H
#define _NET_PAGE_POOL_HELPERS_H

#include <linux/dma-mapping.h>

#include <net/page_pool/types.h>
#include <net/net_debug.h>
#include <net/netmem.h>

#ifdef CONFIG_PAGE_POOL_STATS
/* Deprecated driver-facing API, use netlink instead */
int page_pool_ethtool_stats_get_count(void);
u8 *page_pool_ethtool_stats_get_strings(u8 *data);
u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats);

bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats);
#else
static inline int page_pool_ethtool_stats_get_count(void)
{
	return 0;
}

static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	return data;
}

static inline u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
{
	return data;
}
#endif

/**
 * page_pool_dev_alloc_pages() - allocate a page.
 * @pool: pool from which to allocate
 *
 * Get a page from the page allocator or page_pool caches.
 */
static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

/**
 * page_pool_dev_alloc_frag() - allocate a page fragment.
 * @pool: pool from which to allocate
 * @offset: offset to the allocated page
 * @size: requested size
 *
 * Get a page fragment from the page allocator or page_pool caches.
 *
 * Return: allocated page fragment, otherwise return NULL.
 */
static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_frag(pool, offset, size, gfp);
}

static inline netmem_ref page_pool_alloc_netmem(struct page_pool *pool,
						unsigned int *offset,
						unsigned int *size, gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	netmem_ref netmem;

	if ((*size << 1) > max_size) {
		*size = max_size;
		*offset = 0;
		return page_pool_alloc_netmems(pool, gfp);
	}

	netmem = page_pool_alloc_frag_netmem(pool, offset, *size, gfp);
	if (unlikely(!netmem))
		return 0;

	/* There is very likely not enough space for another fragment, so
	 * append the remaining size to the current fragment to avoid the
	 * truesize underestimation problem.
	 */
	if (pool->frag_offset + *size > max_size) {
		*size = max_size - *offset;
		pool->frag_offset = max_size;
	}

	return netmem;
}
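/*
 * Example: a sketch of the fragment path implemented above, for a
 * hypothetical driver needing ~1 KiB buffers (illustrative names; assumes
 * <net/page_pool/helpers.h> as in the previous example). Several such
 * fragments share one underlying page, each identified by its own offset.
 */
static void *mydrv_alloc_small_buf(struct page_pool *pool,
				   unsigned int *offset)
{
	struct page *page;

	/* 1024 bytes is well under half a page, so page splitting applies */
	page = page_pool_dev_alloc_frag(pool, offset, 1024);
	if (!page)
		return NULL;

	/* The GFP_ATOMIC path never returns highmem, so page_address() is
	 * safe here.
	 */
	return page_address(page) + *offset;
}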
static inline netmem_ref page_pool_dev_alloc_netmem(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int *size)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;

	return page_pool_alloc_netmem(pool, offset, size, gfp);
}

static inline netmem_ref page_pool_dev_alloc_netmems(struct page_pool *pool)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;

	return page_pool_alloc_netmems(pool, gfp);
}

static inline struct page *page_pool_alloc(struct page_pool *pool,
					   unsigned int *offset,
					   unsigned int *size, gfp_t gfp)
{
	return netmem_to_page(page_pool_alloc_netmem(pool, offset, size, gfp));
}

/**
 * page_pool_dev_alloc() - allocate a page or a page fragment.
 * @pool: pool from which to allocate
 * @offset: offset to the allocated page
 * @size: in as the requested size, out as the allocated size
 *
 * Get a page or a page fragment from the page allocator or page_pool caches
 * depending on the requested size, in order to allocate memory with the
 * least memory utilization and performance penalty.
 *
 * Return: allocated page or page fragment, otherwise return NULL.
 */
static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
					       unsigned int *offset,
					       unsigned int *size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc(pool, offset, size, gfp);
}

static inline void *page_pool_alloc_va(struct page_pool *pool,
				       unsigned int *size, gfp_t gfp)
{
	unsigned int offset;
	struct page *page;

	/* Mask off __GFP_HIGHMEM to ensure we can use page_address() */
	page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM);
	if (unlikely(!page))
		return NULL;

	return page_address(page) + offset;
}

/**
 * page_pool_dev_alloc_va() - allocate a page or a page fragment and return
 * its va.
 * @pool: pool from which to allocate
 * @size: in as the requested size, out as the allocated size
 *
 * This is just a thin wrapper around the page_pool_alloc() API, and it
 * returns the va of the allocated page or page fragment.
 *
 * Return: the va for the allocated page or page fragment, otherwise return
 * NULL.
 */
static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
					   unsigned int *size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_va(pool, size, gfp);
}

/**
 * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
 * @pool: pool from which page was allocated
 *
 * Get the stored DMA direction. A driver might decide to store this locally
 * and avoid the extra cache line from page_pool to determine the direction.
 */
static inline enum dma_data_direction
page_pool_get_dma_dir(const struct page_pool *pool)
{
	return pool->p.dma_dir;
}

static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr)
{
	atomic_long_set(netmem_get_pp_ref_count_ref(netmem), nr);
}

/**
 * page_pool_fragment_page() - split a fresh page into fragments
 * @page: page to split
 * @nr: references to set
 *
 * pp_ref_count represents the number of outstanding references to the page,
 * which will be freed using page_pool APIs (rather than page allocator APIs
 * like put_page()). Such references are usually held by page_pool-aware
 * objects like skbs marked for page pool recycling.
 *
 * This helper allows the caller to take (set) multiple references to a
 * freshly allocated page. The page must be freshly allocated (have a
 * pp_ref_count of 1). This is commonly done by drivers and
 * "fragment allocators" to save atomic operations - either when they know
 * upfront how many references they will need, or to take MAX references and
 * return the unused ones with a single atomic dec(), instead of performing
 * multiple atomic inc() operations.
 */
static inline void page_pool_fragment_page(struct page *page, long nr)
{
	page_pool_fragment_netmem(page_to_netmem(page), nr);
}
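/*
 * Example: a sketch of the "fragment allocator" pattern described above
 * (illustrative names). A freshly allocated page, whose pp_ref_count is 1,
 * is split into four equal slots by setting four references up front; each
 * slot is later released individually via the page_pool_put_page() path.
 */
static struct page *mydrv_alloc_4slot_page(struct page_pool *pool)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return NULL;

	page_pool_fragment_page(page, 4);	/* no atomic op needed yet */
	return page;
}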
static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
{
	atomic_long_t *pp_ref_count = netmem_get_pp_ref_count_ref(netmem);
	long ret;

	/* If nr == pp_ref_count then we have cleared all remaining
	 * references to the page:
	 * 1. 'nr == 1': no need to actually overwrite it.
	 * 2. 'nr != 1': overwrite it with one, which is the rare case
	 *    for pp_ref_count draining.
	 *
	 * The main advantage of doing this is that not only do we avoid an
	 * atomic update (an atomic_read is generally a much cheaper
	 * operation than an atomic update, especially when dealing with a
	 * page that may be referenced by only 2 or 3 users); we also unify
	 * the pp_ref_count handling by ensuring all pages are initially
	 * partitioned into only 1 piece, and only overwrite it when the
	 * page is partitioned into more than one piece.
	 */
	if (atomic_long_read(pp_ref_count) == nr) {
		/* As we have ensured nr is always one for the constant case
		 * using the BUILD_BUG_ON(), we only need to handle the
		 * non-constant case here for pp_ref_count draining, which is
		 * a rare case.
		 */
		BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
		if (!__builtin_constant_p(nr))
			atomic_long_set(pp_ref_count, 1);

		return 0;
	}

	ret = atomic_long_sub_return(nr, pp_ref_count);
	WARN_ON(ret < 0);

	/* We are the last user here too; reset pp_ref_count back to 1 to
	 * ensure all pages have been partitioned into 1 piece initially.
	 * This should be the rare case where the last two fragment users
	 * call page_pool_unref_page() concurrently.
	 */
	if (unlikely(!ret))
		atomic_long_set(pp_ref_count, 1);

	return ret;
}

static inline long page_pool_unref_page(struct page *page, long nr)
{
	return page_pool_unref_netmem(page_to_netmem(page), nr);
}

static inline void page_pool_ref_netmem(netmem_ref netmem)
{
	atomic_long_inc(netmem_get_pp_ref_count_ref(netmem));
}

static inline void page_pool_ref_page(struct page *page)
{
	page_pool_ref_netmem(page_to_netmem(page));
}

static inline bool page_pool_unref_and_test(netmem_ref netmem)
{
	/* If page_pool_unref_netmem() returns 0, we were the last user */
	return page_pool_unref_netmem(netmem, 1) == 0;
}

static inline void page_pool_put_netmem(struct page_pool *pool,
					netmem_ref netmem,
					unsigned int dma_sync_size,
					bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
	if (!page_pool_unref_and_test(netmem))
		return;

	page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size,
				     allow_direct);
#endif
}

/**
 * page_pool_put_page() - release a reference to a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 * @dma_sync_size: how much of the page may have been touched by the device
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * The outcome of this depends on the page refcnt. If the driver bumps
 * the refcnt > 1 this will unmap the page. If the page refcnt is 1
 * the allocator owns the page and will try to recycle it in one of the pool
 * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
 * using dma_sync_single_range_for_device().
 */
static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page,
				      unsigned int dma_sync_size,
				      bool allow_direct)
{
	page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size,
			     allow_direct);
}
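/*
 * Example: a sketch of the "take MAX references, return the surplus" idea
 * from the page_pool_fragment_page() comment above (illustrative names;
 * page_pool_put_unrefed_page() is declared in <net/page_pool/types.h>).
 * One atomic subtraction replaces (MYDRV_MAX_FRAGS - used) increments.
 */
#define MYDRV_MAX_FRAGS	16

static void mydrv_done_fragmenting(struct page_pool *pool, struct page *page,
				   int used)
{
	/* The page was earlier prepared with
	 * page_pool_fragment_page(page, MYDRV_MAX_FRAGS).
	 */
	if (page_pool_unref_page(page, MYDRV_MAX_FRAGS - used) == 0)
		/* All references gone: hand the page back to the pool */
		page_pool_put_unrefed_page(pool, page, -1, false);
}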
static inline void page_pool_put_full_netmem(struct page_pool *pool,
					     netmem_ref netmem,
					     bool allow_direct)
{
	page_pool_put_netmem(pool, netmem, -1, allow_direct);
}

/**
 * page_pool_put_full_page() - release a reference on a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * Similar to page_pool_put_page(), but will DMA sync the entire memory area
 * as configured in &page_pool_params.max_len.
 */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page,
					   bool allow_direct)
{
	page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct);
}

/**
 * page_pool_recycle_direct() - release a reference on a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 *
 * Similar to page_pool_put_full_page(), but the caller must guarantee a safe
 * context (e.g. NAPI), since it will recycle the page directly into the pool
 * fast cache.
 */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}

static inline void page_pool_recycle_direct_netmem(struct page_pool *pool,
						   netmem_ref netmem)
{
	page_pool_put_full_netmem(pool, netmem, true);
}

#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA	\
		(sizeof(dma_addr_t) > sizeof(unsigned long))

/**
 * page_pool_free_va() - free a va into the page_pool
 * @pool: pool from which va was allocated
 * @va: va to be freed
 * @allow_direct: freed by the consumer, allow lockless caching
 *
 * Free a va allocated from page_pool_alloc_va().
 */
static inline void page_pool_free_va(struct page_pool *pool, void *va,
				     bool allow_direct)
{
	page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
}

static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
{
	dma_addr_t ret = netmem_get_dma_addr(netmem);

	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
		ret <<= PAGE_SHIFT;

	return ret;
}

/**
 * page_pool_get_dma_addr() - Retrieve the stored DMA address.
 * @page: page allocated from a page pool
 *
 * Fetch the DMA address of the page. The page pool to which the page belongs
 * must have been created with PP_FLAG_DMA_MAP.
 */
static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
{
	return page_pool_get_dma_addr_netmem(page_to_netmem(page));
}

static inline void __page_pool_dma_sync_for_cpu(const struct page_pool *pool,
						const dma_addr_t dma_addr,
						u32 offset, u32 dma_sync_size)
{
	dma_sync_single_range_for_cpu(pool->p.dev, dma_addr,
				      offset + pool->p.offset, dma_sync_size,
				      page_pool_get_dma_dir(pool));
}

/**
 * page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW
 * @pool: &page_pool the @page belongs to
 * @page: page to sync
 * @offset: offset from page start to "hard" start if using PP frags
 * @dma_sync_size: size of the data written to the page
 *
 * Can be used as a shorthand to sync Rx pages before accessing them in the
 * driver. The caller must ensure the pool was created with
 * ``PP_FLAG_DMA_MAP``. Note that this version performs DMA sync
 * unconditionally, even if the associated PP doesn't perform
 * sync-for-device.
 */
static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
					      const struct page *page,
					      u32 offset, u32 dma_sync_size)
{
	__page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr(page),
				     offset, dma_sync_size);
}
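/*
 * Example: a sketch of an Rx completion handler using the sync-for-cpu
 * shorthand above (illustrative names; 'len' is the frame length reported
 * by the hypothetical device). The pool must have been created with
 * PP_FLAG_DMA_MAP.
 */
static void mydrv_rx_complete(struct page_pool *pool, struct page *page,
			      u32 len)
{
	/* Make the device-written data visible to the CPU before parsing */
	page_pool_dma_sync_for_cpu(pool, page, 0, len);

	/* ... run XDP or build an skb from page_address(page) ... */
}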
static inline void
page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool,
				  const netmem_ref netmem, u32 offset,
				  u32 dma_sync_size)
{
	if (!pool->dma_sync_for_cpu)
		return;

	__page_pool_dma_sync_for_cpu(pool,
				     page_pool_get_dma_addr_netmem(netmem),
				     offset, dma_sync_size);
}

static inline void page_pool_get(struct page_pool *pool)
{
	refcount_inc(&pool->user_cnt);
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}

/**
 * page_pool_is_unreadable() - will allocated buffers be unreadable for the CPU
 * @pool: queried page pool
 *
 * Check if the page pool will return buffers which are unreadable to the
 * CPU / kernel. This will only be the case if user space bound a memory
 * provider (mp) which returns unreadable memory to the queue served by the
 * page pool. If %PP_FLAG_ALLOW_UNREADABLE_NETMEM was set but there is no mp
 * bound, this helper will return false. See also
 * netif_rxq_has_unreadable_mp().
 *
 * Return: true if memory allocated by the page pool may be unreadable
 */
static inline bool page_pool_is_unreadable(struct page_pool *pool)
{
	return !!pool->mp_ops;
}

#endif /* _NET_PAGE_POOL_HELPERS_H */
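/*
 * Example: a sketch of pool teardown at driver unload (illustrative name).
 * page_pool_destroy() is the real API, declared in <net/page_pool/types.h>;
 * thanks to the in-flight tracking described in the DOC comment at the top
 * of this header, the actual release is deferred until all outstanding
 * pages have been returned.
 */
static void mydrv_destroy_pool(struct page_pool *pool)
{
	/* The driver must have stopped allocating from the pool, and all
	 * driver-held pages must have gone through the page_pool_put_*()
	 * family by this point.
	 */
	page_pool_destroy(pool);
}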
/*
 * Copyright (c) 2006-2008 Intel Corporation
 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
 *
 * DRM core CRTC related functions
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 *
 * Authors:
 *	Keith Packard
 *	Eric Anholt <eric@anholt.net>
 *	Dave Airlie <airlied@linux.ie>
 *	Jesse Barnes <jesse.barnes@intel.com>
 */

#include <linux/export.h>
#include <linux/moduleparam.h>

#include <drm/drm_bridge.h>
#include <drm/drm_client_event.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_sysfs.h>

#include "drm_crtc_helper_internal.h"

/**
 * DOC: output probing helper overview
 *
 * This library provides some helper code for output probing. It provides an
 * implementation of the core &drm_connector_funcs.fill_modes interface with
 * drm_helper_probe_single_connector_modes().
 *
 * It also provides support for polling connectors with a work item and for
 * generic hotplug interrupt handling where the driver doesn't or cannot keep
 * track of a per-connector hpd interrupt.
 *
 * This helper library can be used independently of the modeset helper
 * library. Drivers can also overwrite different parts, e.g. use their own
 * hotplug handling code to avoid probing unrelated outputs.
 *
 * The probe helpers share the function table structures with other display
 * helper libraries. See &struct drm_connector_helper_funcs for the details.
 */

static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);

static enum drm_mode_status
drm_mode_validate_flag(const struct drm_display_mode *mode, int flags)
{
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    !(flags & DRM_MODE_FLAG_INTERLACE))
		return MODE_NO_INTERLACE;

	if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
	    !(flags & DRM_MODE_FLAG_DBLSCAN))
		return MODE_NO_DBLESCAN;

	if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
	    !(flags & DRM_MODE_FLAG_3D_MASK))
		return MODE_NO_STEREO;

	return MODE_OK;
}

static int
drm_mode_validate_pipeline(struct drm_display_mode *mode,
			   struct drm_connector *connector,
			   struct drm_modeset_acquire_ctx *ctx,
			   enum drm_mode_status *status)
{
	struct drm_device *dev = connector->dev;
	struct drm_encoder *encoder;
	int ret;

	/* Step 1: Validate against connector */
	ret = drm_connector_mode_valid(connector, mode, ctx, status);
	if (ret || *status != MODE_OK)
		return ret;

	/* Step 2: Validate against encoders and crtcs */
	drm_connector_for_each_possible_encoder(connector, encoder) {
		struct drm_bridge *bridge;
		struct drm_crtc *crtc;

		*status = drm_encoder_mode_valid(encoder, mode);
		if (*status != MODE_OK) {
			/* No point in continuing for crtc check as this
			 * encoder will not accept the mode anyway. If all
			 * encoders reject the mode then, at exit, ret will
			 * not be MODE_OK.
			 */
			continue;
		}

		bridge = drm_bridge_chain_get_first_bridge(encoder);
		*status = drm_bridge_chain_mode_valid(bridge,
						      &connector->display_info,
						      mode);
		drm_bridge_put(bridge);
		if (*status != MODE_OK) {
			/* There is also no point in continuing for crtc check
			 * here.
			 */
			continue;
		}

		drm_for_each_crtc(crtc, dev) {
			if (!drm_encoder_crtc_ok(encoder, crtc))
				continue;

			*status = drm_crtc_mode_valid(crtc, mode);
			if (*status == MODE_OK) {
				/* If we get to this point there is at least
				 * one combination of encoder+crtc that works
				 * for this mode. Let's return now.
				 */
				return 0;
			}
		}
	}

	return 0;
}
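/*
 * Example: a minimal sketch of the per-connector validation step used above,
 * for a hypothetical driver whose pipeline tops out at a 165 MHz pixel clock
 * ("mydrv_" names are illustrative, not a real driver). A driver that needs
 * no acquire context implements &drm_connector_helper_funcs.mode_valid,
 * which drm_connector_mode_valid() consults when no ->mode_valid_ctx hook is
 * provided.
 */
static enum drm_mode_status
mydrv_connector_mode_valid(struct drm_connector *connector,
			   const struct drm_display_mode *mode)
{
	/* mode->clock is in kHz */
	if (mode->clock > 165000)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static const struct drm_connector_helper_funcs mydrv_connector_helpers = {
	.mode_valid = mydrv_connector_mode_valid,
};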
static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
{
	struct drm_cmdline_mode *cmdline_mode;
	struct drm_display_mode *mode;

	cmdline_mode = &connector->cmdline_mode;
	if (!cmdline_mode->specified)
		return 0;

	/* Only add a GTF mode if we find no matching probed modes */
	list_for_each_entry(mode, &connector->probed_modes, head) {
		if (mode->hdisplay != cmdline_mode->xres ||
		    mode->vdisplay != cmdline_mode->yres)
			continue;

		if (cmdline_mode->refresh_specified) {
			/* The probed mode's vrefresh isn't set until later */
			if (drm_mode_vrefresh(mode) != cmdline_mode->refresh)
				continue;
		}

		/* Mark the matching mode as being preferred by the user */
		mode->type |= DRM_MODE_TYPE_USERDEF;
		return 0;
	}

	mode = drm_mode_create_from_cmdline_mode(connector->dev,
						 cmdline_mode);
	if (mode == NULL)
		return 0;

	drm_mode_probed_add(connector, mode);
	return 1;
}

enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc,
					 const struct drm_display_mode *mode)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	if (!crtc_funcs || !crtc_funcs->mode_valid)
		return MODE_OK;

	return crtc_funcs->mode_valid(crtc, mode);
}

enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
					    const struct drm_display_mode *mode)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;

	if (!encoder_funcs || !encoder_funcs->mode_valid)
		return MODE_OK;

	return encoder_funcs->mode_valid(encoder, mode);
}

int drm_connector_mode_valid(struct drm_connector *connector,
			     const struct drm_display_mode *mode,
			     struct drm_modeset_acquire_ctx *ctx,
			     enum drm_mode_status *status)
{
	const struct drm_connector_helper_funcs *connector_funcs =
		connector->helper_private;
	int ret = 0;

	if (!connector_funcs)
		*status = MODE_OK;
	else if (connector_funcs->mode_valid_ctx)
		ret = connector_funcs->mode_valid_ctx(connector, mode, ctx,