/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2000-2002 Vojtech Pavlik <vojtech@ucw.cz> * Copyright (c) 2001-2002, 2007 Johann Deneux <johann.deneux@gmail.com> * * USB/RS232 I-Force joysticks and wheels. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/circ_buf.h> #include <linux/mutex.h> /* This module provides arbitrary resource management routines. * I use it to manage the device's memory. * Despite the name of this module, I am *not* going to access the ioports. */ #include <linux/ioport.h> #define IFORCE_MAX_LENGTH 16 #define IFORCE_EFFECTS_MAX 32 /* Each force feedback effect is made of one core effect, which can be * associated with at most two effect modifiers */ #define FF_MOD1_IS_USED 0 #define FF_MOD2_IS_USED 1 #define FF_CORE_IS_USED 2 #define FF_CORE_IS_PLAYED 3 /* Effect is currently being played */ #define FF_CORE_SHOULD_PLAY 4 /* User wants the effect to be played */ #define FF_CORE_UPDATE 5 /* Effect is being updated */ #define FF_MODCORE_CNT 6 struct iforce_core_effect { /* Information about where modifiers are stored in the device's memory */ struct resource mod1_chunk; struct resource mod2_chunk; unsigned long flags[BITS_TO_LONGS(FF_MODCORE_CNT)]; }; #define FF_CMD_EFFECT 0x010e #define FF_CMD_ENVELOPE 0x0208 #define FF_CMD_MAGNITUDE 0x0303 #define FF_CMD_PERIOD 0x0407 #define FF_CMD_CONDITION 0x050a #define FF_CMD_AUTOCENTER 0x4002 #define FF_CMD_PLAY 0x4103 #define FF_CMD_ENABLE 0x4201 #define FF_CMD_GAIN 0x4301 #define FF_CMD_QUERY 0xff01 /* Buffer for async write */ #define XMIT_SIZE 256 #define XMIT_INC(var, n) (var)+=n; (var)&= XMIT_SIZE -1 /* iforce::xmit_flags */ #define IFORCE_XMIT_RUNNING 0 #define IFORCE_XMIT_AGAIN 1 struct iforce_device { u16 idvendor; u16 idproduct; char *name; signed short *btn; signed short *abs; signed short *ff; }; struct iforce; struct iforce_xport_ops { void (*xmit)(struct iforce *iforce); int (*get_id)(struct iforce *iforce, u8 id, u8 *response_data, size_t *response_len); int (*start_io)(struct iforce *iforce); void (*stop_io)(struct iforce *iforce); }; struct iforce { struct input_dev *dev; /* Input device interface */ struct iforce_device *type; const struct iforce_xport_ops *xport_ops; spinlock_t xmit_lock; /* Buffer used for asynchronous sending of bytes to the device */ struct circ_buf xmit; unsigned char xmit_data[XMIT_SIZE]; unsigned long xmit_flags[1]; /* Force Feedback */ wait_queue_head_t wait; struct resource device_memory; struct iforce_core_effect core_effects[IFORCE_EFFECTS_MAX]; struct mutex mem_mutex; }; /* Get the high and low bytes of a 16-bit int */ #define HI(a) ((unsigned char)((a) >> 8)) #define LO(a) ((unsigned char)((a) & 0xff)) /* For many parameters, it seems that 0x80 is a special value that should * be avoided. Instead, we replace this value with 0x7f */ #define HIFIX80(a) ((unsigned char)(((a)<0? 
(a)+255 : (a))>>8)) /* Encode a time value */ #define TIME_SCALE(a) (a) static inline int iforce_get_id_packet(struct iforce *iforce, u8 id, u8 *response_data, size_t *response_len) { return iforce->xport_ops->get_id(iforce, id, response_data, response_len); } static inline void iforce_clear_xmit_and_wake(struct iforce *iforce) { clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); wake_up_all(&iforce->wait); } /* Public functions */ /* iforce-main.c */ int iforce_init_device(struct device *parent, u16 bustype, struct iforce *iforce); /* iforce-packets.c */ int iforce_control_playback(struct iforce*, u16 id, unsigned int); void iforce_process_packet(struct iforce *iforce, u8 packet_id, u8 *data, size_t len); int iforce_send_packet(struct iforce *iforce, u16 cmd, unsigned char* data); void iforce_dump_packet(struct iforce *iforce, char *msg, u16 cmd, unsigned char *data); /* iforce-ff.c */ int iforce_upload_periodic(struct iforce *, struct ff_effect *, struct ff_effect *); int iforce_upload_constant(struct iforce *, struct ff_effect *, struct ff_effect *); int iforce_upload_condition(struct iforce *, struct ff_effect *, struct ff_effect *); /* Public variables */ extern struct serio_driver iforce_serio_drv; extern struct usb_driver iforce_usb_driver;
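/*
 * Illustrative sketch, added for clarity and not part of the original
 * header: how the pieces above fit together.  In the FF_CMD_* encoding
 * the high byte is the packet opcode and the low byte is the payload
 * length (e.g. FF_CMD_PLAY == 0x4103: opcode 0x41 with 3 data bytes),
 * which HI() and LO() extract.  The payload layout used here is an
 * assumption for illustration only; see iforce_control_playback() in
 * iforce-packets.c for the real implementation.
 */
static inline int iforce_play_sketch(struct iforce *iforce, u16 effect_id,
				     unsigned int times)
{
	unsigned char data[3];

	data[0] = LO(effect_id);	/* device-side effect id */
	data[1] = times ? 0x01 : 0x00;	/* assumed start/stop flag */
	data[2] = LO(times);		/* assumed repeat count */

	return iforce_send_packet(iforce, FF_CMD_PLAY, data);
}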
// SPDX-License-Identifier: GPL-2.0-or-later /* * V4L2 controls framework control definitions. * * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl> */ #include <linux/export.h> #include <media/v4l2-ctrls.h> /* * Returns NULL or a character pointer array containing the menu for * the given control ID. The pointer array ends with a NULL pointer. * An empty string signifies a menu entry that is invalid. This allows * drivers to disable certain options if they are not supported. */ const char * const *v4l2_ctrl_get_menu(u32 id) { static const char * const mpeg_audio_sampling_freq[] = { "44.1 kHz", "48 kHz", "32 kHz", NULL }; static const char * const mpeg_audio_encoding[] = { "MPEG-1/2 Layer I", "MPEG-1/2 Layer II", "MPEG-1/2 Layer III", "MPEG-2/4 AAC", "AC-3", NULL }; static const char * const mpeg_audio_l1_bitrate[] = { "32 kbps", "64 kbps", "96 kbps", "128 kbps", "160 kbps", "192 kbps", "224 kbps", "256 kbps", "288 kbps", "320 kbps", "352 kbps", "384 kbps", "416 kbps", "448 kbps", NULL }; static const char * const mpeg_audio_l2_bitrate[] = { "32 kbps", "48 kbps", "56 kbps", "64 kbps", "80 kbps", "96 kbps", "112 kbps", "128 kbps", "160 kbps", "192 kbps", "224 kbps", "256 kbps", "320 kbps", "384 kbps", NULL }; static const char * const mpeg_audio_l3_bitrate[] = { "32 kbps", "40 kbps", "48 kbps", "56 kbps", "64 kbps", "80 kbps", "96 kbps", "112 kbps", "128 kbps", "160 kbps", "192 kbps", "224 kbps", "256 kbps", "320 kbps", NULL }; static const char * const mpeg_audio_ac3_bitrate[] = { "32 kbps", "40 kbps", "48 kbps", "56 kbps", "64 kbps", "80 kbps", "96 kbps", "112 kbps", "128 kbps", "160 kbps", "192 kbps", "224 kbps", "256 kbps", "320 kbps", "384 kbps", "448 kbps", "512 kbps", "576 kbps", "640 kbps", NULL }; static const char * const mpeg_audio_mode[] = { "Stereo", "Joint Stereo", "Dual", "Mono", NULL }; static const char * const mpeg_audio_mode_extension[] = { "Bound 4", "Bound 8", "Bound 12", "Bound 16", NULL }; static const char * const mpeg_audio_emphasis[] = { "No Emphasis", "50/15 us", "CCITT J17", NULL }; static const char * const mpeg_audio_crc[] = { "No CRC", "16-bit CRC", NULL }; static const char * const mpeg_audio_dec_playback[] = { "Auto", "Stereo", "Left", "Right", "Mono", "Swapped Stereo", NULL }; static const char * const mpeg_video_encoding[] = { "MPEG-1", "MPEG-2", "MPEG-4 AVC", NULL }; static const char * const mpeg_video_aspect[] = { "1x1", "4x3", "16x9", "2.21x1", NULL }; static const char * const mpeg_video_bitrate_mode[] = { "Variable Bitrate", "Constant Bitrate", "Constant Quality", NULL }; static const char * const mpeg_stream_type[] = { "MPEG-2 Program Stream", "MPEG-2 Transport Stream", "MPEG-1 System Stream", "MPEG-2 DVD-compatible Stream", "MPEG-1 VCD-compatible Stream", "MPEG-2 SVCD-compatible Stream", NULL }; static const char * const mpeg_stream_vbi_fmt[] = { "No VBI", "Private Packet, IVTV Format", NULL }; static const char * const camera_power_line_frequency[] = { "Disabled", "50 Hz", "60 Hz", "Auto", NULL }; static const char * const camera_exposure_auto[] = { "Auto Mode", "Manual Mode", "Shutter Priority Mode", "Aperture Priority Mode", NULL }; static const char * const camera_exposure_metering[] = { "Average", "Center Weighted", "Spot", "Matrix", NULL }; static const char * const
camera_auto_focus_range[] = { "Auto", "Normal", "Macro", "Infinity", NULL }; static const char * const colorfx[] = { "None", "Black & White", "Sepia", "Negative", "Emboss", "Sketch", "Sky Blue", "Grass Green", "Skin Whiten", "Vivid", "Aqua", "Art Freeze", "Silhouette", "Solarization", "Antique", "Set Cb/Cr", NULL }; static const char * const auto_n_preset_white_balance[] = { "Manual", "Auto", "Incandescent", "Fluorescent", "Fluorescent H", "Horizon", "Daylight", "Flash", "Cloudy", "Shade", NULL, }; static const char * const camera_iso_sensitivity_auto[] = { "Manual", "Auto", NULL }; static const char * const scene_mode[] = { "None", "Backlight", "Beach/Snow", "Candle Light", "Dusk/Dawn", "Fall Colors", "Fireworks", "Landscape", "Night", "Party/Indoor", "Portrait", "Sports", "Sunset", "Text", NULL }; static const char * const tune_emphasis[] = { "None", "50 Microseconds", "75 Microseconds", NULL, }; static const char * const header_mode[] = { "Separate Buffer", "Joined With 1st Frame", NULL, }; static const char * const multi_slice[] = { "Single", "Max Macroblocks", "Max Bytes", NULL, }; static const char * const entropy_mode[] = { "CAVLC", "CABAC", NULL, }; static const char * const mpeg_h264_level[] = { "1", "1b", "1.1", "1.2", "1.3", "2", "2.1", "2.2", "3", "3.1", "3.2", "4", "4.1", "4.2", "5", "5.1", "5.2", "6.0", "6.1", "6.2", NULL, }; static const char * const h264_loop_filter[] = { "Enabled", "Disabled", "Disabled at Slice Boundary", NULL, }; static const char * const h264_profile[] = { "Baseline", "Constrained Baseline", "Main", "Extended", "High", "High 10", "High 422", "High 444 Predictive", "High 10 Intra", "High 422 Intra", "High 444 Intra", "CAVLC 444 Intra", "Scalable Baseline", "Scalable High", "Scalable High Intra", "Stereo High", "Multiview High", "Constrained High", NULL, }; static const char * const vui_sar_idc[] = { "Unspecified", "1:1", "12:11", "10:11", "16:11", "40:33", "24:11", "20:11", "32:11", "80:33", "18:11", "15:11", "64:33", "160:99", "4:3", "3:2", "2:1", "Extended SAR", NULL, }; static const char * const h264_fp_arrangement_type[] = { "Checkerboard", "Column", "Row", "Side by Side", "Top Bottom", "Temporal", NULL, }; static const char * const h264_fmo_map_type[] = { "Interleaved Slices", "Scattered Slices", "Foreground with Leftover", "Box Out", "Raster Scan", "Wipe Scan", "Explicit", NULL, }; static const char * const h264_decode_mode[] = { "Slice-Based", "Frame-Based", NULL, }; static const char * const h264_start_code[] = { "No Start Code", "Annex B Start Code", NULL, }; static const char * const h264_hierarchical_coding_type[] = { "Hier Coding B", "Hier Coding P", NULL, }; static const char * const mpeg_mpeg2_level[] = { "Low", "Main", "High 1440", "High", NULL, }; static const char * const mpeg2_profile[] = { "Simple", "Main", "SNR Scalable", "Spatially Scalable", "High", NULL, }; static const char * const mpeg_mpeg4_level[] = { "0", "0b", "1", "2", "3", "3b", "4", "5", NULL, }; static const char * const mpeg4_profile[] = { "Simple", "Advanced Simple", "Core", "Simple Scalable", "Advanced Coding Efficiency", NULL, }; static const char * const vpx_golden_frame_sel[] = { "Use Previous Frame", "Use Previous Specific Frame", NULL, }; static const char * const vp8_profile[] = { "0", "1", "2", "3", NULL, }; static const char * const vp9_profile[] = { "0", "1", "2", "3", NULL, }; static const char * const vp9_level[] = { "1", "1.1", "2", "2.1", "3", "3.1", "4", "4.1", "5", "5.1", "5.2", "6", "6.1", "6.2", NULL, }; static const char * const flash_led_mode[] = { 
"Off", "Flash", "Torch", NULL, }; static const char * const flash_strobe_source[] = { "Software", "External", NULL, }; static const char * const jpeg_chroma_subsampling[] = { "4:4:4", "4:2:2", "4:2:0", "4:1:1", "4:1:0", "Gray", NULL, }; static const char * const dv_tx_mode[] = { "DVI-D", "HDMI", NULL, }; static const char * const dv_rgb_range[] = { "Automatic", "RGB Limited Range (16-235)", "RGB Full Range (0-255)", NULL, }; static const char * const dv_it_content_type[] = { "Graphics", "Photo", "Cinema", "Game", "No IT Content", NULL, }; static const char * const detect_md_mode[] = { "Disabled", "Global", "Threshold Grid", "Region Grid", NULL, }; static const char * const av1_profile[] = { "Main", "High", "Professional", NULL, }; static const char * const av1_level[] = { "2.0", "2.1", "2.2", "2.3", "3.0", "3.1", "3.2", "3.3", "4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2", "6.3", "7.0", "7.1", "7.2", "7.3", NULL, }; static const char * const hevc_profile[] = { "Main", "Main Still Picture", "Main 10", NULL, }; static const char * const hevc_level[] = { "1", "2", "2.1", "3", "3.1", "4", "4.1", "5", "5.1", "5.2", "6", "6.1", "6.2", NULL, }; static const char * const hevc_hierarchial_coding_type[] = { "B", "P", NULL, }; static const char * const hevc_refresh_type[] = { "None", "CRA", "IDR", NULL, }; static const char * const hevc_size_of_length_field[] = { "0", "1", "2", "4", NULL, }; static const char * const hevc_tier[] = { "Main", "High", NULL, }; static const char * const hevc_loop_filter_mode[] = { "Disabled", "Enabled", "Disabled at slice boundary", NULL, }; static const char * const hevc_decode_mode[] = { "Slice-Based", "Frame-Based", NULL, }; static const char * const hevc_start_code[] = { "No Start Code", "Annex B Start Code", NULL, }; static const char * const camera_orientation[] = { "Front", "Back", "External", NULL, }; static const char * const mpeg_video_frame_skip[] = { "Disabled", "Level Limit", "VBV/CPB Limit", NULL, }; static const char * const intra_refresh_period_type[] = { "Random", "Cyclic", NULL, }; switch (id) { case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return mpeg_audio_sampling_freq; case V4L2_CID_MPEG_AUDIO_ENCODING: return mpeg_audio_encoding; case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return mpeg_audio_l1_bitrate; case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return mpeg_audio_l2_bitrate; case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return mpeg_audio_l3_bitrate; case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return mpeg_audio_ac3_bitrate; case V4L2_CID_MPEG_AUDIO_MODE: return mpeg_audio_mode; case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return mpeg_audio_mode_extension; case V4L2_CID_MPEG_AUDIO_EMPHASIS: return mpeg_audio_emphasis; case V4L2_CID_MPEG_AUDIO_CRC: return mpeg_audio_crc; case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return mpeg_audio_dec_playback; case V4L2_CID_MPEG_VIDEO_ENCODING: return mpeg_video_encoding; case V4L2_CID_MPEG_VIDEO_ASPECT: return mpeg_video_aspect; case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return mpeg_video_bitrate_mode; case V4L2_CID_MPEG_STREAM_TYPE: return mpeg_stream_type; case V4L2_CID_MPEG_STREAM_VBI_FMT: return mpeg_stream_vbi_fmt; case V4L2_CID_POWER_LINE_FREQUENCY: return camera_power_line_frequency; case V4L2_CID_EXPOSURE_AUTO: return camera_exposure_auto; case V4L2_CID_EXPOSURE_METERING: return camera_exposure_metering; case V4L2_CID_AUTO_FOCUS_RANGE: return camera_auto_focus_range; case V4L2_CID_COLORFX: return colorfx; case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return 
auto_n_preset_white_balance; case V4L2_CID_ISO_SENSITIVITY_AUTO: return camera_iso_sensitivity_auto; case V4L2_CID_SCENE_MODE: return scene_mode; case V4L2_CID_TUNE_PREEMPHASIS: return tune_emphasis; case V4L2_CID_TUNE_DEEMPHASIS: return tune_emphasis; case V4L2_CID_FLASH_LED_MODE: return flash_led_mode; case V4L2_CID_FLASH_STROBE_SOURCE: return flash_strobe_source; case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return header_mode; case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: return mpeg_video_frame_skip; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return multi_slice; case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return entropy_mode; case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return mpeg_h264_level; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return h264_loop_filter; case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return h264_profile; case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return vui_sar_idc; case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return h264_fp_arrangement_type; case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return h264_fmo_map_type; case V4L2_CID_STATELESS_H264_DECODE_MODE: return h264_decode_mode; case V4L2_CID_STATELESS_H264_START_CODE: return h264_start_code; case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return h264_hierarchical_coding_type; case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return mpeg_mpeg2_level; case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return mpeg2_profile; case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return mpeg_mpeg4_level; case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return mpeg4_profile; case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return vpx_golden_frame_sel; case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return vp8_profile; case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return vp9_profile; case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: return vp9_level; case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return jpeg_chroma_subsampling; case V4L2_CID_DV_TX_MODE: return dv_tx_mode; case V4L2_CID_DV_TX_RGB_RANGE: case V4L2_CID_DV_RX_RGB_RANGE: return dv_rgb_range; case V4L2_CID_DV_TX_IT_CONTENT_TYPE: case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return dv_it_content_type; case V4L2_CID_DETECT_MD_MODE: return detect_md_mode; case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: return hevc_profile; case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: return hevc_level; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: return hevc_hierarchial_coding_type; case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: return hevc_refresh_type; case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return hevc_size_of_length_field; case V4L2_CID_MPEG_VIDEO_HEVC_TIER: return hevc_tier; case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: return hevc_loop_filter_mode; case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: return av1_profile; case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: return av1_level; case V4L2_CID_STATELESS_HEVC_DECODE_MODE: return hevc_decode_mode; case V4L2_CID_STATELESS_HEVC_START_CODE: return hevc_start_code; case V4L2_CID_CAMERA_ORIENTATION: return camera_orientation; case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: return intra_refresh_period_type; default: return NULL; } } EXPORT_SYMBOL(v4l2_ctrl_get_menu); #define __v4l2_qmenu_int_len(arr, len) ({ *(len) = ARRAY_SIZE(arr); (arr); }) /* * Returns NULL or an s64 type array containing the menu for given * control ID. The total number of the menu items is returned in @len. 
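 *
 * Illustrative usage, added for clarity and not part of the original
 * source: a driver asking for the integer menu of the VPX partitions
 * control could do
 *
 *	u32 len;
 *	const s64 *menu =
 *		v4l2_ctrl_get_int_menu(V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS,
 *				       &len);
 *
 * which, given the tables below, yields {1, 2, 4, 8} with len == 4;
 * an unknown ID returns NULL and sets len to 0.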
*/ const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len) { static const s64 qmenu_int_vpx_num_partitions[] = { 1, 2, 4, 8, }; static const s64 qmenu_int_vpx_num_ref_frames[] = { 1, 2, 3, }; switch (id) { case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return __v4l2_qmenu_int_len(qmenu_int_vpx_num_partitions, len); case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return __v4l2_qmenu_int_len(qmenu_int_vpx_num_ref_frames, len); default: *len = 0; return NULL; } } EXPORT_SYMBOL(v4l2_ctrl_get_int_menu); /* Return the control name. */ const char *v4l2_ctrl_get_name(u32 id) { switch (id) { /* USER controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! */ case V4L2_CID_USER_CLASS: return "User Controls"; case V4L2_CID_BRIGHTNESS: return "Brightness"; case V4L2_CID_CONTRAST: return "Contrast"; case V4L2_CID_SATURATION: return "Saturation"; case V4L2_CID_HUE: return "Hue"; case V4L2_CID_AUDIO_VOLUME: return "Volume"; case V4L2_CID_AUDIO_BALANCE: return "Balance"; case V4L2_CID_AUDIO_BASS: return "Bass"; case V4L2_CID_AUDIO_TREBLE: return "Treble"; case V4L2_CID_AUDIO_MUTE: return "Mute"; case V4L2_CID_AUDIO_LOUDNESS: return "Loudness"; case V4L2_CID_BLACK_LEVEL: return "Black Level"; case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic"; case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance"; case V4L2_CID_RED_BALANCE: return "Red Balance"; case V4L2_CID_BLUE_BALANCE: return "Blue Balance"; case V4L2_CID_GAMMA: return "Gamma"; case V4L2_CID_EXPOSURE: return "Exposure"; case V4L2_CID_AUTOGAIN: return "Gain, Automatic"; case V4L2_CID_GAIN: return "Gain"; case V4L2_CID_HFLIP: return "Horizontal Flip"; case V4L2_CID_VFLIP: return "Vertical Flip"; case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency"; case V4L2_CID_HUE_AUTO: return "Hue, Automatic"; case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature"; case V4L2_CID_SHARPNESS: return "Sharpness"; case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation"; case V4L2_CID_CHROMA_AGC: return "Chroma AGC"; case V4L2_CID_COLOR_KILLER: return "Color Killer"; case V4L2_CID_COLORFX: return "Color Effects"; case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic"; case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter"; case V4L2_CID_ROTATE: return "Rotate"; case V4L2_CID_BG_COLOR: return "Background Color"; case V4L2_CID_CHROMA_GAIN: return "Chroma Gain"; case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1"; case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2"; case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers"; case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers"; case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component"; case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr"; case V4L2_CID_COLORFX_RGB: return "Color Effects, RGB"; /* * Codec controls * * The MPEG controls are applicable to all codec controls * and the 'MPEG' part of the define is historical. * * Keep the order of the 'case's the same as in videodev2.h! 
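 *
 * Illustrative note, added for clarity and not part of the original
 * source: v4l2_ctrl_get_name() is a plain lookup by control ID, e.g.
 * v4l2_ctrl_get_name(V4L2_CID_MPEG_VIDEO_BITRATE) returns
 * "Video Bitrate", and an unknown ID returns NULL.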
*/ case V4L2_CID_CODEC_CLASS: return "Codec Controls"; case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type"; case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID"; case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID"; case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID"; case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID"; case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID"; case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID"; case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format"; case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency"; case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding"; case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate"; case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate"; case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate"; case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode"; case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension"; case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis"; case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC"; case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute"; case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate"; case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate"; case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: return "Audio Playback"; case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return "Audio Multilingual Playback"; case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding"; case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect"; case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames"; case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size"; case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure"; case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown"; case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode"; case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY: return "Constant Quality"; case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate"; case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate"; case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation"; case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute"; case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV"; case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface"; case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable"; case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs"; case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: return "Intra Refresh Period Type"; case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: return "Intra Refresh Period"; case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable"; case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control"; case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode"; case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics"; case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: return "Frame Skip Mode"; case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: return "Display Delay"; case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: return "Display Delay Enable"; case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: return "Generate Access Unit Delimiters"; case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value"; case 
V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable"; case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size"; case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode"; case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period"; case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level"; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset"; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset"; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode"; case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile"; case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR"; case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR"; case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable"; case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC"; case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING: return "H264 Enable Frame Packing SEI"; case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0: return "H264 Set Curr. Frame as Frame0"; case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return "H264 FP Arrangement Type"; case V4L2_CID_MPEG_VIDEO_H264_FMO: return "H264 Flexible MB Ordering"; case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return "H264 Map Type for FMO"; case V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP: return "H264 FMO Number of Slice Groups"; case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION: return "H264 FMO Direction of Change"; case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE: return "H264 FMO Size of 1st Slice Grp"; case V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH: return "H264 FMO No. 
of Consecutive MBs"; case V4L2_CID_MPEG_VIDEO_H264_ASO: return "H264 Arbitrary Slice Ordering"; case V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER: return "H264 ASO Slice Order"; case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING: return "Enable H264 Hierarchical Coding"; case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return "H264 Hierarchical Coding Type"; case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:return "H264 Number of HC Layers"; case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP: return "H264 Set QP Value for HC Layers"; case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION: return "H264 Constrained Intra Pred"; case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: return "H264 Chroma QP Index Offset"; case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP: return "H264 I-Frame Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: return "H264 I-Frame Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: return "H264 P-Frame Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: return "H264 P-Frame Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP: return "H264 B-Frame Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP: return "H264 B-Frame Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR: return "H264 Hierarchical Lay 0 Bitrate"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR: return "H264 Hierarchical Lay 1 Bitrate"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR: return "H264 Hierarchical Lay 2 Bitrate"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR: return "H264 Hierarchical Lay 3 Bitrate"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR: return "H264 Hierarchical Lay 4 Bitrate"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR: return "H264 Hierarchical Lay 5 Bitrate"; case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR: return "H264 Hierarchical Lay 6 Bitrate"; case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level"; case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile"; case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level"; case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile"; case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable"; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice"; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice"; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method"; case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size"; case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS"; case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count"; case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: return "Video Decoder Conceal Color"; case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control"; case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range"; case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range"; case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header"; case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame"; case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID: return "Base 
Layer Priority ID"; case V4L2_CID_MPEG_VIDEO_LTR_COUNT: return "LTR Count"; case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: return "Frame LTR Index"; case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: return "Use LTR Frames"; case V4L2_CID_MPEG_VIDEO_AVERAGE_QP: return "Average QP Value"; case V4L2_CID_FWHT_I_FRAME_QP: return "FWHT I-Frame QP Value"; case V4L2_CID_FWHT_P_FRAME_QP: return "FWHT P-Frame QP Value"; /* VPX controls */ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions"; case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4: return "VPX Intra Mode Decision Disable"; case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return "VPX No. of Refs for P Frame"; case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL: return "VPX Loop Filter Level Range"; case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control"; case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period"; case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator"; case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile"; case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile"; case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: return "VP9 Level"; /* HEVC controls */ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: return "HEVC I-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP: return "HEVC P-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP: return "HEVC B-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP: return "HEVC Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP: return "HEVC Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP: return "HEVC I-Frame Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP: return "HEVC I-Frame Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP: return "HEVC P-Frame Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP: return "HEVC P-Frame Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP: return "HEVC B-Frame Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP: return "HEVC B-Frame Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: return "HEVC Profile"; case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: return "HEVC Level"; case V4L2_CID_MPEG_VIDEO_HEVC_TIER: return "HEVC Tier"; case V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION: return "HEVC Frame Rate Resolution"; case V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH: return "HEVC Maximum Coding Unit Depth"; case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: return "HEVC Refresh Type"; case V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED: return "HEVC Constant Intra Prediction"; case V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU: return "HEVC Lossless Encoding"; case V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT: return "HEVC Wavefront"; case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: return "HEVC Loop Filter"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP: return "HEVC QP Values"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: return "HEVC Hierarchical Coding Type"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER: return "HEVC Hierarchical Coding Layer"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP: return "HEVC Hierarchical Layer 0 QP"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP: return "HEVC Hierarchical Layer 1 QP"; case 
V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP: return "HEVC Hierarchical Layer 2 QP"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP: return "HEVC Hierarchical Layer 3 QP"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP: return "HEVC Hierarchical Layer 4 QP"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP: return "HEVC Hierarchical Layer 5 QP"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP: return "HEVC Hierarchical Layer 6 QP"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR: return "HEVC Hierarchical Lay 0 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR: return "HEVC Hierarchical Lay 1 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR: return "HEVC Hierarchical Lay 2 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR: return "HEVC Hierarchical Lay 3 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR: return "HEVC Hierarchical Lay 4 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR: return "HEVC Hierarchical Lay 5 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR: return "HEVC Hierarchical Lay 6 BitRate"; case V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB: return "HEVC General PB"; case V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID: return "HEVC Temporal ID"; case V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING: return "HEVC Strong Intra Smoothing"; case V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT: return "HEVC Intra PU Split"; case V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION: return "HEVC TMV Prediction"; case V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1: return "HEVC Max Num of Candidate MVs"; case V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE: return "HEVC ENC Without Startcode"; case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD: return "HEVC Num of I-Frame b/w 2 IDR"; case V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2: return "HEVC Loop Filter Beta Offset"; case V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2: return "HEVC Loop Filter TC Offset"; case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field"; case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame"; case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR"; /* AV1 controls */ case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: return "AV1 Profile"; case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: return "AV1 Level"; /* CAMERA controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ case V4L2_CID_CAMERA_CLASS: return "Camera Controls"; case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure"; case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute"; case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate"; case V4L2_CID_PAN_RELATIVE: return "Pan, Relative"; case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative"; case V4L2_CID_PAN_RESET: return "Pan, Reset"; case V4L2_CID_TILT_RESET: return "Tilt, Reset"; case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute"; case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute"; case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute"; case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative"; case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous"; case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute"; case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative"; case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous"; case V4L2_CID_PRIVACY: return "Privacy"; case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute"; case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative"; case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias"; case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset"; case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range"; case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization"; case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity"; case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto"; case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode"; case V4L2_CID_SCENE_MODE: return "Scene Mode"; case V4L2_CID_3A_LOCK: return "3A Lock"; case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start"; case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop"; case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status"; case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range"; case V4L2_CID_PAN_SPEED: return "Pan, Speed"; case V4L2_CID_TILT_SPEED: return "Tilt, Speed"; case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size"; case V4L2_CID_CAMERA_ORIENTATION: return "Camera Orientation"; case V4L2_CID_CAMERA_SENSOR_ROTATION: return "Camera Sensor Rotation"; case V4L2_CID_HDR_SENSOR_MODE: return "HDR Sensor Mode"; /* FM Radio Modulator controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls"; case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation"; case V4L2_CID_RDS_TX_PI: return "RDS Program ID"; case V4L2_CID_RDS_TX_PTY: return "RDS Program Type"; case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name"; case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text"; case V4L2_CID_RDS_TX_MONO_STEREO: return "RDS Stereo"; case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: return "RDS Artificial Head"; case V4L2_CID_RDS_TX_COMPRESSED: return "RDS Compressed"; case V4L2_CID_RDS_TX_DYNAMIC_PTY: return "RDS Dynamic PTY"; case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement"; case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: return "RDS Traffic Program"; case V4L2_CID_RDS_TX_MUSIC_SPEECH: return "RDS Music"; case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: return "RDS Enable Alt Frequencies"; case V4L2_CID_RDS_TX_ALT_FREQS: return "RDS Alternate Frequencies"; case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled"; case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time"; case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation"; case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled"; case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain"; case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold"; case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time"; case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time"; case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled"; case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation"; case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency"; case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis"; case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level"; case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor"; /* Flash controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! */ case V4L2_CID_FLASH_CLASS: return "Flash Controls"; case V4L2_CID_FLASH_LED_MODE: return "LED Mode"; case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source"; case V4L2_CID_FLASH_STROBE: return "Strobe"; case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe"; case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status"; case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout"; case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode"; case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode"; case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator"; case V4L2_CID_FLASH_FAULT: return "Faults"; case V4L2_CID_FLASH_CHARGE: return "Charge"; case V4L2_CID_FLASH_READY: return "Ready to Strobe"; /* JPEG encoder controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! */ case V4L2_CID_JPEG_CLASS: return "JPEG Compression Controls"; case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return "Chroma Subsampling"; case V4L2_CID_JPEG_RESTART_INTERVAL: return "Restart Interval"; case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality"; case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers"; /* Image source controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls"; case V4L2_CID_VBLANK: return "Vertical Blanking"; case V4L2_CID_HBLANK: return "Horizontal Blanking"; case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain"; case V4L2_CID_TEST_PATTERN_RED: return "Red Pixel Value"; case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value"; case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value"; case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value"; case V4L2_CID_NOTIFY_GAINS: return "Notify Gains"; /* Image processing controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! */ case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls"; case V4L2_CID_LINK_FREQ: return "Link Frequency"; case V4L2_CID_PIXEL_RATE: return "Pixel Rate"; case V4L2_CID_TEST_PATTERN: return "Test Pattern"; case V4L2_CID_DEINTERLACING_MODE: return "Deinterlacing Mode"; case V4L2_CID_DIGITAL_GAIN: return "Digital Gain"; /* DV controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! */ case V4L2_CID_DV_CLASS: return "Digital Video Controls"; case V4L2_CID_DV_TX_HOTPLUG: return "Hotplug Present"; case V4L2_CID_DV_TX_RXSENSE: return "RxSense Present"; case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present"; case V4L2_CID_DV_TX_MODE: return "Transmit Mode"; case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range"; case V4L2_CID_DV_TX_IT_CONTENT_TYPE: return "Tx IT Content Type"; case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present"; case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range"; case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return "Rx IT Content Type"; case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls"; case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis"; case V4L2_CID_RDS_RECEPTION: return "RDS Reception"; case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls"; case V4L2_CID_RF_TUNER_RF_GAIN: return "RF Gain"; case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto"; case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain"; case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto"; case V4L2_CID_RF_TUNER_MIXER_GAIN: return "Mixer Gain"; case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: return "IF Gain, Auto"; case V4L2_CID_RF_TUNER_IF_GAIN: return "IF Gain"; case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: return "Bandwidth, Auto"; case V4L2_CID_RF_TUNER_BANDWIDTH: return "Bandwidth"; case V4L2_CID_RF_TUNER_PLL_LOCK: return "PLL Lock"; case V4L2_CID_RDS_RX_PTY: return "RDS Program Type"; case V4L2_CID_RDS_RX_PS_NAME: return "RDS PS Name"; case V4L2_CID_RDS_RX_RADIO_TEXT: return "RDS Radio Text"; case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement"; case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: return "RDS Traffic Program"; case V4L2_CID_RDS_RX_MUSIC_SPEECH: return "RDS Music"; /* Detection controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! */ case V4L2_CID_DETECT_CLASS: return "Detection Controls"; case V4L2_CID_DETECT_MD_MODE: return "Motion Detection Mode"; case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: return "MD Global Threshold"; case V4L2_CID_DETECT_MD_THRESHOLD_GRID: return "MD Threshold Grid"; case V4L2_CID_DETECT_MD_REGION_GRID: return "MD Region Grid"; /* Stateless Codec controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ case V4L2_CID_CODEC_STATELESS_CLASS: return "Stateless Codec Controls"; case V4L2_CID_STATELESS_H264_DECODE_MODE: return "H264 Decode Mode"; case V4L2_CID_STATELESS_H264_START_CODE: return "H264 Start Code"; case V4L2_CID_STATELESS_H264_SPS: return "H264 Sequence Parameter Set"; case V4L2_CID_STATELESS_H264_PPS: return "H264 Picture Parameter Set"; case V4L2_CID_STATELESS_H264_SCALING_MATRIX: return "H264 Scaling Matrix"; case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: return "H264 Prediction Weight Table"; case V4L2_CID_STATELESS_H264_SLICE_PARAMS: return "H264 Slice Parameters"; case V4L2_CID_STATELESS_H264_DECODE_PARAMS: return "H264 Decode Parameters"; case V4L2_CID_STATELESS_FWHT_PARAMS: return "FWHT Stateless Parameters"; case V4L2_CID_STATELESS_VP8_FRAME: return "VP8 Frame Parameters"; case V4L2_CID_STATELESS_MPEG2_SEQUENCE: return "MPEG-2 Sequence Header"; case V4L2_CID_STATELESS_MPEG2_PICTURE: return "MPEG-2 Picture Header"; case V4L2_CID_STATELESS_MPEG2_QUANTISATION: return "MPEG-2 Quantisation Matrices"; case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: return "VP9 Probabilities Updates"; case V4L2_CID_STATELESS_VP9_FRAME: return "VP9 Frame Decode Parameters"; case V4L2_CID_STATELESS_HEVC_SPS: return "HEVC Sequence Parameter Set"; case V4L2_CID_STATELESS_HEVC_PPS: return "HEVC Picture Parameter Set"; case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters"; case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: return "HEVC Scaling Matrix"; case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: return "HEVC Decode Parameters"; case V4L2_CID_STATELESS_HEVC_DECODE_MODE: return "HEVC Decode Mode"; case V4L2_CID_STATELESS_HEVC_START_CODE: return "HEVC Start Code"; case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: return "HEVC Entry Point Offsets"; case V4L2_CID_STATELESS_AV1_SEQUENCE: return "AV1 Sequence Parameters"; case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: return "AV1 Tile Group Entry"; case V4L2_CID_STATELESS_AV1_FRAME: return "AV1 Frame Parameters"; case V4L2_CID_STATELESS_AV1_FILM_GRAIN: return "AV1 Film Grain"; /* Colorimetry controls */ /* Keep the order of the 'case's the same as in v4l2-controls.h! 
*/ case V4L2_CID_COLORIMETRY_CLASS: return "Colorimetry Controls"; case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: return "HDR10 Content Light Info"; case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: return "HDR10 Mastering Display"; default: return NULL; } } EXPORT_SYMBOL(v4l2_ctrl_get_name); void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags) { *name = v4l2_ctrl_get_name(id); *flags = 0; switch (id) { case V4L2_CID_AUDIO_MUTE: case V4L2_CID_AUDIO_LOUDNESS: case V4L2_CID_AUTO_WHITE_BALANCE: case V4L2_CID_AUTOGAIN: case V4L2_CID_HFLIP: case V4L2_CID_VFLIP: case V4L2_CID_HUE_AUTO: case V4L2_CID_CHROMA_AGC: case V4L2_CID_COLOR_KILLER: case V4L2_CID_AUTOBRIGHTNESS: case V4L2_CID_MPEG_AUDIO_MUTE: case V4L2_CID_MPEG_VIDEO_MUTE: case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: case V4L2_CID_MPEG_VIDEO_PULLDOWN: case V4L2_CID_EXPOSURE_AUTO_PRIORITY: case V4L2_CID_FOCUS_AUTO: case V4L2_CID_PRIVACY: case V4L2_CID_AUDIO_LIMITER_ENABLED: case V4L2_CID_AUDIO_COMPRESSION_ENABLED: case V4L2_CID_PILOT_TONE_ENABLED: case V4L2_CID_ILLUMINATORS_1: case V4L2_CID_ILLUMINATORS_2: case V4L2_CID_FLASH_STROBE_STATUS: case V4L2_CID_FLASH_CHARGE: case V4L2_CID_FLASH_READY: case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: case V4L2_CID_WIDE_DYNAMIC_RANGE: case V4L2_CID_IMAGE_STABILIZATION: case V4L2_CID_RDS_RECEPTION: case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: case V4L2_CID_RF_TUNER_PLL_LOCK: case V4L2_CID_RDS_TX_MONO_STEREO: case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: case V4L2_CID_RDS_TX_COMPRESSED: case V4L2_CID_RDS_TX_DYNAMIC_PTY: case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: case V4L2_CID_RDS_TX_MUSIC_SPEECH: case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: case V4L2_CID_RDS_RX_MUSIC_SPEECH: *type = V4L2_CTRL_TYPE_BOOLEAN; *min = 0; *max = *step = 1; break; case V4L2_CID_ROTATE: *type = V4L2_CTRL_TYPE_INTEGER; *flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT; break; case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: *type = V4L2_CTRL_TYPE_INTEGER; break; case V4L2_CID_MPEG_VIDEO_LTR_COUNT: *type = V4L2_CTRL_TYPE_INTEGER; break; case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: *type = V4L2_CTRL_TYPE_INTEGER; *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; break; case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: *type = V4L2_CTRL_TYPE_BITMASK; *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; break; case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: case V4L2_CID_PAN_RESET: case V4L2_CID_TILT_RESET: case V4L2_CID_FLASH_STROBE: case V4L2_CID_FLASH_STROBE_STOP: case V4L2_CID_AUTO_FOCUS_START: case V4L2_CID_AUTO_FOCUS_STOP: case V4L2_CID_DO_WHITE_BALANCE: *type = V4L2_CTRL_TYPE_BUTTON; *flags |= V4L2_CTRL_FLAG_WRITE_ONLY | V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; *min = *max = *step = *def = 0; break; case V4L2_CID_POWER_LINE_FREQUENCY: case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: case 
V4L2_CID_MPEG_AUDIO_ENCODING: case V4L2_CID_MPEG_AUDIO_L1_BITRATE: case V4L2_CID_MPEG_AUDIO_L2_BITRATE: case V4L2_CID_MPEG_AUDIO_L3_BITRATE: case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: case V4L2_CID_MPEG_AUDIO_MODE: case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: case V4L2_CID_MPEG_AUDIO_EMPHASIS: case V4L2_CID_MPEG_AUDIO_CRC: case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: case V4L2_CID_MPEG_VIDEO_ENCODING: case V4L2_CID_MPEG_VIDEO_ASPECT: case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: case V4L2_CID_MPEG_STREAM_TYPE: case V4L2_CID_MPEG_STREAM_VBI_FMT: case V4L2_CID_EXPOSURE_AUTO: case V4L2_CID_AUTO_FOCUS_RANGE: case V4L2_CID_COLORFX: case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: case V4L2_CID_TUNE_PREEMPHASIS: case V4L2_CID_FLASH_LED_MODE: case V4L2_CID_FLASH_STROBE_SOURCE: case V4L2_CID_MPEG_VIDEO_HEADER_MODE: case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: case V4L2_CID_MPEG_VIDEO_H264_LEVEL: case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: case V4L2_CID_MPEG_VIDEO_H264_PROFILE: case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: case V4L2_CID_ISO_SENSITIVITY_AUTO: case V4L2_CID_EXPOSURE_METERING: case V4L2_CID_SCENE_MODE: case V4L2_CID_DV_TX_MODE: case V4L2_CID_DV_TX_RGB_RANGE: case V4L2_CID_DV_TX_IT_CONTENT_TYPE: case V4L2_CID_DV_RX_RGB_RANGE: case V4L2_CID_DV_RX_IT_CONTENT_TYPE: case V4L2_CID_TEST_PATTERN: case V4L2_CID_DEINTERLACING_MODE: case V4L2_CID_TUNE_DEEMPHASIS: case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: case V4L2_CID_DETECT_MD_MODE: case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: case V4L2_CID_MPEG_VIDEO_HEVC_TIER: case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: case V4L2_CID_STATELESS_HEVC_DECODE_MODE: case V4L2_CID_STATELESS_HEVC_START_CODE: case V4L2_CID_STATELESS_H264_DECODE_MODE: case V4L2_CID_STATELESS_H264_START_CODE: case V4L2_CID_CAMERA_ORIENTATION: case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: case V4L2_CID_HDR_SENSOR_MODE: *type = V4L2_CTRL_TYPE_MENU; break; case V4L2_CID_LINK_FREQ: *type = V4L2_CTRL_TYPE_INTEGER_MENU; break; case V4L2_CID_RDS_TX_PS_NAME: case V4L2_CID_RDS_TX_RADIO_TEXT: case V4L2_CID_RDS_RX_PS_NAME: case V4L2_CID_RDS_RX_RADIO_TEXT: *type = V4L2_CTRL_TYPE_STRING; break; case V4L2_CID_ISO_SENSITIVITY: case V4L2_CID_AUTO_EXPOSURE_BIAS: case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: *type = V4L2_CTRL_TYPE_INTEGER_MENU; break; case V4L2_CID_USER_CLASS: case V4L2_CID_CAMERA_CLASS: case V4L2_CID_CODEC_CLASS: case V4L2_CID_FM_TX_CLASS: case V4L2_CID_FLASH_CLASS: case V4L2_CID_JPEG_CLASS: case V4L2_CID_IMAGE_SOURCE_CLASS: case V4L2_CID_IMAGE_PROC_CLASS: case V4L2_CID_DV_CLASS: case V4L2_CID_FM_RX_CLASS: case V4L2_CID_RF_TUNER_CLASS: case V4L2_CID_DETECT_CLASS: case V4L2_CID_CODEC_STATELESS_CLASS: case 
V4L2_CID_COLORIMETRY_CLASS: *type = V4L2_CTRL_TYPE_CTRL_CLASS; /* You can neither read nor write these */ *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY; *min = *max = *step = *def = 0; break; case V4L2_CID_BG_COLOR: case V4L2_CID_COLORFX_RGB: *type = V4L2_CTRL_TYPE_INTEGER; *step = 1; *min = 0; /* Max is calculated as RGB888 that is 2^24 - 1 */ *max = 0xffffff; break; case V4L2_CID_COLORFX_CBCR: *type = V4L2_CTRL_TYPE_INTEGER; *step = 1; *min = 0; *max = 0xffff; break; case V4L2_CID_FLASH_FAULT: case V4L2_CID_JPEG_ACTIVE_MARKER: case V4L2_CID_3A_LOCK: case V4L2_CID_AUTO_FOCUS_STATUS: case V4L2_CID_DV_TX_HOTPLUG: case V4L2_CID_DV_TX_RXSENSE: case V4L2_CID_DV_TX_EDID_PRESENT: case V4L2_CID_DV_RX_POWER_PRESENT: *type = V4L2_CTRL_TYPE_BITMASK; break; case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: *type = V4L2_CTRL_TYPE_INTEGER; *flags |= V4L2_CTRL_FLAG_READ_ONLY; break; case V4L2_CID_MPEG_VIDEO_DEC_PTS: *type = V4L2_CTRL_TYPE_INTEGER64; *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY; *min = *def = 0; *max = 0x1ffffffffLL; *step = 1; break; case V4L2_CID_MPEG_VIDEO_DEC_FRAME: *type = V4L2_CTRL_TYPE_INTEGER64; *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY; *min = *def = 0; *max = 0x7fffffffffffffffLL; *step = 1; break; case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: *type = V4L2_CTRL_TYPE_INTEGER64; *min = 0; /* default for 8 bit black, luma is 16, chroma is 128 */ *def = 0x8000800010LL; *max = 0xffffffffffffLL; *step = 1; break; case V4L2_CID_MPEG_VIDEO_AVERAGE_QP: *type = V4L2_CTRL_TYPE_INTEGER; *flags |= V4L2_CTRL_FLAG_READ_ONLY; break; case V4L2_CID_PIXEL_RATE: *type = V4L2_CTRL_TYPE_INTEGER64; *flags |= V4L2_CTRL_FLAG_READ_ONLY; break; case V4L2_CID_DETECT_MD_REGION_GRID: *type = V4L2_CTRL_TYPE_U8; break; case V4L2_CID_DETECT_MD_THRESHOLD_GRID: *type = V4L2_CTRL_TYPE_U16; break; case V4L2_CID_RDS_TX_ALT_FREQS: *type = V4L2_CTRL_TYPE_U32; break; case V4L2_CID_STATELESS_MPEG2_SEQUENCE: *type = V4L2_CTRL_TYPE_MPEG2_SEQUENCE; break; case V4L2_CID_STATELESS_MPEG2_PICTURE: *type = V4L2_CTRL_TYPE_MPEG2_PICTURE; break; case V4L2_CID_STATELESS_MPEG2_QUANTISATION: *type = V4L2_CTRL_TYPE_MPEG2_QUANTISATION; break; case V4L2_CID_STATELESS_FWHT_PARAMS: *type = V4L2_CTRL_TYPE_FWHT_PARAMS; break; case V4L2_CID_STATELESS_H264_SPS: *type = V4L2_CTRL_TYPE_H264_SPS; break; case V4L2_CID_STATELESS_H264_PPS: *type = V4L2_CTRL_TYPE_H264_PPS; break; case V4L2_CID_STATELESS_H264_SCALING_MATRIX: *type = V4L2_CTRL_TYPE_H264_SCALING_MATRIX; break; case V4L2_CID_STATELESS_H264_SLICE_PARAMS: *type = V4L2_CTRL_TYPE_H264_SLICE_PARAMS; break; case V4L2_CID_STATELESS_H264_DECODE_PARAMS: *type = V4L2_CTRL_TYPE_H264_DECODE_PARAMS; break; case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: *type = V4L2_CTRL_TYPE_H264_PRED_WEIGHTS; break; case V4L2_CID_STATELESS_VP8_FRAME: *type = V4L2_CTRL_TYPE_VP8_FRAME; break; case V4L2_CID_STATELESS_HEVC_SPS: *type = V4L2_CTRL_TYPE_HEVC_SPS; break; case V4L2_CID_STATELESS_HEVC_PPS: *type = V4L2_CTRL_TYPE_HEVC_PPS; break; case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: *type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS; *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; break; case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: *type = V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX; break; case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: *type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS; break; case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: *type = V4L2_CTRL_TYPE_U32; *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; break; case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: *type = 
V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR; break; case V4L2_CID_STATELESS_VP9_FRAME: *type = V4L2_CTRL_TYPE_VP9_FRAME; break; case V4L2_CID_STATELESS_AV1_SEQUENCE: *type = V4L2_CTRL_TYPE_AV1_SEQUENCE; break; case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: *type = V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY; *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY; break; case V4L2_CID_STATELESS_AV1_FRAME: *type = V4L2_CTRL_TYPE_AV1_FRAME; break; case V4L2_CID_STATELESS_AV1_FILM_GRAIN: *type = V4L2_CTRL_TYPE_AV1_FILM_GRAIN; break; case V4L2_CID_UNIT_CELL_SIZE: *type = V4L2_CTRL_TYPE_AREA; *flags |= V4L2_CTRL_FLAG_READ_ONLY; break; case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: *type = V4L2_CTRL_TYPE_HDR10_CLL_INFO; break; case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: *type = V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY; break; default: *type = V4L2_CTRL_TYPE_INTEGER; break; } switch (id) { case V4L2_CID_MPEG_AUDIO_ENCODING: case V4L2_CID_MPEG_AUDIO_MODE: case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: case V4L2_CID_MPEG_VIDEO_B_FRAMES: case V4L2_CID_MPEG_STREAM_TYPE: *flags |= V4L2_CTRL_FLAG_UPDATE; break; case V4L2_CID_AUDIO_VOLUME: case V4L2_CID_AUDIO_BALANCE: case V4L2_CID_AUDIO_BASS: case V4L2_CID_AUDIO_TREBLE: case V4L2_CID_BRIGHTNESS: case V4L2_CID_CONTRAST: case V4L2_CID_SATURATION: case V4L2_CID_HUE: case V4L2_CID_RED_BALANCE: case V4L2_CID_BLUE_BALANCE: case V4L2_CID_GAMMA: case V4L2_CID_SHARPNESS: case V4L2_CID_CHROMA_GAIN: case V4L2_CID_RDS_TX_DEVIATION: case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: case V4L2_CID_AUDIO_LIMITER_DEVIATION: case V4L2_CID_AUDIO_COMPRESSION_GAIN: case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: case V4L2_CID_PILOT_TONE_DEVIATION: case V4L2_CID_PILOT_TONE_FREQUENCY: case V4L2_CID_TUNE_POWER_LEVEL: case V4L2_CID_TUNE_ANTENNA_CAPACITOR: case V4L2_CID_RF_TUNER_RF_GAIN: case V4L2_CID_RF_TUNER_LNA_GAIN: case V4L2_CID_RF_TUNER_MIXER_GAIN: case V4L2_CID_RF_TUNER_IF_GAIN: case V4L2_CID_RF_TUNER_BANDWIDTH: case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: *flags |= V4L2_CTRL_FLAG_SLIDER; break; case V4L2_CID_PAN_RELATIVE: case V4L2_CID_TILT_RELATIVE: case V4L2_CID_FOCUS_RELATIVE: case V4L2_CID_IRIS_RELATIVE: case V4L2_CID_ZOOM_RELATIVE: *flags |= V4L2_CTRL_FLAG_WRITE_ONLY | V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; break; case V4L2_CID_FLASH_STROBE_STATUS: case V4L2_CID_AUTO_FOCUS_STATUS: case V4L2_CID_FLASH_READY: case V4L2_CID_DV_TX_HOTPLUG: case V4L2_CID_DV_TX_RXSENSE: case V4L2_CID_DV_TX_EDID_PRESENT: case V4L2_CID_DV_RX_POWER_PRESENT: case V4L2_CID_DV_RX_IT_CONTENT_TYPE: case V4L2_CID_RDS_RX_PTY: case V4L2_CID_RDS_RX_PS_NAME: case V4L2_CID_RDS_RX_RADIO_TEXT: case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: case V4L2_CID_RDS_RX_MUSIC_SPEECH: case V4L2_CID_CAMERA_ORIENTATION: case V4L2_CID_CAMERA_SENSOR_ROTATION: *flags |= V4L2_CTRL_FLAG_READ_ONLY; break; case V4L2_CID_RF_TUNER_PLL_LOCK: *flags |= V4L2_CTRL_FLAG_VOLATILE; break; } } EXPORT_SYMBOL(v4l2_ctrl_fill); |
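/*
 * Usage sketch (illustrative, not part of this file): a driver-side
 * helper that queries the framework defaults for a control ID via
 * v4l2_ctrl_fill() before registering the control. The helper name and
 * the pr_info() report are hypothetical; the interface itself is the
 * one defined above.
 */
static void demo_show_ctrl_defaults(u32 id)
{
	const char *name;
	enum v4l2_ctrl_type type;
	s64 min, max, def;
	u64 step;
	u32 flags;

	v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
	pr_info("ctrl 0x%08x (%s): type %d, range [%lld..%lld], step %llu, def %lld, flags 0x%x\n",
		id, name ? name : "unknown", (int)type, min, max, step, def, flags);
}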
// SPDX-License-Identifier: GPL-2.0+ /* * comedi/drivers/ni_usb6501.c * Comedi driver for National Instruments USB-6501 * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 2014 Luca Ellero <luca.ellero@brickedbrain.com> */ /* * Driver: ni_usb6501 * Description: National Instruments USB-6501 module * Devices: [National Instruments] USB-6501 (ni_usb6501) * Author: Luca Ellero <luca.ellero@brickedbrain.com> * Updated: 8 Sep 2014 * Status: works * * Configuration Options: * none */ /* * NI-6501 - USB PROTOCOL DESCRIPTION * * Every command is composed of two USB packets: * - request (out) * - response (in) * * Every packet is at least 12 bytes long; here is the meaning of * every field (all values are hex): * * byte 0 is always 00 * byte 1 is always 01 * byte 2 is always 00 * byte 3 is the total packet length * * byte 4 is always 00 * byte 5 is the total packet length - 4 * byte 6 is always 01 * byte 7 is the command * * byte 8 is 02 (request) or 00 (response) * byte 9 is 00 (response) or 10 (port request) or 20 (counter request) * byte 10 is always 00 * byte 11 is 00 (request) or 02 (response) * * PORT PACKETS * * CMD: 0xE
READ_PORT * REQ: 00 01 00 10 00 0C 01 0E 02 10 00 00 00 03 <PORT> 00 * RES: 00 01 00 10 00 0C 01 00 00 00 00 02 00 03 <BMAP> 00 * * CMD: 0xF WRITE_PORT * REQ: 00 01 00 14 00 10 01 0F 02 10 00 00 00 03 <PORT> 00 03 <BMAP> 00 00 * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * CMD: 0x12 SET_PORT_DIR (0 = input, 1 = output) * REQ: 00 01 00 18 00 14 01 12 02 10 00 00 * 00 05 <PORT 0> <PORT 1> <PORT 2> 00 05 00 00 00 00 00 * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * COUNTER PACKETS * * CMD 0x9: START_COUNTER * REQ: 00 01 00 0C 00 08 01 09 02 20 00 00 * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * CMD 0xC: STOP_COUNTER * REQ: 00 01 00 0C 00 08 01 0C 02 20 00 00 * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * CMD 0xE: READ_COUNTER * REQ: 00 01 00 0C 00 08 01 0E 02 20 00 00 * RES: 00 01 00 10 00 0C 01 00 00 00 00 02 <u32 counter value, Big Endian> * * CMD 0xF: WRITE_COUNTER * REQ: 00 01 00 10 00 0C 01 0F 02 20 00 00 <u32 counter value, Big Endian> * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * * Please visit https://www.brickedbrain.com if you need * additional information or have any questions. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/comedi/comedi_usb.h> #define NI6501_TIMEOUT 1000 /* Port request packets */ static const u8 READ_PORT_REQUEST[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x0E, 0x02, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00}; static const u8 WRITE_PORT_REQUEST[] = {0x00, 0x01, 0x00, 0x14, 0x00, 0x10, 0x01, 0x0F, 0x02, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00}; static const u8 SET_PORT_DIR_REQUEST[] = {0x00, 0x01, 0x00, 0x18, 0x00, 0x14, 0x01, 0x12, 0x02, 0x10, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00}; /* Counter request packets */ static const u8 START_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x0C, 0x00, 0x08, 0x01, 0x09, 0x02, 0x20, 0x00, 0x00}; static const u8 STOP_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x0C, 0x00, 0x08, 0x01, 0x0C, 0x02, 0x20, 0x00, 0x00}; static const u8 READ_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x0C, 0x00, 0x08, 0x01, 0x0E, 0x02, 0x20, 0x00, 0x00}; static const u8 WRITE_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x0F, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; /* Response packets */ static const u8 GENERIC_RESPONSE[] = {0x00, 0x01, 0x00, 0x0C, 0x00, 0x08, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02}; static const u8 READ_PORT_RESPONSE[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x03, 0x00, 0x00}; static const u8 READ_COUNTER_RESPONSE[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00}; /* Largest supported packets */ static const size_t TX_MAX_SIZE = sizeof(SET_PORT_DIR_REQUEST); static const size_t RX_MAX_SIZE = sizeof(READ_PORT_RESPONSE); enum commands { READ_PORT, WRITE_PORT, SET_PORT_DIR, START_COUNTER, STOP_COUNTER, READ_COUNTER, WRITE_COUNTER }; struct ni6501_private { struct usb_endpoint_descriptor *ep_rx; struct usb_endpoint_descriptor *ep_tx; struct mutex mut; u8 *usb_rx_buf; u8 *usb_tx_buf; }; static int ni6501_port_command(struct comedi_device *dev, int command, unsigned int val, u8 *bitmap) { struct usb_device *usb = comedi_to_usb_dev(dev); struct ni6501_private *devpriv = dev->private; int request_size, response_size; u8 *tx = devpriv->usb_tx_buf; int ret; if (command != SET_PORT_DIR && !bitmap) return -EINVAL; mutex_lock(&devpriv->mut); switch (command) { case READ_PORT: request_size = sizeof(READ_PORT_REQUEST); 
response_size = sizeof(READ_PORT_RESPONSE); memcpy(tx, READ_PORT_REQUEST, request_size); tx[14] = val & 0xff; break; case WRITE_PORT: request_size = sizeof(WRITE_PORT_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, WRITE_PORT_REQUEST, request_size); tx[14] = val & 0xff; tx[17] = *bitmap; break; case SET_PORT_DIR: request_size = sizeof(SET_PORT_DIR_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, SET_PORT_DIR_REQUEST, request_size); tx[14] = val & 0xff; tx[15] = (val >> 8) & 0xff; tx[16] = (val >> 16) & 0xff; break; default: ret = -EINVAL; goto end; } ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->ep_tx->bEndpointAddress), devpriv->usb_tx_buf, request_size, NULL, NI6501_TIMEOUT); if (ret) goto end; ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->ep_rx->bEndpointAddress), devpriv->usb_rx_buf, response_size, NULL, NI6501_TIMEOUT); if (ret) goto end; /* Check if results are valid */ if (command == READ_PORT) { *bitmap = devpriv->usb_rx_buf[14]; /* mask bitmap for comparing */ devpriv->usb_rx_buf[14] = 0x00; if (memcmp(devpriv->usb_rx_buf, READ_PORT_RESPONSE, sizeof(READ_PORT_RESPONSE))) { ret = -EINVAL; } } else if (memcmp(devpriv->usb_rx_buf, GENERIC_RESPONSE, sizeof(GENERIC_RESPONSE))) { ret = -EINVAL; } end: mutex_unlock(&devpriv->mut); return ret; } static int ni6501_counter_command(struct comedi_device *dev, int command, u32 *val) { struct usb_device *usb = comedi_to_usb_dev(dev); struct ni6501_private *devpriv = dev->private; int request_size, response_size; u8 *tx = devpriv->usb_tx_buf; int ret; if ((command == READ_COUNTER || command == WRITE_COUNTER) && !val) return -EINVAL; mutex_lock(&devpriv->mut); switch (command) { case START_COUNTER: request_size = sizeof(START_COUNTER_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, START_COUNTER_REQUEST, request_size); break; case STOP_COUNTER: request_size = sizeof(STOP_COUNTER_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, STOP_COUNTER_REQUEST, request_size); break; case READ_COUNTER: request_size = sizeof(READ_COUNTER_REQUEST); response_size = sizeof(READ_COUNTER_RESPONSE); memcpy(tx, READ_COUNTER_REQUEST, request_size); break; case WRITE_COUNTER: request_size = sizeof(WRITE_COUNTER_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, WRITE_COUNTER_REQUEST, request_size); /* Setup tx packet: bytes 12,13,14,15 hold the */ /* u32 counter value (Big Endian) */ *((__be32 *)&tx[12]) = cpu_to_be32(*val); break; default: ret = -EINVAL; goto end; } ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->ep_tx->bEndpointAddress), devpriv->usb_tx_buf, request_size, NULL, NI6501_TIMEOUT); if (ret) goto end; ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->ep_rx->bEndpointAddress), devpriv->usb_rx_buf, response_size, NULL, NI6501_TIMEOUT); if (ret) goto end; /* Check if results are valid */ if (command == READ_COUNTER) { int i; /* Read counter value: bytes 12,13,14,15 of rx packet */ /* hold the u32 counter value (Big Endian) */ *val = be32_to_cpu(*((__be32 *)&devpriv->usb_rx_buf[12])); /* mask counter value for comparing */ for (i = 12; i < sizeof(READ_COUNTER_RESPONSE); ++i) devpriv->usb_rx_buf[i] = 0x00; if (memcmp(devpriv->usb_rx_buf, READ_COUNTER_RESPONSE, sizeof(READ_COUNTER_RESPONSE))) { ret = -EINVAL; } } else if (memcmp(devpriv->usb_rx_buf, GENERIC_RESPONSE, sizeof(GENERIC_RESPONSE))) { ret = -EINVAL; } end: mutex_unlock(&devpriv->mut); return ret; } static int ni6501_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn 
*insn, unsigned int *data) { int ret; ret = comedi_dio_insn_config(dev, s, insn, data, 0); if (ret) return ret; ret = ni6501_port_command(dev, SET_PORT_DIR, s->io_bits, NULL); if (ret) return ret; return insn->n; } static int ni6501_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int mask; int ret; u8 port; u8 bitmap; mask = comedi_dio_update_state(s, data); for (port = 0; port < 3; port++) { if (mask & (0xFF << port * 8)) { bitmap = (s->state >> port * 8) & 0xFF; ret = ni6501_port_command(dev, WRITE_PORT, port, &bitmap); if (ret) return ret; } } data[1] = 0; for (port = 0; port < 3; port++) { ret = ni6501_port_command(dev, READ_PORT, port, &bitmap); if (ret) return ret; data[1] |= bitmap << port * 8; } return insn->n; } static int ni6501_cnt_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int ret; u32 val = 0; switch (data[0]) { case INSN_CONFIG_ARM: ret = ni6501_counter_command(dev, START_COUNTER, NULL); break; case INSN_CONFIG_DISARM: ret = ni6501_counter_command(dev, STOP_COUNTER, NULL); break; case INSN_CONFIG_RESET: ret = ni6501_counter_command(dev, STOP_COUNTER, NULL); if (ret) break; ret = ni6501_counter_command(dev, WRITE_COUNTER, &val); break; default: return -EINVAL; } return ret ? ret : insn->n; } static int ni6501_cnt_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int ret; u32 val; unsigned int i; for (i = 0; i < insn->n; i++) { ret = ni6501_counter_command(dev, READ_COUNTER, &val); if (ret) return ret; data[i] = val; } return insn->n; } static int ni6501_cnt_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int ret; if (insn->n) { u32 val = data[insn->n - 1]; ret = ni6501_counter_command(dev, WRITE_COUNTER, &val); if (ret) return ret; } return insn->n; } static int ni6501_alloc_usb_buffers(struct comedi_device *dev) { struct ni6501_private *devpriv = dev->private; size_t size; size = usb_endpoint_maxp(devpriv->ep_rx); devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_rx_buf) return -ENOMEM; size = usb_endpoint_maxp(devpriv->ep_tx); devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_tx_buf) return -ENOMEM; return 0; } static int ni6501_find_endpoints(struct comedi_device *dev) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct ni6501_private *devpriv = dev->private; struct usb_host_interface *iface_desc = intf->cur_altsetting; struct usb_endpoint_descriptor *ep_desc; int i; if (iface_desc->desc.bNumEndpoints != 2) { dev_err(dev->class_dev, "Wrong number of endpoints\n"); return -ENODEV; } for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { ep_desc = &iface_desc->endpoint[i].desc; if (usb_endpoint_is_bulk_in(ep_desc)) { if (!devpriv->ep_rx) devpriv->ep_rx = ep_desc; continue; } if (usb_endpoint_is_bulk_out(ep_desc)) { if (!devpriv->ep_tx) devpriv->ep_tx = ep_desc; continue; } } if (!devpriv->ep_rx || !devpriv->ep_tx) return -ENODEV; if (usb_endpoint_maxp(devpriv->ep_rx) < RX_MAX_SIZE) return -ENODEV; if (usb_endpoint_maxp(devpriv->ep_tx) < TX_MAX_SIZE) return -ENODEV; return 0; } static int ni6501_auto_attach(struct comedi_device *dev, unsigned long context) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct ni6501_private *devpriv; struct comedi_subdevice *s; int ret; devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); if (!devpriv) 
return -ENOMEM; mutex_init(&devpriv->mut); usb_set_intfdata(intf, devpriv); ret = ni6501_find_endpoints(dev); if (ret) return ret; ret = ni6501_alloc_usb_buffers(dev); if (ret) return ret; ret = comedi_alloc_subdevices(dev, 2); if (ret) return ret; /* Digital Input/Output subdevice */ s = &dev->subdevices[0]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 24; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = ni6501_dio_insn_bits; s->insn_config = ni6501_dio_insn_config; /* Counter subdevice */ s = &dev->subdevices[1]; s->type = COMEDI_SUBD_COUNTER; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL; s->n_chan = 1; s->maxdata = 0xffffffff; s->insn_read = ni6501_cnt_insn_read; s->insn_write = ni6501_cnt_insn_write; s->insn_config = ni6501_cnt_insn_config; return 0; } static void ni6501_detach(struct comedi_device *dev) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct ni6501_private *devpriv = dev->private; if (!devpriv) return; mutex_destroy(&devpriv->mut); usb_set_intfdata(intf, NULL); kfree(devpriv->usb_rx_buf); kfree(devpriv->usb_tx_buf); } static struct comedi_driver ni6501_driver = { .module = THIS_MODULE, .driver_name = "ni6501", .auto_attach = ni6501_auto_attach, .detach = ni6501_detach, }; static int ni6501_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { return comedi_usb_auto_config(intf, &ni6501_driver, id->driver_info); } static const struct usb_device_id ni6501_usb_table[] = { { USB_DEVICE(0x3923, 0x718a) }, { } }; MODULE_DEVICE_TABLE(usb, ni6501_usb_table); static struct usb_driver ni6501_usb_driver = { .name = "ni6501", .id_table = ni6501_usb_table, .probe = ni6501_usb_probe, .disconnect = comedi_usb_auto_unconfig, }; module_comedi_usb_driver(ni6501_driver, ni6501_usb_driver); MODULE_AUTHOR("Luca Ellero"); MODULE_DESCRIPTION("Comedi driver for National Instruments USB-6501"); MODULE_LICENSE("GPL"); |
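/*
 * Protocol sketch (illustrative, not part of the driver): building the
 * CMD 0xE READ_PORT request described in the comment block above. As in
 * ni6501_port_command(), the template is copied and byte 14 is patched
 * with the port number (0..2 on the USB-6501). The function name is
 * hypothetical; the byte values come straight from READ_PORT_REQUEST.
 */
static void demo_build_read_port_request(u8 buf[16], u8 port)
{
	static const u8 tmpl[16] = {
		0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x0E,
		0x02, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00
	};

	memcpy(buf, tmpl, sizeof(tmpl));
	buf[14] = port;	/* byte 14 is the <PORT> field of the CMD 0xE layout */
}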
// SPDX-License-Identifier: GPL-2.0-only /* * scsi.c Copyright (C) 1992 Drew Eckhardt * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale * Copyright (C) 2002, 2003 Christoph Hellwig * * generic mid-level SCSI driver * Initial versions: Drew Eckhardt * Subsequent revisions: Eric Youngdale * * <drew@colorado.edu> * * Bug correction thanks go to: * Rik Faith <faith@cs.unc.edu> * Tommy Thorn <tthorn> * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de> * * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to * add scatter-gather, multiple outstanding requests, and other * enhancements. * * Native multichannel, wide scsi, /proc/scsi and hot plugging * support added by Michael Neuffer <mike@i-connect.net> * * Added request_module("scsi_hostadapter") for kerneld: * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf) * Bjorn Ekwall <bj0rn@blox.se> * (changed to kmod) * * Major improvements to the timeout, abort, and reset processing, * as well as performance modifications for large queue depths by * Leonard N. Zubkoff <lnz@dandelion.com> * * Converted cli() code to spinlocks, Ingo Molnar * * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli * * out_of_space hacks, D. Gilbert (dpg) 990608 */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/unistd.h> #include <linux/spinlock.h> #include <linux/kmod.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/mutex.h> #include <linux/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "scsi_priv.h" #include "scsi_logging.h" #define CREATE_TRACE_POINTS #include <trace/events/scsi.h> /* * Definitions and constants. */ /* * Note - the initial logging level can be set here to log events at boot time. * After the system is up, you may enable logging via the /proc interface.
*/ unsigned int scsi_logging_level; #if defined(CONFIG_SCSI_LOGGING) EXPORT_SYMBOL(scsi_logging_level); #endif #ifdef CONFIG_SCSI_LOGGING void scsi_log_send(struct scsi_cmnd *cmd) { unsigned int level; /* * If ML QUEUE log level is greater than or equal to: * * 1: nothing (match completion) * * 2: log opcode + command of all commands + cmd address * * 3: same as 2 * * 4: same as 3 */ if (unlikely(scsi_logging_level)) { level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, SCSI_LOG_MLQUEUE_BITS); if (level > 1) { scmd_printk(KERN_INFO, cmd, "Send: scmd 0x%p\n", cmd); scsi_print_command(cmd); } } } void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) { unsigned int level; /* * If ML COMPLETE log level is greater than or equal to: * * 1: log disposition, result, opcode + command, and conditionally * sense data for failures or non SUCCESS dispositions. * * 2: same as 1 but for all command completions. * * 3: same as 2 * * 4: same as 3 plus dump extra junk */ if (unlikely(scsi_logging_level)) { level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT, SCSI_LOG_MLCOMPLETE_BITS); if (((level > 0) && (cmd->result || disposition != SUCCESS)) || (level > 1)) { scsi_print_result(cmd, "Done", disposition); scsi_print_command(cmd); if (scsi_status_is_check_condition(cmd->result)) scsi_print_sense(cmd); if (level > 3) scmd_printk(KERN_INFO, cmd, "scsi host busy %d failed %d\n", scsi_host_busy(cmd->device->host), cmd->device->host->host_failed); } } } #endif /** * scsi_finish_command - cleanup and pass command back to upper layer * @cmd: the command * * Description: Pass command off to upper layer for finishing of I/O * request, waking processes that are waiting on results, * etc. */ void scsi_finish_command(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct scsi_target *starget = scsi_target(sdev); struct Scsi_Host *shost = sdev->host; struct scsi_driver *drv; unsigned int good_bytes; scsi_device_unbusy(sdev, cmd); /* * Clear the flags that say that the device/target/host is no longer * capable of accepting new commands. */ if (atomic_read(&shost->host_blocked)) atomic_set(&shost->host_blocked, 0); if (atomic_read(&starget->target_blocked)) atomic_set(&starget->target_blocked, 0); if (atomic_read(&sdev->device_blocked)) atomic_set(&sdev->device_blocked, 0); SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev, "Notifying upper driver of completion " "(result %x)\n", cmd->result)); good_bytes = scsi_bufflen(cmd); if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) { int old_good_bytes = good_bytes; drv = scsi_cmd_to_driver(cmd); if (drv->done) good_bytes = drv->done(cmd); /* * USB may not give sense identifying bad sector and * simply return a residue instead, so subtract off the * residue if drv->done() error processing indicates no * change to the completion length. */ if (good_bytes == old_good_bytes) good_bytes -= scsi_get_resid(cmd); } scsi_io_completion(cmd, good_bytes); } /* * 4096 is big enough for saturating fast SCSI LUNs. */ int scsi_device_max_queue_depth(struct scsi_device *sdev) { return min_t(int, sdev->host->can_queue, 4096); } /** * scsi_change_queue_depth - change a device's queue depth * @sdev: SCSI Device in question * @depth: number of commands allowed to be queued to the driver * * Sets the device queue depth and returns the new value. 
*/ int scsi_change_queue_depth(struct scsi_device *sdev, int depth) { depth = min_t(int, depth, scsi_device_max_queue_depth(sdev)); if (depth > 0) { sdev->queue_depth = depth; wmb(); } if (sdev->request_queue) blk_set_queue_depth(sdev->request_queue, depth); sbitmap_resize(&sdev->budget_map, sdev->queue_depth); return sdev->queue_depth; } EXPORT_SYMBOL(scsi_change_queue_depth); /** * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth * @sdev: SCSI Device in question * @depth: Current number of outstanding SCSI commands on this device, * not counting the one returned as QUEUE_FULL. * * Description: This function will track successive QUEUE_FULL events on a * specific SCSI device to determine if and when there is a * need to adjust the queue depth on the device. * * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth, * -1 - Drop back to untagged operation using host->cmd_per_lun * as the untagged command depth * * Lock Status: None held on entry * * Notes: Low level drivers may call this at any time and we will do * "The Right Thing." We are interrupt context safe. */ int scsi_track_queue_full(struct scsi_device *sdev, int depth) { /* * Don't let QUEUE_FULLs on the same * jiffies count, they could all be from * same event. */ if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4)) return 0; sdev->last_queue_full_time = jiffies; if (sdev->last_queue_full_depth != depth) { sdev->last_queue_full_count = 1; sdev->last_queue_full_depth = depth; } else { sdev->last_queue_full_count++; } if (sdev->last_queue_full_count <= 10) return 0; return scsi_change_queue_depth(sdev, depth); } EXPORT_SYMBOL(scsi_track_queue_full); /** * scsi_vpd_inquiry - Request a device provide us with a VPD page * @sdev: The device to ask * @buffer: Where to put the result * @page: Which Vital Product Data to return * @len: The length of the buffer * * This is an internal helper function. You probably want to use * scsi_get_vpd_page instead. * * Returns size of the vpd page on success or a negative error number. */ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer, u8 page, unsigned len) { int result; unsigned char cmd[16]; if (len < 4) return -EINVAL; cmd[0] = INQUIRY; cmd[1] = 1; /* EVPD */ cmd[2] = page; cmd[3] = len >> 8; cmd[4] = len & 0xff; cmd[5] = 0; /* Control byte */ /* * I'm not convinced we need to try quite this hard to get VPD, but * all the existing users tried this hard. */ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len, 30 * HZ, 3, NULL); if (result) return -EIO; /* * Sanity check that we got the page back that we asked for and that * the page size is not 0. */ if (buffer[1] != page) return -EIO; result = get_unaligned_be16(&buffer[2]); if (!result) return -EIO; return result + 4; } enum scsi_vpd_parameters { SCSI_VPD_HEADER_SIZE = 4, SCSI_VPD_LIST_SIZE = 36, }; static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page) { unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4); int result; if (sdev->no_vpd_size) return SCSI_DEFAULT_VPD_LEN; /* * Fetch the supported pages VPD and validate that the requested page * number is present. 
*/ if (page != 0) { result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd)); if (result < SCSI_VPD_HEADER_SIZE) return 0; if (result > sizeof(vpd)) { dev_warn_once(&sdev->sdev_gendev, "%s: long VPD page 0 length: %d bytes\n", __func__, result); result = sizeof(vpd); } result -= SCSI_VPD_HEADER_SIZE; if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result)) return 0; } /* * Fetch the VPD page header to find out how big the page * is. This is done to prevent problems on legacy devices * which can not handle allocation lengths as large as * potentially requested by the caller. */ result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE); if (result < 0) return 0; if (result < SCSI_VPD_HEADER_SIZE) { dev_warn_once(&sdev->sdev_gendev, "%s: short VPD page 0x%02x length: %d bytes\n", __func__, page, result); return 0; } return result; } /** * scsi_get_vpd_page - Get Vital Product Data from a SCSI device * @sdev: The device to ask * @page: Which Vital Product Data to return * @buf: where to store the VPD * @buf_len: number of bytes in the VPD buffer area * * SCSI devices may optionally supply Vital Product Data. Each 'page' * of VPD is defined in the appropriate SCSI document (eg SPC, SBC). * If the device supports this VPD page, this routine fills @buf * with the data from that page and return 0. If the VPD page is not * supported or its content cannot be retrieved, -EINVAL is returned. */ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, int buf_len) { int result, vpd_len; if (!scsi_device_supports_vpd(sdev)) return -EINVAL; vpd_len = scsi_get_vpd_size(sdev, page); if (vpd_len <= 0) return -EINVAL; vpd_len = min(vpd_len, buf_len); /* * Fetch the actual page. Since the appropriate size was reported * by the device it is now safe to ask for something bigger. */ memset(buf, 0, buf_len); result = scsi_vpd_inquiry(sdev, buf, page, vpd_len); if (result < 0) return -EINVAL; else if (result > vpd_len) dev_warn_once(&sdev->sdev_gendev, "%s: VPD page 0x%02x result %d > %d bytes\n", __func__, page, result, vpd_len); return 0; } EXPORT_SYMBOL_GPL(scsi_get_vpd_page); /** * scsi_get_vpd_buf - Get Vital Product Data from a SCSI device * @sdev: The device to ask * @page: Which Vital Product Data to return * * Returns %NULL upon failure. */ static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page) { struct scsi_vpd *vpd_buf; int vpd_len, result; vpd_len = scsi_get_vpd_size(sdev, page); if (vpd_len <= 0) return NULL; retry_pg: /* * Fetch the actual page. Since the appropriate size was reported * by the device it is now safe to ask for something bigger. 
*/ vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL); if (!vpd_buf) return NULL; result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len); if (result < 0) { kfree(vpd_buf); return NULL; } if (result > vpd_len) { dev_warn_once(&sdev->sdev_gendev, "%s: VPD page 0x%02x result %d > %d bytes\n", __func__, page, result, vpd_len); vpd_len = result; kfree(vpd_buf); goto retry_pg; } vpd_buf->len = result; return vpd_buf; } static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page, struct scsi_vpd __rcu **sdev_vpd_buf) { struct scsi_vpd *vpd_buf; vpd_buf = scsi_get_vpd_buf(sdev, page); if (!vpd_buf) return; mutex_lock(&sdev->inquiry_mutex); vpd_buf = rcu_replace_pointer(*sdev_vpd_buf, vpd_buf, lockdep_is_held(&sdev->inquiry_mutex)); mutex_unlock(&sdev->inquiry_mutex); if (vpd_buf) kfree_rcu(vpd_buf, rcu); } /** * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure * @sdev: The device to ask * * Attach the 'Device Identification' VPD page (0x83) and the * 'Unit Serial Number' VPD page (0x80) to a SCSI device * structure. This information can be used to identify the device * uniquely. */ void scsi_attach_vpd(struct scsi_device *sdev) { int i; struct scsi_vpd *vpd_buf; if (!scsi_device_supports_vpd(sdev)) return; /* Ask for all the pages supported by this device */ vpd_buf = scsi_get_vpd_buf(sdev, 0); if (!vpd_buf) return; for (i = 4; i < vpd_buf->len; i++) { switch (vpd_buf->data[i]) { case 0x0: scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0); break; case 0x80: scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80); break; case 0x83: scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83); break; case 0x89: scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89); break; case 0xb0: scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0); break; case 0xb1: scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1); break; case 0xb2: scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2); break; case 0xb7: scsi_update_vpd_page(sdev, 0xb7, &sdev->vpd_pgb7); break; default: break; } } kfree(vpd_buf); } /** * scsi_report_opcode - Find out if a given command is supported * @sdev: scsi device to query * @buffer: scratch buffer (must be at least 20 bytes long) * @len: length of buffer * @opcode: opcode for the command to look up * @sa: service action for the command to look up * * Uses the REPORT SUPPORTED OPERATION CODES to check support for the * command identified with @opcode and @sa. If the command does not * have a service action, @sa must be 0. Returns -EINVAL if RSOC fails, * 0 if the command is not supported and 1 if the device claims to * support the command. 
*/ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, unsigned int len, unsigned char opcode, unsigned short sa) { unsigned char cmd[16]; struct scsi_sense_hdr sshdr; int result, request_len; const struct scsi_exec_args exec_args = { .sshdr = &sshdr, }; if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3) return -EINVAL; /* RSOC header + size of command we are asking about */ request_len = 4 + COMMAND_SIZE(opcode); if (request_len > len) { dev_warn_once(&sdev->sdev_gendev, "%s: len %u bytes, opcode 0x%02x needs %u\n", __func__, len, opcode, request_len); return -EINVAL; } memset(cmd, 0, 16); cmd[0] = MAINTENANCE_IN; cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES; if (!sa) { cmd[2] = 1; /* One command format */ cmd[3] = opcode; } else { cmd[2] = 3; /* One command format with service action */ cmd[3] = opcode; put_unaligned_be16(sa, &cmd[4]); } put_unaligned_be32(request_len, &cmd[6]); memset(buffer, 0, len); result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, request_len, 30 * HZ, 3, &exec_args); if (result < 0) return result; if (result && scsi_sense_valid(&sshdr) && sshdr.sense_key == ILLEGAL_REQUEST && (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00) return -EINVAL; if ((buffer[1] & 3) == 3) /* Command supported */ return 1; return 0; } EXPORT_SYMBOL(scsi_report_opcode); #define SCSI_CDL_CHECK_BUF_LEN 64 static bool scsi_cdl_check_cmd(struct scsi_device *sdev, u8 opcode, u16 sa, unsigned char *buf) { int ret; u8 cdlp; /* Check operation code */ ret = scsi_report_opcode(sdev, buf, SCSI_CDL_CHECK_BUF_LEN, opcode, sa); if (ret <= 0) return false; if ((buf[1] & 0x03) != 0x03) return false; /* * See SPC-6, One_command parameter data format for * REPORT SUPPORTED OPERATION CODES. We have the following cases * depending on rwcdlp (buf[0] & 0x01) value: * - rwcdlp == 0: then cdlp indicates support for the A mode page when * it is equal to 1 and for the B mode page when it is * equal to 2. * - rwcdlp == 1: then cdlp indicates support for the T2A mode page * when it is equal to 1 and for the T2B mode page when * it is equal to 2. * Overall, to detect support for command duration limits, we only need * to check that cdlp is 1 or 2. */ cdlp = (buf[1] & 0x18) >> 3; return cdlp == 0x01 || cdlp == 0x02; } /** * scsi_cdl_check - Check if a SCSI device supports Command Duration Limits * @sdev: The device to check */ void scsi_cdl_check(struct scsi_device *sdev) { bool cdl_supported; unsigned char *buf; /* * Support for CDL was defined in SPC-5. Ignore devices reporting a * lower SPC version. This also avoids problems with old drives choking * on MAINTENANCE_IN / MI_REPORT_SUPPORTED_OPERATION_CODES with a * service action specified, as done in scsi_cdl_check_cmd(). */ if (sdev->scsi_level < SCSI_SPC_5) { sdev->cdl_supported = 0; return; } buf = kmalloc(SCSI_CDL_CHECK_BUF_LEN, GFP_KERNEL); if (!buf) { sdev->cdl_supported = 0; return; } /* Check support for READ_16, WRITE_16, READ_32 and WRITE_32 commands */ cdl_supported = scsi_cdl_check_cmd(sdev, READ_16, 0, buf) || scsi_cdl_check_cmd(sdev, WRITE_16, 0, buf) || scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, READ_32, buf) || scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, WRITE_32, buf); if (cdl_supported) { /* * We have CDL support: force the use of READ16/WRITE16. * READ32 and WRITE32 will be used for devices that support * the T10_PI_TYPE2_PROTECTION protection type.
*/ sdev->use_16_for_rw = 1; sdev->use_10_for_rw = 0; sdev->cdl_supported = 1; /* * If the device supports CDL, make sure that the current drive * feature status is consistent with the user controlled * cdl_enable state. */ scsi_cdl_enable(sdev, sdev->cdl_enable); } else { sdev->cdl_supported = 0; } kfree(buf); } /** * scsi_cdl_enable - Enable or disable a SCSI device supports for Command * Duration Limits * @sdev: The target device * @enable: the target state */ int scsi_cdl_enable(struct scsi_device *sdev, bool enable) { char buf[64]; bool is_ata; int ret; if (!sdev->cdl_supported) return -EOPNOTSUPP; rcu_read_lock(); is_ata = rcu_dereference(sdev->vpd_pg89); rcu_read_unlock(); /* * For ATA devices, CDL needs to be enabled with a SET FEATURES command. */ if (is_ata) { struct scsi_mode_data data; struct scsi_sense_hdr sshdr; char *buf_data; int len; ret = scsi_mode_sense(sdev, 0x08, 0x0a, 0xf2, buf, sizeof(buf), 5 * HZ, 3, &data, NULL); if (ret) return -EINVAL; /* Enable or disable CDL using the ATA feature page */ len = min_t(size_t, sizeof(buf), data.length - data.header_length - data.block_descriptor_length); buf_data = buf + data.header_length + data.block_descriptor_length; /* * If we want to enable CDL and CDL is already enabled on the * device, do nothing. This avoids needlessly resetting the CDL * statistics on the device as that is implied by the CDL enable * action. Similar to this, there is no need to do anything if * we want to disable CDL and CDL is already disabled. */ if (enable) { if ((buf_data[4] & 0x03) == 0x02) goto out; buf_data[4] &= ~0x03; buf_data[4] |= 0x02; } else { if ((buf_data[4] & 0x03) == 0x00) goto out; buf_data[4] &= ~0x03; } ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3, &data, &sshdr); if (ret) { if (ret > 0 && scsi_sense_valid(&sshdr)) scsi_print_sense_hdr(sdev, dev_name(&sdev->sdev_gendev), &sshdr); return ret; } } out: sdev->cdl_enable = enable; return 0; } /** * scsi_device_get - get an additional reference to a scsi_device * @sdev: device to get a reference to * * Description: Gets a reference to the scsi_device and increments the use count * of the underlying LLDD module. You must hold host_lock of the * parent Scsi_Host or already have a reference when calling this. * * This will fail if a device is deleted or cancelled, or when the LLD module * is in the process of being unloaded. */ int scsi_device_get(struct scsi_device *sdev) { if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) goto fail; if (!try_module_get(sdev->host->hostt->module)) goto fail; if (!get_device(&sdev->sdev_gendev)) goto fail_put_module; return 0; fail_put_module: module_put(sdev->host->hostt->module); fail: return -ENXIO; } EXPORT_SYMBOL(scsi_device_get); /** * scsi_device_put - release a reference to a scsi_device * @sdev: device to release a reference on. * * Description: Release a reference to the scsi_device and decrements the use * count of the underlying LLDD module. The device is freed once the last * user vanishes. */ void scsi_device_put(struct scsi_device *sdev) { struct module *mod = sdev->host->hostt->module; put_device(&sdev->sdev_gendev); module_put(mod); } EXPORT_SYMBOL(scsi_device_put); /* helper for shost_for_each_device, see that for documentation */ struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost, struct scsi_device *prev) { struct list_head *list = (prev ? 
&prev->siblings : &shost->__devices); struct scsi_device *next = NULL; unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); while (list->next != &shost->__devices) { next = list_entry(list->next, struct scsi_device, siblings); /* skip devices that we can't get a reference to */ if (!scsi_device_get(next)) break; next = NULL; list = list->next; } spin_unlock_irqrestore(shost->host_lock, flags); if (prev) scsi_device_put(prev); return next; } EXPORT_SYMBOL(__scsi_iterate_devices); /** * starget_for_each_device - helper to walk all devices of a target * @starget: target whose devices we want to iterate over. * @data: Opaque passed to each function call. * @fn: Function to call on each device * * This traverses over each device of @starget. The devices have * a reference that must be released by scsi_host_put when breaking * out of the loop. */ void starget_for_each_device(struct scsi_target *starget, void *data, void (*fn)(struct scsi_device *, void *)) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct scsi_device *sdev; shost_for_each_device(sdev, shost) { if ((sdev->channel == starget->channel) && (sdev->id == starget->id)) fn(sdev, data); } } EXPORT_SYMBOL(starget_for_each_device); /** * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED) * @starget: target whose devices we want to iterate over. * @data: parameter for callback @fn() * @fn: callback function that is invoked for each device * * This traverses over each device of @starget. It does _not_ * take a reference on the scsi_device, so the whole loop must be * protected by shost->host_lock. * * Note: The only reason why drivers would want to use this is because * they need to access the device list in irq context. Otherwise you * really want to use starget_for_each_device instead. **/ void __starget_for_each_device(struct scsi_target *starget, void *data, void (*fn)(struct scsi_device *, void *)) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct scsi_device *sdev; __shost_for_each_device(sdev, shost) { if ((sdev->channel == starget->channel) && (sdev->id == starget->id)) fn(sdev, data); } } EXPORT_SYMBOL(__starget_for_each_device); /** * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED) * @starget: SCSI target pointer * @lun: SCSI Logical Unit Number * * Description: Looks up the scsi_device with the specified @lun for a given * @starget. The returned scsi_device does not have an additional * reference. You must hold the host's host_lock over this call and * any access to the returned scsi_device. A scsi_device in state * SDEV_DEL is skipped. * * Note: The only reason why drivers should use this is because * they need to access the device list in irq context. Otherwise you * really want to use scsi_device_lookup_by_target instead. **/ struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, u64 lun) { struct scsi_device *sdev; list_for_each_entry(sdev, &starget->devices, same_target_siblings) { if (sdev->sdev_state == SDEV_DEL) continue; if (sdev->lun ==lun) return sdev; } return NULL; } EXPORT_SYMBOL(__scsi_device_lookup_by_target); /** * scsi_device_lookup_by_target - find a device given the target * @starget: SCSI target pointer * @lun: SCSI Logical Unit Number * * Description: Looks up the scsi_device with the specified @lun for a given * @starget. The returned scsi_device has an additional reference that * needs to be released with scsi_device_put once you're done with it. 
**/ struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, u64 lun) { struct scsi_device *sdev; struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); sdev = __scsi_device_lookup_by_target(starget, lun); if (sdev && scsi_device_get(sdev)) sdev = NULL; spin_unlock_irqrestore(shost->host_lock, flags); return sdev; } EXPORT_SYMBOL(scsi_device_lookup_by_target); /** * __scsi_device_lookup - find a device given the host (UNLOCKED) * @shost: SCSI host pointer * @channel: SCSI channel (zero if only one channel) * @id: SCSI target number (physical unit number) * @lun: SCSI Logical Unit Number * * Description: Looks up the scsi_device with the specified @channel, @id, @lun * for a given host. The returned scsi_device does not have an additional * reference. You must hold the host's host_lock over this call and any access * to the returned scsi_device. * * Note: The only reason why drivers would want to use this is because * they need to access the device list in irq context. Otherwise you * really want to use scsi_device_lookup instead. **/ struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost, uint channel, uint id, u64 lun) { struct scsi_device *sdev; list_for_each_entry(sdev, &shost->__devices, siblings) { if (sdev->sdev_state == SDEV_DEL) continue; if (sdev->channel == channel && sdev->id == id && sdev->lun ==lun) return sdev; } return NULL; } EXPORT_SYMBOL(__scsi_device_lookup); /** * scsi_device_lookup - find a device given the host * @shost: SCSI host pointer * @channel: SCSI channel (zero if only one channel) * @id: SCSI target number (physical unit number) * @lun: SCSI Logical Unit Number * * Description: Looks up the scsi_device with the specified @channel, @id, @lun * for a given host. The returned scsi_device has an additional reference that * needs to be released with scsi_device_put once you're done with it. **/ struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost, uint channel, uint id, u64 lun) { struct scsi_device *sdev; unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); sdev = __scsi_device_lookup(shost, channel, id, lun); if (sdev && scsi_device_get(sdev)) sdev = NULL; spin_unlock_irqrestore(shost->host_lock, flags); return sdev; } EXPORT_SYMBOL(scsi_device_lookup); MODULE_DESCRIPTION("SCSI core"); MODULE_LICENSE("GPL"); module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); static int __init init_scsi(void) { int error; error = scsi_init_procfs(); if (error) goto cleanup_queue; error = scsi_init_devinfo(); if (error) goto cleanup_procfs; error = scsi_init_hosts(); if (error) goto cleanup_devlist; error = scsi_init_sysctl(); if (error) goto cleanup_hosts; error = scsi_sysfs_register(); if (error) goto cleanup_sysctl; scsi_netlink_init(); printk(KERN_NOTICE "SCSI subsystem initialized\n"); return 0; cleanup_sysctl: scsi_exit_sysctl(); cleanup_hosts: scsi_exit_hosts(); cleanup_devlist: scsi_exit_devinfo(); cleanup_procfs: scsi_exit_procfs(); cleanup_queue: scsi_exit_queue(); printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n", -error); return error; } static void __exit exit_scsi(void) { scsi_netlink_exit(); scsi_sysfs_unregister(); scsi_exit_sysctl(); scsi_exit_hosts(); scsi_exit_devinfo(); scsi_exit_procfs(); scsi_exit_queue(); } subsys_initcall(init_scsi); module_exit(exit_scsi); |
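/*
 * Usage sketch (hypothetical LLDD fragment, not part of scsi.c): how a
 * low-level driver might feed QUEUE_FULL events into
 * scsi_track_queue_full(), following the return-value contract in its
 * kerneldoc above (0 = no change needed, >0 = the new queue depth).
 * The function name and the outstanding-command count are assumptions.
 */
static void demo_handle_queue_full(struct scsi_cmnd *cmd, int outstanding)
{
	struct scsi_device *sdev = cmd->device;
	int new_depth = scsi_track_queue_full(sdev, outstanding);

	if (new_depth > 0)
		sdev_printk(KERN_INFO, sdev,
			    "queue depth adjusted to %d after QUEUE_FULL\n",
			    new_depth);
}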
// SPDX-License-Identifier: GPL-2.0+ /* * vmk80xx.c * Velleman USB Board Low-Level Driver * * Copyright (C) 2009
Manuel Gebele <forensixs@gmx.de>, Germany * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 2000 David A. Schleef <ds@schleef.org> */ /* * Driver: vmk80xx * Description: Velleman USB Board Low-Level Driver * Devices: [Velleman] K8055 (K8055/VM110), K8061 (K8061/VM140), * VM110 (K8055/VM110), VM140 (K8061/VM140) * Author: Manuel Gebele <forensixs@gmx.de> * Updated: Sun, 10 May 2009 11:14:59 +0200 * Status: works * * Supports: * - analog input * - analog output * - digital input * - digital output * - counter * - pwm */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/errno.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/uaccess.h> #include <linux/comedi/comedi_usb.h> enum { DEVICE_VMK8055, DEVICE_VMK8061 }; #define VMK8055_DI_REG 0x00 #define VMK8055_DO_REG 0x01 #define VMK8055_AO1_REG 0x02 #define VMK8055_AO2_REG 0x03 #define VMK8055_AI1_REG 0x02 #define VMK8055_AI2_REG 0x03 #define VMK8055_CNT1_REG 0x04 #define VMK8055_CNT2_REG 0x06 #define VMK8061_CH_REG 0x01 #define VMK8061_DI_REG 0x01 #define VMK8061_DO_REG 0x01 #define VMK8061_PWM_REG1 0x01 #define VMK8061_PWM_REG2 0x02 #define VMK8061_CNT_REG 0x02 #define VMK8061_AO_REG 0x02 #define VMK8061_AI_REG1 0x02 #define VMK8061_AI_REG2 0x03 #define VMK8055_CMD_RST 0x00 #define VMK8055_CMD_DEB1_TIME 0x01 #define VMK8055_CMD_DEB2_TIME 0x02 #define VMK8055_CMD_RST_CNT1 0x03 #define VMK8055_CMD_RST_CNT2 0x04 #define VMK8055_CMD_WRT_AD 0x05 #define VMK8061_CMD_RD_AI 0x00 #define VMK8061_CMR_RD_ALL_AI 0x01 /* !non-active! */ #define VMK8061_CMD_SET_AO 0x02 #define VMK8061_CMD_SET_ALL_AO 0x03 /* !non-active! */ #define VMK8061_CMD_OUT_PWM 0x04 #define VMK8061_CMD_RD_DI 0x05 #define VMK8061_CMD_DO 0x06 /* !non-active! */ #define VMK8061_CMD_CLR_DO 0x07 #define VMK8061_CMD_SET_DO 0x08 #define VMK8061_CMD_RD_CNT 0x09 /* TODO: completely pointless? */ #define VMK8061_CMD_RST_CNT 0x0a /* TODO: completely pointless? 
*/ #define VMK8061_CMD_RD_VERSION 0x0b /* internal usage */ #define VMK8061_CMD_RD_JMP_STAT 0x0c /* TODO: not implemented yet */ #define VMK8061_CMD_RD_PWR_STAT 0x0d /* internal usage */ #define VMK8061_CMD_RD_DO 0x0e #define VMK8061_CMD_RD_AO 0x0f #define VMK8061_CMD_RD_PWM 0x10 #define IC3_VERSION BIT(0) #define IC6_VERSION BIT(1) #define MIN_BUF_SIZE 64 #define PACKET_TIMEOUT 10000 /* ms */ enum vmk80xx_model { VMK8055_MODEL, VMK8061_MODEL }; static const struct comedi_lrange vmk8061_range = { 2, { UNI_RANGE(5), UNI_RANGE(10) } }; struct vmk80xx_board { const char *name; enum vmk80xx_model model; const struct comedi_lrange *range; int ai_nchans; unsigned int ai_maxdata; int ao_nchans; int di_nchans; unsigned int cnt_maxdata; int pwm_nchans; unsigned int pwm_maxdata; }; static const struct vmk80xx_board vmk80xx_boardinfo[] = { [DEVICE_VMK8055] = { .name = "K8055 (VM110)", .model = VMK8055_MODEL, .range = &range_unipolar5, .ai_nchans = 2, .ai_maxdata = 0x00ff, .ao_nchans = 2, .di_nchans = 6, .cnt_maxdata = 0xffff, }, [DEVICE_VMK8061] = { .name = "K8061 (VM140)", .model = VMK8061_MODEL, .range = &vmk8061_range, .ai_nchans = 8, .ai_maxdata = 0x03ff, .ao_nchans = 8, .di_nchans = 8, .cnt_maxdata = 0, /* unknown, device is not writeable */ .pwm_nchans = 1, .pwm_maxdata = 0x03ff, }, }; struct vmk80xx_private { struct usb_endpoint_descriptor *ep_rx; struct usb_endpoint_descriptor *ep_tx; struct semaphore limit_sem; unsigned char *usb_rx_buf; unsigned char *usb_tx_buf; enum vmk80xx_model model; }; static void vmk80xx_do_bulk_msg(struct comedi_device *dev) { struct vmk80xx_private *devpriv = dev->private; struct usb_device *usb = comedi_to_usb_dev(dev); __u8 tx_addr; __u8 rx_addr; unsigned int tx_pipe; unsigned int rx_pipe; size_t tx_size; size_t rx_size; tx_addr = devpriv->ep_tx->bEndpointAddress; rx_addr = devpriv->ep_rx->bEndpointAddress; tx_pipe = usb_sndbulkpipe(usb, tx_addr); rx_pipe = usb_rcvbulkpipe(usb, rx_addr); tx_size = usb_endpoint_maxp(devpriv->ep_tx); rx_size = usb_endpoint_maxp(devpriv->ep_rx); usb_bulk_msg(usb, tx_pipe, devpriv->usb_tx_buf, tx_size, NULL, PACKET_TIMEOUT); usb_bulk_msg(usb, rx_pipe, devpriv->usb_rx_buf, rx_size, NULL, PACKET_TIMEOUT); } static int vmk80xx_read_packet(struct comedi_device *dev) { struct vmk80xx_private *devpriv = dev->private; struct usb_device *usb = comedi_to_usb_dev(dev); struct usb_endpoint_descriptor *ep; unsigned int pipe; if (devpriv->model == VMK8061_MODEL) { vmk80xx_do_bulk_msg(dev); return 0; } ep = devpriv->ep_rx; pipe = usb_rcvintpipe(usb, ep->bEndpointAddress); return usb_interrupt_msg(usb, pipe, devpriv->usb_rx_buf, usb_endpoint_maxp(ep), NULL, PACKET_TIMEOUT); } static int vmk80xx_write_packet(struct comedi_device *dev, int cmd) { struct vmk80xx_private *devpriv = dev->private; struct usb_device *usb = comedi_to_usb_dev(dev); struct usb_endpoint_descriptor *ep; unsigned int pipe; devpriv->usb_tx_buf[0] = cmd; if (devpriv->model == VMK8061_MODEL) { vmk80xx_do_bulk_msg(dev); return 0; } ep = devpriv->ep_tx; pipe = usb_sndintpipe(usb, ep->bEndpointAddress); return usb_interrupt_msg(usb, pipe, devpriv->usb_tx_buf, usb_endpoint_maxp(ep), NULL, PACKET_TIMEOUT); } static int vmk80xx_reset_device(struct comedi_device *dev) { struct vmk80xx_private *devpriv = dev->private; size_t size; int retval; size = usb_endpoint_maxp(devpriv->ep_tx); memset(devpriv->usb_tx_buf, 0, size); retval = vmk80xx_write_packet(dev, VMK8055_CMD_RST); if (retval) return retval; /* set outputs to known state as we cannot read them */ return vmk80xx_write_packet(dev, 
VMK8055_CMD_WRT_AD); } static int vmk80xx_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct vmk80xx_private *devpriv = dev->private; int chan; int reg[2]; int n; down(&devpriv->limit_sem); chan = CR_CHAN(insn->chanspec); switch (devpriv->model) { case VMK8055_MODEL: if (!chan) reg[0] = VMK8055_AI1_REG; else reg[0] = VMK8055_AI2_REG; break; case VMK8061_MODEL: default: reg[0] = VMK8061_AI_REG1; reg[1] = VMK8061_AI_REG2; devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_AI; devpriv->usb_tx_buf[VMK8061_CH_REG] = chan; break; } for (n = 0; n < insn->n; n++) { if (vmk80xx_read_packet(dev)) break; if (devpriv->model == VMK8055_MODEL) { data[n] = devpriv->usb_rx_buf[reg[0]]; continue; } /* VMK8061_MODEL */ data[n] = devpriv->usb_rx_buf[reg[0]] + 256 * devpriv->usb_rx_buf[reg[1]]; } up(&devpriv->limit_sem); return n; } static int vmk80xx_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct vmk80xx_private *devpriv = dev->private; int chan; int cmd; int reg; int n; down(&devpriv->limit_sem); chan = CR_CHAN(insn->chanspec); switch (devpriv->model) { case VMK8055_MODEL: cmd = VMK8055_CMD_WRT_AD; if (!chan) reg = VMK8055_AO1_REG; else reg = VMK8055_AO2_REG; break; default: /* NOTE: avoid compiler warnings */ cmd = VMK8061_CMD_SET_AO; reg = VMK8061_AO_REG; devpriv->usb_tx_buf[VMK8061_CH_REG] = chan; break; } for (n = 0; n < insn->n; n++) { devpriv->usb_tx_buf[reg] = data[n]; if (vmk80xx_write_packet(dev, cmd)) break; } up(&devpriv->limit_sem); return n; } static int vmk80xx_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct vmk80xx_private *devpriv = dev->private; int chan; int reg; int n; down(&devpriv->limit_sem); chan = CR_CHAN(insn->chanspec); reg = VMK8061_AO_REG - 1; devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_AO; for (n = 0; n < insn->n; n++) { if (vmk80xx_read_packet(dev)) break; data[n] = devpriv->usb_rx_buf[reg + chan]; } up(&devpriv->limit_sem); return n; } static int vmk80xx_di_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct vmk80xx_private *devpriv = dev->private; unsigned char *rx_buf; int reg; int retval; down(&devpriv->limit_sem); rx_buf = devpriv->usb_rx_buf; if (devpriv->model == VMK8061_MODEL) { reg = VMK8061_DI_REG; devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_DI; } else { reg = VMK8055_DI_REG; } retval = vmk80xx_read_packet(dev); if (!retval) { if (devpriv->model == VMK8055_MODEL) data[1] = (((rx_buf[reg] >> 4) & 0x03) | ((rx_buf[reg] << 2) & 0x04) | ((rx_buf[reg] >> 3) & 0x18)); else data[1] = rx_buf[reg]; retval = 2; } up(&devpriv->limit_sem); return retval; } static int vmk80xx_do_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct vmk80xx_private *devpriv = dev->private; unsigned char *rx_buf = devpriv->usb_rx_buf; unsigned char *tx_buf = devpriv->usb_tx_buf; int reg, cmd; int ret = 0; if (devpriv->model == VMK8061_MODEL) { reg = VMK8061_DO_REG; cmd = VMK8061_CMD_DO; } else { /* VMK8055_MODEL */ reg = VMK8055_DO_REG; cmd = VMK8055_CMD_WRT_AD; } down(&devpriv->limit_sem); if (comedi_dio_update_state(s, data)) { tx_buf[reg] = s->state; ret = vmk80xx_write_packet(dev, cmd); if (ret) goto out; } if (devpriv->model == VMK8061_MODEL) { tx_buf[0] = VMK8061_CMD_RD_DO; ret = vmk80xx_read_packet(dev); if (ret) goto out; data[1] = rx_buf[reg]; } else { 
data[1] = s->state; } out: up(&devpriv->limit_sem); return ret ? ret : insn->n; } static int vmk80xx_cnt_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct vmk80xx_private *devpriv = dev->private; int chan; int reg[2]; int n; down(&devpriv->limit_sem); chan = CR_CHAN(insn->chanspec); switch (devpriv->model) { case VMK8055_MODEL: if (!chan) reg[0] = VMK8055_CNT1_REG; else reg[0] = VMK8055_CNT2_REG; break; case VMK8061_MODEL: default: reg[0] = VMK8061_CNT_REG; reg[1] = VMK8061_CNT_REG; devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_CNT; break; } for (n = 0; n < insn->n; n++) { if (vmk80xx_read_packet(dev)) break; if (devpriv->model == VMK8055_MODEL) data[n] = devpriv->usb_rx_buf[reg[0]]; else /* VMK8061_MODEL */ data[n] = devpriv->usb_rx_buf[reg[0] * (chan + 1) + 1] + 256 * devpriv->usb_rx_buf[reg[1] * 2 + 2]; } up(&devpriv->limit_sem); return n; } static int vmk80xx_cnt_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct vmk80xx_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); int cmd; int reg; int ret; down(&devpriv->limit_sem); switch (data[0]) { case INSN_CONFIG_RESET: if (devpriv->model == VMK8055_MODEL) { if (!chan) { cmd = VMK8055_CMD_RST_CNT1; reg = VMK8055_CNT1_REG; } else { cmd = VMK8055_CMD_RST_CNT2; reg = VMK8055_CNT2_REG; } devpriv->usb_tx_buf[reg] = 0x00; } else { cmd = VMK8061_CMD_RST_CNT; } ret = vmk80xx_write_packet(dev, cmd); break; default: ret = -EINVAL; break; } up(&devpriv->limit_sem); return ret ? ret : insn->n; } static int vmk80xx_cnt_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct vmk80xx_private *devpriv = dev->private; unsigned long debtime; unsigned long val; int chan; int cmd; int n; down(&devpriv->limit_sem); chan = CR_CHAN(insn->chanspec); if (!chan) cmd = VMK8055_CMD_DEB1_TIME; else cmd = VMK8055_CMD_DEB2_TIME; for (n = 0; n < insn->n; n++) { debtime = data[n]; if (debtime == 0) debtime = 1; /* TODO: Prevent overflows */ if (debtime > 7450) debtime = 7450; val = int_sqrt(debtime * 1000 / 115); if (((val + 1) * val) < debtime * 1000 / 115) val += 1; devpriv->usb_tx_buf[6 + chan] = val; if (vmk80xx_write_packet(dev, cmd)) break; } up(&devpriv->limit_sem); return n; } static int vmk80xx_pwm_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct vmk80xx_private *devpriv = dev->private; unsigned char *tx_buf; unsigned char *rx_buf; int reg[2]; int n; down(&devpriv->limit_sem); tx_buf = devpriv->usb_tx_buf; rx_buf = devpriv->usb_rx_buf; reg[0] = VMK8061_PWM_REG1; reg[1] = VMK8061_PWM_REG2; tx_buf[0] = VMK8061_CMD_RD_PWM; for (n = 0; n < insn->n; n++) { if (vmk80xx_read_packet(dev)) break; data[n] = rx_buf[reg[0]] + 4 * rx_buf[reg[1]]; } up(&devpriv->limit_sem); return n; } static int vmk80xx_pwm_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct vmk80xx_private *devpriv = dev->private; unsigned char *tx_buf; int reg[2]; int cmd; int n; down(&devpriv->limit_sem); tx_buf = devpriv->usb_tx_buf; reg[0] = VMK8061_PWM_REG1; reg[1] = VMK8061_PWM_REG2; cmd = VMK8061_CMD_OUT_PWM; /* * The following piece of code was translated from the inline * assembler code in the DLL source code.
* * asm * mov eax, k ; k is the value (data[n]) * and al, 03h ; al are the lower 8 bits of eax * mov lo, al ; lo is the low part (tx_buf[reg[0]]) * mov eax, k * shr eax, 2 ; right shift eax register by 2 * mov hi, al ; hi is the high part (tx_buf[reg[1]]) * end; */ for (n = 0; n < insn->n; n++) { tx_buf[reg[0]] = (unsigned char)(data[n] & 0x03); tx_buf[reg[1]] = (unsigned char)(data[n] >> 2) & 0xff; if (vmk80xx_write_packet(dev, cmd)) break; } up(&devpriv->limit_sem); return n; } static int vmk80xx_find_usb_endpoints(struct comedi_device *dev) { struct vmk80xx_private *devpriv = dev->private; struct usb_interface *intf = comedi_to_usb_interface(dev); struct usb_host_interface *iface_desc = intf->cur_altsetting; struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc; int ret; if (devpriv->model == VMK8061_MODEL) ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc, &ep_tx_desc, NULL, NULL); else ret = usb_find_common_endpoints(iface_desc, NULL, NULL, &ep_rx_desc, &ep_tx_desc); if (ret) return -ENODEV; devpriv->ep_rx = ep_rx_desc; devpriv->ep_tx = ep_tx_desc; if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx)) return -EINVAL; return 0; } static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev) { struct vmk80xx_private *devpriv = dev->private; size_t size; size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE); devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_rx_buf) return -ENOMEM; size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE); devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_tx_buf) return -ENOMEM; return 0; } static int vmk80xx_init_subdevices(struct comedi_device *dev) { const struct vmk80xx_board *board = dev->board_ptr; struct vmk80xx_private *devpriv = dev->private; struct comedi_subdevice *s; int n_subd; int ret; down(&devpriv->limit_sem); if (devpriv->model == VMK8055_MODEL) n_subd = 5; else n_subd = 6; ret = comedi_alloc_subdevices(dev, n_subd); if (ret) { up(&devpriv->limit_sem); return ret; } /* Analog input subdevice */ s = &dev->subdevices[0]; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND; s->n_chan = board->ai_nchans; s->maxdata = board->ai_maxdata; s->range_table = board->range; s->insn_read = vmk80xx_ai_insn_read; /* Analog output subdevice */ s = &dev->subdevices[1]; s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE | SDF_GROUND; s->n_chan = board->ao_nchans; s->maxdata = 0x00ff; s->range_table = board->range; s->insn_write = vmk80xx_ao_insn_write; if (devpriv->model == VMK8061_MODEL) { s->subdev_flags |= SDF_READABLE; s->insn_read = vmk80xx_ao_insn_read; } /* Digital input subdevice */ s = &dev->subdevices[2]; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = board->di_nchans; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = vmk80xx_di_insn_bits; /* Digital output subdevice */ s = &dev->subdevices[3]; s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 8; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = vmk80xx_do_insn_bits; /* Counter subdevice */ s = &dev->subdevices[4]; s->type = COMEDI_SUBD_COUNTER; s->subdev_flags = SDF_READABLE; s->n_chan = 2; s->maxdata = board->cnt_maxdata; s->insn_read = vmk80xx_cnt_insn_read; s->insn_config = vmk80xx_cnt_insn_config; if (devpriv->model == VMK8055_MODEL) { s->subdev_flags |= SDF_WRITABLE; s->insn_write = vmk80xx_cnt_insn_write; } /* PWM subdevice */ if (devpriv->model == VMK8061_MODEL) { s = &dev->subdevices[5]; s->type = COMEDI_SUBD_PWM; 
s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = board->pwm_nchans; s->maxdata = board->pwm_maxdata; s->insn_read = vmk80xx_pwm_insn_read; s->insn_write = vmk80xx_pwm_insn_write; } up(&devpriv->limit_sem); return 0; } static int vmk80xx_auto_attach(struct comedi_device *dev, unsigned long context) { struct usb_interface *intf = comedi_to_usb_interface(dev); const struct vmk80xx_board *board = NULL; struct vmk80xx_private *devpriv; int ret; if (context < ARRAY_SIZE(vmk80xx_boardinfo)) board = &vmk80xx_boardinfo[context]; if (!board) return -ENODEV; dev->board_ptr = board; dev->board_name = board->name; devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); if (!devpriv) return -ENOMEM; devpriv->model = board->model; sema_init(&devpriv->limit_sem, 8); ret = vmk80xx_find_usb_endpoints(dev); if (ret) return ret; ret = vmk80xx_alloc_usb_buffers(dev); if (ret) return ret; usb_set_intfdata(intf, devpriv); if (devpriv->model == VMK8055_MODEL) vmk80xx_reset_device(dev); return vmk80xx_init_subdevices(dev); } static void vmk80xx_detach(struct comedi_device *dev) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct vmk80xx_private *devpriv = dev->private; if (!devpriv) return; down(&devpriv->limit_sem); usb_set_intfdata(intf, NULL); kfree(devpriv->usb_rx_buf); kfree(devpriv->usb_tx_buf); up(&devpriv->limit_sem); } static struct comedi_driver vmk80xx_driver = { .module = THIS_MODULE, .driver_name = "vmk80xx", .auto_attach = vmk80xx_auto_attach, .detach = vmk80xx_detach, }; static int vmk80xx_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { return comedi_usb_auto_config(intf, &vmk80xx_driver, id->driver_info); } static const struct usb_device_id vmk80xx_usb_id_table[] = { { USB_DEVICE(0x10cf, 0x5500), .driver_info = DEVICE_VMK8055 }, { USB_DEVICE(0x10cf, 0x5501), .driver_info = DEVICE_VMK8055 }, { USB_DEVICE(0x10cf, 0x5502), .driver_info = DEVICE_VMK8055 }, { USB_DEVICE(0x10cf, 0x5503), .driver_info = DEVICE_VMK8055 }, { USB_DEVICE(0x10cf, 0x8061), .driver_info = DEVICE_VMK8061 }, { USB_DEVICE(0x10cf, 0x8062), .driver_info = DEVICE_VMK8061 }, { USB_DEVICE(0x10cf, 0x8063), .driver_info = DEVICE_VMK8061 }, { USB_DEVICE(0x10cf, 0x8064), .driver_info = DEVICE_VMK8061 }, { USB_DEVICE(0x10cf, 0x8065), .driver_info = DEVICE_VMK8061 }, { USB_DEVICE(0x10cf, 0x8066), .driver_info = DEVICE_VMK8061 }, { USB_DEVICE(0x10cf, 0x8067), .driver_info = DEVICE_VMK8061 }, { USB_DEVICE(0x10cf, 0x8068), .driver_info = DEVICE_VMK8061 }, { } }; MODULE_DEVICE_TABLE(usb, vmk80xx_usb_id_table); static struct usb_driver vmk80xx_usb_driver = { .name = "vmk80xx", .id_table = vmk80xx_usb_id_table, .probe = vmk80xx_usb_probe, .disconnect = comedi_usb_auto_unconfig, }; module_comedi_usb_driver(vmk80xx_driver, vmk80xx_usb_driver); MODULE_AUTHOR("Manuel Gebele <forensixs@gmx.de>"); MODULE_DESCRIPTION("Velleman USB Board Low-Level Driver"); MODULE_LICENSE("GPL");
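/*
 * Usage sketch (hypothetical, not part of the driver): setting the K8061 PWM
 * output from user space with comedilib. The device path and the subdevice
 * number are assumptions; subdevice 5 matches the PWM subdevice created in
 * vmk80xx_init_subdevices() above.
 */
#include <comedilib.h>

static int example_set_pwm(unsigned int value)
{
	comedi_t *dev = comedi_open("/dev/comedi0");
	int ret;

	if (!dev)
		return -1;
	/* subdevice 5 (PWM), channel 0, range 0, ground reference */
	ret = comedi_data_write(dev, 5, 0, 0, AREF_GROUND, value);
	comedi_close(dev);
	return ret < 0 ? -1 : 0;
}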
// SPDX-License-Identifier: GPL-2.0-only /* Common methods for dibusb-based-receivers. * * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #include "dibusb.h" /* Max transfer size done by I2C transfer functions */ #define MAX_XFER_SIZE 64 static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info (|-able))." DVB_USB_DEBUG_STATUS); MODULE_DESCRIPTION("Common methods for dibusb-based receivers"); MODULE_LICENSE("GPL"); #define deb_info(args...) dprintk(debug,0x01,args) /* common stuff used by the different dibusb modules */ int dibusb_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { if (adap->priv != NULL) { struct dibusb_state *st = adap->priv; if (st->ops.fifo_ctrl != NULL) if (st->ops.fifo_ctrl(adap->fe_adap[0].fe, onoff)) { err("error while controlling the fifo of the demod."); return -ENODEV; } } return 0; } EXPORT_SYMBOL(dibusb_streaming_ctrl); int dibusb_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) { if (adap->priv != NULL) { struct dibusb_state *st = adap->priv; if (st->ops.pid_ctrl != NULL) st->ops.pid_ctrl(adap->fe_adap[0].fe, index, pid, onoff); } return 0; } EXPORT_SYMBOL(dibusb_pid_filter); int dibusb_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff) { if (adap->priv != NULL) { struct dibusb_state *st = adap->priv; if (st->ops.pid_parse != NULL) if (st->ops.pid_parse(adap->fe_adap[0].fe, onoff) < 0) err("could not handle pid_parser"); } return 0; } EXPORT_SYMBOL(dibusb_pid_filter_ctrl); int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 *b; int ret; b = kmalloc(3, GFP_KERNEL); if (!b) return -ENOMEM; b[0] = DIBUSB_REQ_SET_IOCTL; b[1] = DIBUSB_IOCTL_CMD_POWER_MODE; b[2] = onoff ?
DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP; ret = dvb_usb_generic_write(d, b, 3); kfree(b); msleep(10); return ret; } EXPORT_SYMBOL(dibusb_power_ctrl); int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { int ret; u8 *b; b = kmalloc(3, GFP_KERNEL); if (!b) return -ENOMEM; if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0) goto ret; if (onoff) { b[0] = DIBUSB_REQ_SET_STREAMING_MODE; b[1] = 0x00; ret = dvb_usb_generic_write(adap->dev, b, 2); if (ret < 0) goto ret; } b[0] = DIBUSB_REQ_SET_IOCTL; b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM; ret = dvb_usb_generic_write(adap->dev, b, 3); ret: kfree(b); return ret; } EXPORT_SYMBOL(dibusb2_0_streaming_ctrl); int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 *b; int ret; if (!onoff) return 0; b = kmalloc(3, GFP_KERNEL); if (!b) return -ENOMEM; b[0] = DIBUSB_REQ_SET_IOCTL; b[1] = DIBUSB_IOCTL_CMD_POWER_MODE; b[2] = DIBUSB_IOCTL_POWER_WAKEUP; ret = dvb_usb_generic_write(d, b, 3); kfree(b); return ret; } EXPORT_SYMBOL(dibusb2_0_power_ctrl); static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) { u8 *sndbuf; int ret, wo, len; /* write only ? */ wo = (rbuf == NULL || rlen == 0); len = 2 + wlen + (wo ? 0 : 2); sndbuf = kmalloc(MAX_XFER_SIZE, GFP_KERNEL); if (!sndbuf) return -ENOMEM; if (4 + wlen > MAX_XFER_SIZE) { warn("i2c wr: len=%d is too big!\n", wlen); ret = -EOPNOTSUPP; goto ret; } sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ; sndbuf[1] = (addr << 1) | (wo ? 0 : 1); memcpy(&sndbuf[2], wbuf, wlen); if (!wo) { sndbuf[wlen + 2] = (rlen >> 8) & 0xff; sndbuf[wlen + 3] = rlen & 0xff; } ret = dvb_usb_generic_rw(d, sndbuf, len, rbuf, rlen, 0); ret: kfree(sndbuf); return ret; } /* * I2C master xfer function */ static int dibusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { /* write/read request */ if (i+1 < num && (msg[i].flags & I2C_M_RD) == 0 && (msg[i+1].flags & I2C_M_RD)) { if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len, msg[i+1].buf,msg[i+1].len) < 0) break; i++; } else if ((msg[i].flags & I2C_M_RD) == 0) { if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,NULL,0) < 0) break; } else if (msg[i].addr != 0x50) { /* 0x50 is the address of the eeprom - we need to protect it * from dibusb's bad i2c implementation: reads without * writing the offset before are forbidden */ if (dibusb_i2c_msg(d, msg[i].addr, NULL, 0, msg[i].buf, msg[i].len) < 0) break; } } mutex_unlock(&d->i2c_mutex); return i; } static u32 dibusb_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } struct i2c_algorithm dibusb_i2c_algo = { .master_xfer = dibusb_i2c_xfer, .functionality = dibusb_i2c_func, }; EXPORT_SYMBOL(dibusb_i2c_algo); int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val) { u8 *buf; int rc; buf = kzalloc(2, GFP_KERNEL); if (!buf) return -ENOMEM; buf[0] = offs; rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1); *val = buf[1]; kfree(buf); return rc; } EXPORT_SYMBOL(dibusb_read_eeprom_byte); /* * common remote control stuff */ struct rc_map_table rc_map_dibusb_table[] = { /* Key codes for the little Artec T1/Twinhan/HAMA/ remote. 
*/ { 0x0016, KEY_POWER }, { 0x0010, KEY_MUTE }, { 0x0003, KEY_1 }, { 0x0001, KEY_2 }, { 0x0006, KEY_3 }, { 0x0009, KEY_4 }, { 0x001d, KEY_5 }, { 0x001f, KEY_6 }, { 0x000d, KEY_7 }, { 0x0019, KEY_8 }, { 0x001b, KEY_9 }, { 0x0015, KEY_0 }, { 0x0005, KEY_CHANNELUP }, { 0x0002, KEY_CHANNELDOWN }, { 0x001e, KEY_VOLUMEUP }, { 0x000a, KEY_VOLUMEDOWN }, { 0x0011, KEY_RECORD }, { 0x0017, KEY_FAVORITES }, /* Heart symbol - Channel list. */ { 0x0014, KEY_PLAY }, { 0x001a, KEY_STOP }, { 0x0040, KEY_REWIND }, { 0x0012, KEY_FASTFORWARD }, { 0x000e, KEY_PREVIOUS }, /* Recall - Previous channel. */ { 0x004c, KEY_PAUSE }, { 0x004d, KEY_SCREEN }, /* Full screen mode. */ { 0x0054, KEY_AUDIO }, /* MTS - Switch to secondary audio. */ /* additional keys on the TwinHan VisionPlus, which the Artec seemingly does not have */ { 0x000c, KEY_CANCEL }, /* Cancel */ { 0x001c, KEY_EPG }, /* EPG */ { 0x0000, KEY_TAB }, /* Tab */ { 0x0048, KEY_INFO }, /* Preview */ { 0x0004, KEY_LIST }, /* RecordList */ { 0x000f, KEY_TEXT }, /* Teletext */ /* Key codes for the KWorld/ADSTech/JetWay remote. */ { 0x8612, KEY_POWER }, { 0x860f, KEY_SELECT }, /* source */ { 0x860c, KEY_UNKNOWN }, /* scan */ { 0x860b, KEY_EPG }, { 0x8610, KEY_MUTE }, { 0x8601, KEY_1 }, { 0x8602, KEY_2 }, { 0x8603, KEY_3 }, { 0x8604, KEY_4 }, { 0x8605, KEY_5 }, { 0x8606, KEY_6 }, { 0x8607, KEY_7 }, { 0x8608, KEY_8 }, { 0x8609, KEY_9 }, { 0x860a, KEY_0 }, { 0x8618, KEY_ZOOM }, { 0x861c, KEY_UNKNOWN }, /* preview */ { 0x8613, KEY_UNKNOWN }, /* snap */ { 0x8600, KEY_UNDO }, { 0x861d, KEY_RECORD }, { 0x860d, KEY_STOP }, { 0x860e, KEY_PAUSE }, { 0x8616, KEY_PLAY }, { 0x8611, KEY_BACK }, { 0x8619, KEY_FORWARD }, { 0x8614, KEY_UNKNOWN }, /* pip */ { 0x8615, KEY_ESC }, { 0x861a, KEY_UP }, { 0x861e, KEY_DOWN }, { 0x861f, KEY_LEFT }, { 0x861b, KEY_RIGHT }, /* Key codes for the DiBcom MOD3000 remote. */ { 0x8000, KEY_MUTE }, { 0x8001, KEY_TEXT }, { 0x8002, KEY_HOME }, { 0x8003, KEY_POWER }, { 0x8004, KEY_RED }, { 0x8005, KEY_GREEN }, { 0x8006, KEY_YELLOW }, { 0x8007, KEY_BLUE }, { 0x8008, KEY_DVD }, { 0x8009, KEY_AUDIO }, { 0x800a, KEY_IMAGES }, /* Pictures */ { 0x800b, KEY_VIDEO }, { 0x800c, KEY_BACK }, { 0x800d, KEY_UP }, { 0x800e, KEY_RADIO }, { 0x800f, KEY_EPG }, { 0x8010, KEY_LEFT }, { 0x8011, KEY_OK }, { 0x8012, KEY_RIGHT }, { 0x8013, KEY_UNKNOWN }, /* SAP */ { 0x8014, KEY_TV }, { 0x8015, KEY_DOWN }, { 0x8016, KEY_MENU }, /* DVD Menu */ { 0x8017, KEY_LAST }, { 0x8018, KEY_RECORD }, { 0x8019, KEY_STOP }, { 0x801a, KEY_PAUSE }, { 0x801b, KEY_PLAY }, { 0x801c, KEY_PREVIOUS }, { 0x801d, KEY_REWIND }, { 0x801e, KEY_FASTFORWARD }, { 0x801f, KEY_NEXT }, { 0x8040, KEY_1 }, { 0x8041, KEY_2 }, { 0x8042, KEY_3 }, { 0x8043, KEY_CHANNELUP }, { 0x8044, KEY_4 }, { 0x8045, KEY_5 }, { 0x8046, KEY_6 }, { 0x8047, KEY_CHANNELDOWN }, { 0x8048, KEY_7 }, { 0x8049, KEY_8 }, { 0x804a, KEY_9 }, { 0x804b, KEY_VOLUMEUP }, { 0x804c, KEY_CLEAR }, { 0x804d, KEY_0 }, { 0x804e, KEY_ENTER }, { 0x804f, KEY_VOLUMEDOWN }, }; EXPORT_SYMBOL(rc_map_dibusb_table); int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { u8 *buf; int ret; buf = kmalloc(5, GFP_KERNEL); if (!buf) return -ENOMEM; buf[0] = DIBUSB_REQ_POLL_REMOTE; ret = dvb_usb_generic_rw(d, buf, 1, buf, 5, 0); if (ret < 0) goto ret; dvb_usb_nec_rc_key_to_event(d, buf, event, state); if (buf[0] != 0) deb_info("key: %*ph\n", 5, buf); ret: kfree(buf); return ret; } EXPORT_SYMBOL(dibusb_rc_query);
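/*
 * Usage sketch (hypothetical, not part of this file): dumping the first few
 * EEPROM bytes through the exported helper above. A real caller would obtain
 * "d" from the dvb-usb core during probe; the byte count is arbitrary.
 */
static int example_dump_eeprom(struct dvb_usb_device *d)
{
	u8 val;
	int i, ret;

	for (i = 0; i < 8; i++) {
		ret = dibusb_read_eeprom_byte(d, i, &val);
		if (ret)
			return ret;
		deb_info("eeprom[%d] = 0x%02x\n", i, val);
	}
	return 0;
}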
// SPDX-License-Identifier: GPL-2.0-or-later /* */ #include
<linux/init.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <linux/usb/audio-v2.h> #include <linux/usb/audio-v3.h> #include <sound/core.h> #include <sound/pcm.h> #include "usbaudio.h" #include "card.h" #include "quirks.h" #include "helper.h" #include "clock.h" #include "format.h" /* * parse the audio format type I descriptor * and returns the corresponding pcm format * * @dev: usb device * @fp: audioformat record * @format: the format tag (wFormatTag) * @fmt: the format type descriptor (v1/v2) or AudioStreaming descriptor (v3) */ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, struct audioformat *fp, u64 format, void *_fmt) { int sample_width, sample_bytes; u64 pcm_formats = 0; switch (fp->protocol) { case UAC_VERSION_1: default: { struct uac_format_type_i_discrete_descriptor *fmt = _fmt; if (format >= 64) { usb_audio_info(chip, "%u:%d: invalid format type 0x%llx is detected, processed as PCM\n", fp->iface, fp->altsetting, format); format = UAC_FORMAT_TYPE_I_PCM; } sample_width = fmt->bBitResolution; sample_bytes = fmt->bSubframeSize; format = 1ULL << format; break; } case UAC_VERSION_2: { struct uac_format_type_i_ext_descriptor *fmt = _fmt; sample_width = fmt->bBitResolution; sample_bytes = fmt->bSubslotSize; if (format & UAC2_FORMAT_TYPE_I_RAW_DATA) { pcm_formats |= SNDRV_PCM_FMTBIT_SPECIAL; /* flag potentially raw DSD capable altsettings */ fp->dsd_raw = true; /* clear special format bit to avoid "unsupported format" msg below */ format &= ~UAC2_FORMAT_TYPE_I_RAW_DATA; } format <<= 1; break; } case UAC_VERSION_3: { struct uac3_as_header_descriptor *as = _fmt; sample_width = as->bBitResolution; sample_bytes = as->bSubslotSize; if (format & UAC3_FORMAT_TYPE_I_RAW_DATA) { pcm_formats |= SNDRV_PCM_FMTBIT_SPECIAL; /* clear special format bit to avoid "unsupported format" msg below */ format &= ~UAC3_FORMAT_TYPE_I_RAW_DATA; } format <<= 1; break; } } fp->fmt_bits = sample_width; fp->fmt_sz = sample_bytes; if ((pcm_formats == 0) && (format == 0 || format == BIT(UAC_FORMAT_TYPE_I_UNDEFINED))) { /* some devices don't define this correctly... */ usb_audio_info(chip, "%u:%d : format type 0 is detected, processed as PCM\n", fp->iface, fp->altsetting); format = BIT(UAC_FORMAT_TYPE_I_PCM); } if (format & BIT(UAC_FORMAT_TYPE_I_PCM)) { if (((chip->usb_id == USB_ID(0x0582, 0x0016)) || /* Edirol SD-90 */ (chip->usb_id == USB_ID(0x0582, 0x000c))) && /* Roland SC-D70 */ sample_width == 24 && sample_bytes == 2) sample_bytes = 3; else if (sample_width > sample_bytes * 8) { usb_audio_info(chip, "%u:%d : sample bitwidth %d in over sample bytes %d\n", fp->iface, fp->altsetting, sample_width, sample_bytes); } /* check the format byte size */ switch (sample_bytes) { case 1: pcm_formats |= SNDRV_PCM_FMTBIT_S8; break; case 2: if (snd_usb_is_big_endian_format(chip, fp)) pcm_formats |= SNDRV_PCM_FMTBIT_S16_BE; /* grrr, big endian!! */ else pcm_formats |= SNDRV_PCM_FMTBIT_S16_LE; break; case 3: if (snd_usb_is_big_endian_format(chip, fp)) pcm_formats |= SNDRV_PCM_FMTBIT_S24_3BE; /* grrr, big endian!! */ else pcm_formats |= SNDRV_PCM_FMTBIT_S24_3LE; break; case 4: pcm_formats |= SNDRV_PCM_FMTBIT_S32_LE; break; default: usb_audio_info(chip, "%u:%d : unsupported sample bitwidth %d in %d bytes\n", fp->iface, fp->altsetting, sample_width, sample_bytes); break; } } if (format & BIT(UAC_FORMAT_TYPE_I_PCM8)) { /* Dallas DS4201 workaround: it advertises U8 format, but really supports S8. 
*/ if (chip->usb_id == USB_ID(0x04fa, 0x4201)) pcm_formats |= SNDRV_PCM_FMTBIT_S8; else pcm_formats |= SNDRV_PCM_FMTBIT_U8; } if (format & BIT(UAC_FORMAT_TYPE_I_IEEE_FLOAT)) pcm_formats |= SNDRV_PCM_FMTBIT_FLOAT_LE; if (format & BIT(UAC_FORMAT_TYPE_I_ALAW)) pcm_formats |= SNDRV_PCM_FMTBIT_A_LAW; if (format & BIT(UAC_FORMAT_TYPE_I_MULAW)) pcm_formats |= SNDRV_PCM_FMTBIT_MU_LAW; if (format & ~0x3f) { usb_audio_info(chip, "%u:%d : unsupported format bits %#llx\n", fp->iface, fp->altsetting, format); } pcm_formats |= snd_usb_interface_dsd_format_quirks(chip, fp, sample_bytes); return pcm_formats; } static int set_fixed_rate(struct audioformat *fp, int rate, int rate_bits) { kfree(fp->rate_table); fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL); if (!fp->rate_table) return -ENOMEM; fp->nr_rates = 1; fp->rate_min = rate; fp->rate_max = rate; fp->rates = rate_bits; fp->rate_table[0] = rate; return 0; } /* set up rate_min, rate_max and rates from the rate table */ static void set_rate_table_min_max(struct audioformat *fp) { unsigned int rate; int i; fp->rate_min = INT_MAX; fp->rate_max = 0; fp->rates = 0; for (i = 0; i < fp->nr_rates; i++) { rate = fp->rate_table[i]; fp->rate_min = min(fp->rate_min, rate); fp->rate_max = max(fp->rate_max, rate); fp->rates |= snd_pcm_rate_to_rate_bit(rate); } } /* * parse the format descriptor and store the possible sample rates * in the audioformat table (audio class v1). * * @dev: usb device * @fp: audioformat record * @fmt: the format descriptor * @offset: the start offset of the descriptor pointing to the rate type * (7 for type I and III, 8 for type II) */ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audioformat *fp, unsigned char *fmt, int offset) { int nr_rates = fmt[offset]; if (fmt[0] < offset + 1 + 3 * (nr_rates ?
nr_rates : 2)) { usb_audio_err(chip, "%u:%d : invalid UAC_FORMAT_TYPE desc\n", fp->iface, fp->altsetting); return -EINVAL; } if (nr_rates) { /* * build the rate table and bitmap flags */ int r, idx; fp->rate_table = kmalloc_array(nr_rates, sizeof(int), GFP_KERNEL); if (fp->rate_table == NULL) return -ENOMEM; fp->nr_rates = 0; for (r = 0, idx = offset + 1; r < nr_rates; r++, idx += 3) { unsigned int rate = combine_triple(&fmt[idx]); if (!rate) continue; /* C-Media CM6501 mislabels its 96 kHz altsetting */ /* Terratec Aureon 7.1 USB C-Media 6206, too */ /* Ozone Z90 USB C-Media, too */ if (rate == 48000 && nr_rates == 1 && (chip->usb_id == USB_ID(0x0d8c, 0x0201) || chip->usb_id == USB_ID(0x0d8c, 0x0102) || chip->usb_id == USB_ID(0x0d8c, 0x0078) || chip->usb_id == USB_ID(0x0ccd, 0x00b1)) && fp->altsetting == 5 && fp->maxpacksize == 392) rate = 96000; /* Creative VF0420/VF0470 Live Cams report 16 kHz instead of 8kHz */ if (rate == 16000 && (chip->usb_id == USB_ID(0x041e, 0x4064) || chip->usb_id == USB_ID(0x041e, 0x4068))) rate = 8000; fp->rate_table[fp->nr_rates++] = rate; } if (!fp->nr_rates) { usb_audio_info(chip, "%u:%d: All rates were zero\n", fp->iface, fp->altsetting); return -EINVAL; } set_rate_table_min_max(fp); } else { /* continuous rates */ fp->rates = SNDRV_PCM_RATE_CONTINUOUS; fp->rate_min = combine_triple(&fmt[offset + 1]); fp->rate_max = combine_triple(&fmt[offset + 4]); } /* Jabra Evolve 65 headset */ if (chip->usb_id == USB_ID(0x0b0e, 0x030b) || chip->usb_id == USB_ID(0x0b0e, 0x030c)) { /* only 48kHz for playback while keeping 16kHz for capture */ if (fp->nr_rates != 1) return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000); } return 0; } /* * Presonus Studio 1810c supports a limited set of sampling * rates per altsetting but reports the full set each time. * If we don't filter out the unsupported rates and attempt * to configure the card, it will hang refusing to do any * further audio I/O until a hard reset is performed. * * The list of supported rates per altsetting (set of available * I/O channels) is described in the owner's manual, section 2.2. */ static bool s1810c_valid_sample_rate(struct audioformat *fp, unsigned int rate) { switch (fp->altsetting) { case 1: /* All ADAT ports available */ return rate <= 48000; case 2: /* Half of ADAT ports available */ return (rate == 88200 || rate == 96000); case 3: /* Analog I/O only (no S/PDIF nor ADAT) */ return rate >= 176400; default: return false; } return false; } /* * Many Focusrite devices support a limited set of sampling rates per * altsetting. The maximum rate is exposed in the last 4 bytes of the Format * Type descriptor, which has a non-standard bLength = 10. */ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip, struct audioformat *fp, unsigned int rate) { struct usb_interface *iface; struct usb_host_interface *alts; unsigned char *fmt; unsigned int max_rate; iface = usb_ifnum_to_if(chip->dev, fp->iface); if (!iface) return true; alts = &iface->altsetting[fp->altset_idx]; fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_FORMAT_TYPE); if (!fmt) return true; if (fmt[0] == 10) { /* bLength */ max_rate = combine_quad(&fmt[6]); /* Validate max rate */ if (max_rate != 48000 && max_rate != 96000 && max_rate != 192000 && max_rate != 384000) { usb_audio_info(chip, "%u:%d : unexpected max rate: %u\n", fp->iface, fp->altsetting, max_rate); return true; } return rate <= max_rate; } return true; } /* * Helper function to walk the array of sample rate triplets reported by * the device.
The problem is that we need to parse the whole array first to know how * many sample rates to expect. Then fp->rate_table can be allocated and * filled. */ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip, struct audioformat *fp, int nr_triplets, const unsigned char *data) { int i, nr_rates = 0; for (i = 0; i < nr_triplets; i++) { int min = combine_quad(&data[2 + 12 * i]); int max = combine_quad(&data[6 + 12 * i]); int res = combine_quad(&data[10 + 12 * i]); unsigned int rate; if ((max < 0) || (min < 0) || (res < 0) || (max < min)) continue; /* * for ranges with res == 1, we announce a continuous sample * rate range, and this function should return 0 for no further * parsing. */ if (res == 1) { fp->rate_min = min; fp->rate_max = max; fp->rates = SNDRV_PCM_RATE_CONTINUOUS; return 0; } for (rate = min; rate <= max; rate += res) { /* Filter out invalid rates on Presonus Studio 1810c */ if (chip->usb_id == USB_ID(0x194f, 0x010c) && !s1810c_valid_sample_rate(fp, rate)) goto skip_rate; /* Filter out invalid rates on Presonus Studio 1824c */ if (chip->usb_id == USB_ID(0x194f, 0x010d) && !s1810c_valid_sample_rate(fp, rate)) goto skip_rate; /* Filter out invalid rates on Focusrite devices */ if (USB_ID_VENDOR(chip->usb_id) == 0x1235 && !focusrite_valid_sample_rate(chip, fp, rate)) goto skip_rate; if (fp->rate_table) fp->rate_table[nr_rates] = rate; nr_rates++; if (nr_rates >= MAX_NR_RATES) { usb_audio_err(chip, "invalid uac2 rates\n"); break; } skip_rate: /* avoid endless loop */ if (res == 0) break; } } return nr_rates; } /* Line6 Helix series and the Rode Rodecaster Pro don't support the * UAC2_CS_RANGE usb function call. Return a static table of known * clock rates. */ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip, struct audioformat *fp) { switch (chip->usb_id) { case USB_ID(0x0e41, 0x4241): /* Line6 Helix */ case USB_ID(0x0e41, 0x4242): /* Line6 Helix Rack */ case USB_ID(0x0e41, 0x4244): /* Line6 Helix LT */ case USB_ID(0x0e41, 0x4246): /* Line6 HX-Stomp */ case USB_ID(0x0e41, 0x4253): /* Line6 HX-Stomp XL */ case USB_ID(0x0e41, 0x4247): /* Line6 Pod Go */ case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */ case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */ case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */ case USB_ID(0x0e41, 0x424b): /* Line6 Pod Go */ case USB_ID(0x19f7, 0x0011): /* Rode Rodecaster Pro */ return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000); } return -ENODEV; } /* check whether the given altsetting is supported for the already set rate */ static bool check_valid_altsetting_v2v3(struct snd_usb_audio *chip, int iface, int altsetting) { struct usb_device *dev = chip->dev; __le64 raw_data = 0; u64 data; int err; /* we assume 64bit is enough for any altsettings */ if (snd_BUG_ON(altsetting >= 64 - 8)) return false; err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, UAC2_AS_VAL_ALT_SETTINGS << 8, iface, &raw_data, sizeof(raw_data)); if (err < 0) return false; data = le64_to_cpu(raw_data); /* first byte contains the bitmap size */ if ((data & 0xff) * 8 < altsetting) return false; if (data & (1ULL << (altsetting + 8))) return true; return false; } /* * Validate each sample rate with the altsetting * Rebuild the rate table if only partial values are valid */ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip, struct audioformat *fp, int clock) { struct usb_device *dev = chip->dev; struct usb_host_interface *alts;
unsigned int *table; unsigned int nr_rates; int i, err; u32 bmControls; /* performing the rate verification may lead to unexpected USB bus * behavior afterwards for some unknown reason. Do this only for the * known devices. */ if (!(chip->quirk_flags & QUIRK_FLAG_VALIDATE_RATES)) return 0; /* don't perform the validation as default */ alts = snd_usb_get_host_interface(chip, fp->iface, fp->altsetting); if (!alts) return 0; if (fp->protocol == UAC_VERSION_3) { struct uac3_as_header_descriptor *as = snd_usb_find_csint_desc( alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); bmControls = le32_to_cpu(as->bmControls); } else { struct uac2_as_header_descriptor *as = snd_usb_find_csint_desc( alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); bmControls = as->bmControls; } if (!uac_v2v3_control_is_readable(bmControls, UAC2_AS_VAL_ALT_SETTINGS)) return 0; table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL); if (!table) return -ENOMEM; /* clear the interface altsetting at first */ usb_set_interface(dev, fp->iface, 0); nr_rates = 0; for (i = 0; i < fp->nr_rates; i++) { err = snd_usb_set_sample_rate_v2v3(chip, fp, clock, fp->rate_table[i]); if (err < 0) continue; if (check_valid_altsetting_v2v3(chip, fp->iface, fp->altsetting)) table[nr_rates++] = fp->rate_table[i]; } if (!nr_rates) { usb_audio_dbg(chip, "No valid sample rate available for %d:%d, assuming a firmware bug\n", fp->iface, fp->altsetting); nr_rates = fp->nr_rates; /* continue as is */ } if (fp->nr_rates == nr_rates) { kfree(table); return 0; } kfree(fp->rate_table); fp->rate_table = table; fp->nr_rates = nr_rates; return 0; } /* * parse the format descriptor and store the possible sample rates * in the audioformat table (audio class v2 and v3). */ static int parse_audio_format_rates_v2v3(struct snd_usb_audio *chip, struct audioformat *fp) { struct usb_device *dev = chip->dev; unsigned char tmp[2], *data; int nr_triplets, data_size, ret = 0, ret_l6; int clock = snd_usb_clock_find_source(chip, fp, false); struct usb_host_interface *ctrl_intf; ctrl_intf = snd_usb_find_ctrl_interface(chip, fp->iface); if (clock < 0) { dev_err(&dev->dev, "%s(): unable to find clock source (clock %d)\n", __func__, clock); goto err; } /* get the number of sample rates first by only fetching 2 bytes */ ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_RANGE, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, UAC2_CS_CONTROL_SAM_FREQ << 8, snd_usb_ctrl_intf(ctrl_intf) | (clock << 8), tmp, sizeof(tmp)); if (ret < 0) { /* line6 helix devices don't support the UAC2_CS_CONTROL_SAM_FREQ call */ ret_l6 = line6_parse_audio_format_rates_quirk(chip, fp); if (ret_l6 == -ENODEV) { /* no line6 device found, continue showing the error */ dev_err(&dev->dev, "%s(): unable to retrieve number of sample rates (clock %d)\n", __func__, clock); goto err; } if (ret_l6 == 0) { dev_info(&dev->dev, "%s(): unable to retrieve number of sample rates: set it to a predefined value (clock %d).\n", __func__, clock); return 0; } ret = ret_l6; goto err; } nr_triplets = (tmp[1] << 8) | tmp[0]; data_size = 2 + 12 * nr_triplets; data = kzalloc(data_size, GFP_KERNEL); if (!data) { ret = -ENOMEM; goto err; } /* now get the full information */ ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_RANGE, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, UAC2_CS_CONTROL_SAM_FREQ << 8, snd_usb_ctrl_intf(ctrl_intf) | (clock << 8), data, data_size); if (ret < 0) { dev_err(&dev->dev, "%s(): unable to retrieve sample rate range (clock %d)\n", __func__, clock); ret = -EINVAL; goto err_free; }
/* Call the triplet parser, and make sure fp->rate_table is NULL. * We just use the return value to know how many sample rates we * will have to deal with. */ kfree(fp->rate_table); fp->rate_table = NULL; fp->nr_rates = parse_uac2_sample_rate_range(chip, fp, nr_triplets, data); if (fp->nr_rates == 0) { /* SNDRV_PCM_RATE_CONTINUOUS */ ret = 0; goto err_free; } fp->rate_table = kmalloc_array(fp->nr_rates, sizeof(int), GFP_KERNEL); if (!fp->rate_table) { ret = -ENOMEM; goto err_free; } /* Call the triplet parser again, but this time, fp->rate_table is * allocated, so the rates will be stored */ parse_uac2_sample_rate_range(chip, fp, nr_triplets, data); ret = validate_sample_rate_table_v2v3(chip, fp, clock); if (ret < 0) goto err_free; set_rate_table_min_max(fp); err_free: kfree(data); err: return ret; } /* * parse the format type I and III descriptors */ static int parse_audio_format_i(struct snd_usb_audio *chip, struct audioformat *fp, u64 format, void *_fmt) { snd_pcm_format_t pcm_format; unsigned int fmt_type; int ret; switch (fp->protocol) { default: case UAC_VERSION_1: case UAC_VERSION_2: { struct uac_format_type_i_continuous_descriptor *fmt = _fmt; fmt_type = fmt->bFormatType; break; } case UAC_VERSION_3: { /* fp->fmt_type is already set in this case */ fmt_type = fp->fmt_type; break; } } if (fmt_type == UAC_FORMAT_TYPE_III) { /* FIXME: the format type is really IECxxx * but we give normal PCM format to get the existing * apps working... */ switch (chip->usb_id) { case USB_ID(0x0763, 0x2003): /* M-Audio Audiophile USB */ if (chip->setup == 0x00 && fp->altsetting == 6) pcm_format = SNDRV_PCM_FORMAT_S16_BE; else pcm_format = SNDRV_PCM_FORMAT_S16_LE; break; default: pcm_format = SNDRV_PCM_FORMAT_S16_LE; } fp->formats = pcm_format_to_bits(pcm_format); } else { fp->formats = parse_audio_format_i_type(chip, fp, format, _fmt); if (!fp->formats) return -EINVAL; } /* gather possible sample rates */ /* audio class v1 reports possible sample rates as part of the * proprietary class specific descriptor. * audio class v2 uses class specific EP0 range requests for that. */ switch (fp->protocol) { default: case UAC_VERSION_1: { struct uac_format_type_i_continuous_descriptor *fmt = _fmt; fp->channels = fmt->bNrChannels; ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7); break; } case UAC_VERSION_2: case UAC_VERSION_3: { /* fp->channels is already set in this case */ ret = parse_audio_format_rates_v2v3(chip, fp); break; } } if (fp->channels < 1) { usb_audio_err(chip, "%u:%d : invalid channels %d\n", fp->iface, fp->altsetting, fp->channels); return -EINVAL; } return ret; } /* * parse the format type II descriptor */ static int parse_audio_format_ii(struct snd_usb_audio *chip, struct audioformat *fp, u64 format, void *_fmt) { int brate, framesize, ret; switch (format) { case UAC_FORMAT_TYPE_II_AC3: /* FIXME: there is no AC3 format defined yet */ // fp->formats = SNDRV_PCM_FMTBIT_AC3; fp->formats = SNDRV_PCM_FMTBIT_U8; /* temporary hack to receive byte streams */ break; case UAC_FORMAT_TYPE_II_MPEG: fp->formats = SNDRV_PCM_FMTBIT_MPEG; break; default: usb_audio_info(chip, "%u:%d : unknown format tag %#llx is detected. 
processed as MPEG.\n", fp->iface, fp->altsetting, format); fp->formats = SNDRV_PCM_FMTBIT_MPEG; break; } fp->channels = 1; switch (fp->protocol) { default: case UAC_VERSION_1: { struct uac_format_type_ii_discrete_descriptor *fmt = _fmt; brate = le16_to_cpu(fmt->wMaxBitRate); framesize = le16_to_cpu(fmt->wSamplesPerFrame); usb_audio_info(chip, "found format II with max.bitrate = %d, frame size=%d\n", brate, framesize); fp->frame_size = framesize; ret = parse_audio_format_rates_v1(chip, fp, _fmt, 8); /* fmt[8..] sample rates */ break; } case UAC_VERSION_2: { struct uac_format_type_ii_ext_descriptor *fmt = _fmt; brate = le16_to_cpu(fmt->wMaxBitRate); framesize = le16_to_cpu(fmt->wSamplesPerFrame); usb_audio_info(chip, "found format II with max.bitrate = %d, frame size=%d\n", brate, framesize); fp->frame_size = framesize; ret = parse_audio_format_rates_v2v3(chip, fp); break; } } return ret; } int snd_usb_parse_audio_format(struct snd_usb_audio *chip, struct audioformat *fp, u64 format, struct uac_format_type_i_continuous_descriptor *fmt, int stream) { int err; switch (fmt->bFormatType) { case UAC_FORMAT_TYPE_I: case UAC_FORMAT_TYPE_III: err = parse_audio_format_i(chip, fp, format, fmt); break; case UAC_FORMAT_TYPE_II: err = parse_audio_format_ii(chip, fp, format, fmt); break; default: usb_audio_info(chip, "%u:%d : format type %d is not supported yet\n", fp->iface, fp->altsetting, fmt->bFormatType); return -ENOTSUPP; } fp->fmt_type = fmt->bFormatType; if (err < 0) return err; #if 1 /* FIXME: temporary hack for extigy/audigy 2 nx/zs */ /* extigy apparently supports sample rates other than 48k * but not in ordinary way. so we enable only 48k atm. */ if (chip->usb_id == USB_ID(0x041e, 0x3000) || chip->usb_id == USB_ID(0x041e, 0x3020) || chip->usb_id == USB_ID(0x041e, 0x3061)) { if (fmt->bFormatType == UAC_FORMAT_TYPE_I && fp->rates != SNDRV_PCM_RATE_48000 && fp->rates != SNDRV_PCM_RATE_96000) return -ENOTSUPP; } #endif return 0; } int snd_usb_parse_audio_format_v3(struct snd_usb_audio *chip, struct audioformat *fp, struct uac3_as_header_descriptor *as, int stream) { u64 format = le64_to_cpu(as->bmFormats); int err; /* * Type I format bits are D0..D6 * This test works because type IV is not supported */ if (format & 0x7f) fp->fmt_type = UAC_FORMAT_TYPE_I; else fp->fmt_type = UAC_FORMAT_TYPE_III; err = parse_audio_format_i(chip, fp, format, as); if (err < 0) return err; return 0; }
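/*
 * Usage sketch (hypothetical, not part of this file): how a quirk similar to
 * the Line6/Jabra handling above can pin an altsetting to a single 48 kHz
 * rate via the set_fixed_rate() helper defined earlier in this file.
 */
static int example_force_48k(struct audioformat *fp)
{
	/* replaces any previously parsed rate table with one fixed entry */
	return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
}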
// SPDX-License-Identifier: GPL-2.0+ /* * usblp.c * * Copyright (c) 1999 Michael Gee <michael@linuxspecific.com> * Copyright (c) 1999 Pavel Machek <pavel@ucw.cz> * Copyright (c) 2000 Randy Dunlap <rdunlap@xenotime.net> * Copyright (c) 2000 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2001 Pete Zaitcev <zaitcev@redhat.com> * Copyright (c) 2001 David Paschal <paschal@rcsis.com> * Copyright (c) 2006 Oliver Neukum <oliver@neukum.name> * * USB Printer Device Class
driver for USB printers and printer cables * * Sponsored by SuSE * * ChangeLog: * v0.1 - thorough cleaning, URBification, almost a rewrite * v0.2 - some more cleanups * v0.3 - cleaner again, waitqueue fixes * v0.4 - fixes in unidirectional mode * v0.5 - add DEVICE_ID string support * v0.6 - never time out * v0.7 - fixed bulk-IN read and poll (David Paschal) * v0.8 - add devfs support * v0.9 - fix unplug-while-open paths * v0.10- remove sleep_on, fix error on oom (oliver@neukum.org) * v0.11 - add proto_bias option (Pete Zaitcev) * v0.12 - add hpoj.sourceforge.net ioctls (David Paschal) * v0.13 - alloc space for statusbuf (<status> not on stack); * use usb_alloc_coherent() for read buf & write buf; * none - Maintained in Linux kernel after v0.13 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/lp.h> #include <linux/mutex.h> #undef DEBUG #include <linux/usb.h> #include <linux/usb/ch9.h> #include <linux/ratelimit.h> /* * Version Information */ #define DRIVER_AUTHOR "Michael Gee, Pavel Machek, Vojtech Pavlik, Randy Dunlap, Pete Zaitcev, David Paschal" #define DRIVER_DESC "USB Printer Device Class driver" #define USBLP_BUF_SIZE 8192 #define USBLP_BUF_SIZE_IN 1024 #define USBLP_DEVICE_ID_SIZE 1024 /* ioctls: */ #define IOCNR_GET_DEVICE_ID 1 #define IOCNR_GET_PROTOCOLS 2 #define IOCNR_SET_PROTOCOL 3 #define IOCNR_HP_SET_CHANNEL 4 #define IOCNR_GET_BUS_ADDRESS 5 #define IOCNR_GET_VID_PID 6 #define IOCNR_SOFT_RESET 7 /* Get device_id string: */ #define LPIOC_GET_DEVICE_ID(len) _IOC(_IOC_READ, 'P', IOCNR_GET_DEVICE_ID, len) /* The following ioctls were added for http://hpoj.sourceforge.net: * Get two-int array: * [0]=current protocol * (1=USB_CLASS_PRINTER/1/1, 2=USB_CLASS_PRINTER/1/2, * 3=USB_CLASS_PRINTER/1/3), * [1]=supported protocol mask (mask&(1<<n)!=0 means * USB_CLASS_PRINTER/1/n supported): */ #define LPIOC_GET_PROTOCOLS(len) _IOC(_IOC_READ, 'P', IOCNR_GET_PROTOCOLS, len) /* * Set protocol * (arg: 1=USB_CLASS_PRINTER/1/1, 2=USB_CLASS_PRINTER/1/2, * 3=USB_CLASS_PRINTER/1/3): */ #define LPIOC_SET_PROTOCOL _IOC(_IOC_WRITE, 'P', IOCNR_SET_PROTOCOL, 0) /* Set channel number (HP Vendor-specific command): */ #define LPIOC_HP_SET_CHANNEL _IOC(_IOC_WRITE, 'P', IOCNR_HP_SET_CHANNEL, 0) /* Get two-int array: [0]=bus number, [1]=device address: */ #define LPIOC_GET_BUS_ADDRESS(len) _IOC(_IOC_READ, 'P', IOCNR_GET_BUS_ADDRESS, len) /* Get two-int array: [0]=vendor ID, [1]=product ID: */ #define LPIOC_GET_VID_PID(len) _IOC(_IOC_READ, 'P', IOCNR_GET_VID_PID, len) /* Perform class specific soft reset */ #define LPIOC_SOFT_RESET _IOC(_IOC_NONE, 'P', IOCNR_SOFT_RESET, 0) /* * A DEVICE_ID string may include the printer's serial number. * It should end with a semi-colon (';'). 
* An example from an HP 970C DeskJet printer is (this is one long string, * with the serial number changed): MFG:HEWLETT-PACKARD;MDL:DESKJET 970C;CMD:MLC,PCL,PML;CLASS:PRINTER;DESCRIPTION:Hewlett-Packard DeskJet 970C;SERN:US970CSEPROF;VSTATUS:$HB0$NC0,ff,DN,IDLE,CUT,K1,C0,DP,NR,KP000,CP027;VP:0800,FL,B0;VJ: ; */ /* * USB Printer Requests */ #define USBLP_REQ_GET_ID 0x00 #define USBLP_REQ_GET_STATUS 0x01 #define USBLP_REQ_RESET 0x02 #define USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST 0x00 /* HP Vendor-specific */ #define USBLP_MINORS 16 #define USBLP_MINOR_BASE 0 #define USBLP_CTL_TIMEOUT 5000 /* 5 seconds */ #define USBLP_FIRST_PROTOCOL 1 #define USBLP_LAST_PROTOCOL 3 #define USBLP_MAX_PROTOCOLS (USBLP_LAST_PROTOCOL+1) /* * some arbitrary status buffer size; * need a status buffer that is allocated via kmalloc(), not on stack */ #define STATUS_BUF_SIZE 8 /* * Locks down the locking order: * ->wmut locks wstatus. * ->mut locks the whole usblp, except [rw]complete, and thus, by indirection, * [rw]status. We only touch status when we know the side idle. * ->lock locks what interrupt accesses. */ struct usblp { struct usb_device *dev; /* USB device */ struct mutex wmut; struct mutex mut; spinlock_t lock; /* locks rcomplete, wcomplete */ char *readbuf; /* read transfer_buffer */ char *statusbuf; /* status transfer_buffer */ struct usb_anchor urbs; wait_queue_head_t rwait, wwait; int readcount; /* Counter for reads */ int ifnum; /* Interface number */ struct usb_interface *intf; /* The interface */ /* * Alternate-setting numbers and endpoints for each protocol * (USB_CLASS_PRINTER/1/{index=1,2,3}) that the device supports: */ struct { int alt_setting; struct usb_endpoint_descriptor *epwrite; struct usb_endpoint_descriptor *epread; } protocol[USBLP_MAX_PROTOCOLS]; int current_protocol; int minor; /* minor number of device */ int wcomplete, rcomplete; int wstatus; /* bytes written or error */ int rstatus; /* bytes ready or error */ unsigned int quirks; /* quirks flags */ unsigned int flags; /* mode flags */ unsigned char used; /* True if open */ unsigned char present; /* True if not disconnected */ unsigned char bidir; /* interface is bidirectional */ unsigned char no_paper; /* Paper Out happened */ unsigned char *device_id_string; /* IEEE 1284 DEVICE ID string (ptr) */ /* first 2 bytes are (big-endian) length */ }; #ifdef DEBUG static void usblp_dump(struct usblp *usblp) { struct device *dev = &usblp->intf->dev; int p; dev_dbg(dev, "usblp=0x%p\n", usblp); dev_dbg(dev, "dev=0x%p\n", usblp->dev); dev_dbg(dev, "present=%d\n", usblp->present); dev_dbg(dev, "readbuf=0x%p\n", usblp->readbuf); dev_dbg(dev, "readcount=%d\n", usblp->readcount); dev_dbg(dev, "ifnum=%d\n", usblp->ifnum); for (p = USBLP_FIRST_PROTOCOL; p <= USBLP_LAST_PROTOCOL; p++) { dev_dbg(dev, "protocol[%d].alt_setting=%d\n", p, usblp->protocol[p].alt_setting); dev_dbg(dev, "protocol[%d].epwrite=%p\n", p, usblp->protocol[p].epwrite); dev_dbg(dev, "protocol[%d].epread=%p\n", p, usblp->protocol[p].epread); } dev_dbg(dev, "current_protocol=%d\n", usblp->current_protocol); dev_dbg(dev, "minor=%d\n", usblp->minor); dev_dbg(dev, "wstatus=%d\n", usblp->wstatus); dev_dbg(dev, "rstatus=%d\n", usblp->rstatus); dev_dbg(dev, "quirks=%d\n", usblp->quirks); dev_dbg(dev, "used=%d\n", usblp->used); dev_dbg(dev, "bidir=%d\n", usblp->bidir); dev_dbg(dev, "device_id_string=\"%s\"\n", usblp->device_id_string ? usblp->device_id_string + 2 : (unsigned char *)"(null)"); } #endif /* Quirks: various printer quirks are handled by this table & its flags. 
*/ struct quirk_printer_struct { __u16 vendorId; __u16 productId; unsigned int quirks; }; #define USBLP_QUIRK_BIDIR 0x1 /* reports bidir but requires unidirectional mode (no INs/reads) */ #define USBLP_QUIRK_USB_INIT 0x2 /* needs vendor USB init string */ #define USBLP_QUIRK_BAD_CLASS 0x4 /* descriptor uses vendor-specific Class or SubClass */ static const struct quirk_printer_struct quirk_printers[] = { { 0x03f0, 0x0004, USBLP_QUIRK_BIDIR }, /* HP DeskJet 895C */ { 0x03f0, 0x0104, USBLP_QUIRK_BIDIR }, /* HP DeskJet 880C */ { 0x03f0, 0x0204, USBLP_QUIRK_BIDIR }, /* HP DeskJet 815C */ { 0x03f0, 0x0304, USBLP_QUIRK_BIDIR }, /* HP DeskJet 810C/812C */ { 0x03f0, 0x0404, USBLP_QUIRK_BIDIR }, /* HP DeskJet 830C */ { 0x03f0, 0x0504, USBLP_QUIRK_BIDIR }, /* HP DeskJet 885C */ { 0x03f0, 0x0604, USBLP_QUIRK_BIDIR }, /* HP DeskJet 840C */ { 0x03f0, 0x0804, USBLP_QUIRK_BIDIR }, /* HP DeskJet 816C */ { 0x03f0, 0x1104, USBLP_QUIRK_BIDIR }, /* HP Deskjet 959C */ { 0x0409, 0xefbe, USBLP_QUIRK_BIDIR }, /* NEC Picty900 (HP OEM) */ { 0x0409, 0xbef4, USBLP_QUIRK_BIDIR }, /* NEC Picty760 (HP OEM) */ { 0x0409, 0xf0be, USBLP_QUIRK_BIDIR }, /* NEC Picty920 (HP OEM) */ { 0x0409, 0xf1be, USBLP_QUIRK_BIDIR }, /* NEC Picty800 (HP OEM) */ { 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut <kernel@zut.de> */ { 0x04f9, 0x000d, USBLP_QUIRK_BIDIR }, /* Brother Industries, Ltd HL-1440 Laser Printer */ { 0x04b8, 0x0202, USBLP_QUIRK_BAD_CLASS }, /* Seiko Epson Receipt Printer M129C */ { 0, 0 } }; static int usblp_wwait(struct usblp *usblp, int nonblock); static int usblp_wtest(struct usblp *usblp, int nonblock); static int usblp_rwait_and_lock(struct usblp *usblp, int nonblock); static int usblp_rtest(struct usblp *usblp, int nonblock); static int usblp_submit_read(struct usblp *usblp); static int usblp_select_alts(struct usblp *usblp); static int usblp_set_protocol(struct usblp *usblp, int protocol); static int usblp_cache_device_id_string(struct usblp *usblp); /* forward reference to make our lives easier */ static struct usb_driver usblp_driver; static DEFINE_MUTEX(usblp_mutex); /* locks the existence of usblp's */ /* * Functions for usblp control messages. */ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, int recip, int value, void *buf, int len) { int retval; int index = usblp->ifnum; /* High byte has the interface index. Low byte has the alternate setting. */ if ((request == USBLP_REQ_GET_ID) && (type == USB_TYPE_CLASS)) index = (usblp->ifnum<<8)|usblp->protocol[usblp->current_protocol].alt_setting; retval = usb_control_msg(usblp->dev, dir ? usb_rcvctrlpipe(usblp->dev, 0) : usb_sndctrlpipe(usblp->dev, 0), request, type | dir | recip, value, index, buf, len, USBLP_CTL_TIMEOUT); dev_dbg(&usblp->intf->dev, "usblp_control_msg: rq: 0x%02x dir: %d recip: %d value: %d idx: %d len: %#x result: %d\n", request, !!dir, recip, value, index, len, retval); return retval < 0 ? 
retval : 0; } #define usblp_read_status(usblp, status)\ usblp_ctrl_msg(usblp, USBLP_REQ_GET_STATUS, USB_TYPE_CLASS, USB_DIR_IN, USB_RECIP_INTERFACE, 0, status, 1) #define usblp_get_id(usblp, config, id, maxlen)\ usblp_ctrl_msg(usblp, USBLP_REQ_GET_ID, USB_TYPE_CLASS, USB_DIR_IN, USB_RECIP_INTERFACE, config, id, maxlen) #define usblp_reset(usblp)\ usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0) static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel) { u8 *buf; int ret; buf = kzalloc(1, GFP_KERNEL); if (!buf) return -ENOMEM; ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buf, 1); if (ret == 0) *new_channel = buf[0]; kfree(buf); return ret; } /* * See the description for usblp_select_alts() below for the usage * explanation. Look into your /sys/kernel/debug/usb/devices and dmesg in * case of any trouble. */ static int proto_bias = -1; /* * URB callback. */ static void usblp_bulk_read(struct urb *urb) { struct usblp *usblp = urb->context; int status = urb->status; unsigned long flags; if (usblp->present && usblp->used) { if (status) printk(KERN_WARNING "usblp%d: " "nonzero read bulk status received: %d\n", usblp->minor, status); } spin_lock_irqsave(&usblp->lock, flags); if (status < 0) usblp->rstatus = status; else usblp->rstatus = urb->actual_length; usblp->rcomplete = 1; wake_up(&usblp->rwait); spin_unlock_irqrestore(&usblp->lock, flags); usb_free_urb(urb); } static void usblp_bulk_write(struct urb *urb) { struct usblp *usblp = urb->context; int status = urb->status; unsigned long flags; if (usblp->present && usblp->used) { if (status) printk(KERN_WARNING "usblp%d: " "nonzero write bulk status received: %d\n", usblp->minor, status); } spin_lock_irqsave(&usblp->lock, flags); if (status < 0) usblp->wstatus = status; else usblp->wstatus = urb->actual_length; usblp->no_paper = 0; usblp->wcomplete = 1; wake_up(&usblp->wwait); spin_unlock_irqrestore(&usblp->lock, flags); usb_free_urb(urb); } /* * Get and print printer errors. */ static const char *usblp_messages[] = { "ok", "out of paper", "off-line", "on fire" }; static int usblp_check_status(struct usblp *usblp, int err) { unsigned char status, newerr = 0; int error; mutex_lock(&usblp->mut); if ((error = usblp_read_status(usblp, usblp->statusbuf)) < 0) { mutex_unlock(&usblp->mut); printk_ratelimited(KERN_ERR "usblp%d: error %d reading printer status\n", usblp->minor, error); return 0; } status = *usblp->statusbuf; mutex_unlock(&usblp->mut); if (~status & LP_PERRORP) newerr = 3; if (status & LP_POUTPA) newerr = 1; if (~status & LP_PSELECD) newerr = 2; if (newerr != err) { printk(KERN_INFO "usblp%d: %s\n", usblp->minor, usblp_messages[newerr]); } return newerr; } static int handle_bidir(struct usblp *usblp) { if (usblp->bidir && usblp->used) { if (usblp_submit_read(usblp) < 0) return -EIO; } return 0; } /* * File op functions. 
*/ static int usblp_open(struct inode *inode, struct file *file) { int minor = iminor(inode); struct usblp *usblp; struct usb_interface *intf; int retval; if (minor < 0) return -ENODEV; mutex_lock(&usblp_mutex); retval = -ENODEV; intf = usb_find_interface(&usblp_driver, minor); if (!intf) goto out; usblp = usb_get_intfdata(intf); if (!usblp || !usblp->dev || !usblp->present) goto out; retval = -EBUSY; if (usblp->used) goto out; /* * We do not implement LP_ABORTOPEN/LPABORTOPEN for two reasons: * - We do not want persistent state which close(2) does not clear * - It is not used anyway, according to CUPS people */ retval = usb_autopm_get_interface(intf); if (retval < 0) goto out; usblp->used = 1; file->private_data = usblp; usblp->wcomplete = 1; /* we begin writeable */ usblp->wstatus = 0; usblp->rcomplete = 0; if (handle_bidir(usblp) < 0) { usb_autopm_put_interface(intf); usblp->used = 0; file->private_data = NULL; retval = -EIO; } out: mutex_unlock(&usblp_mutex); return retval; } static void usblp_cleanup(struct usblp *usblp) { printk(KERN_INFO "usblp%d: removed\n", usblp->minor); kfree(usblp->readbuf); kfree(usblp->device_id_string); kfree(usblp->statusbuf); usb_put_intf(usblp->intf); kfree(usblp); } static void usblp_unlink_urbs(struct usblp *usblp) { usb_kill_anchored_urbs(&usblp->urbs); } static int usblp_release(struct inode *inode, struct file *file) { struct usblp *usblp = file->private_data; usblp->flags &= ~LP_ABORT; mutex_lock(&usblp_mutex); usblp->used = 0; if (usblp->present) usblp_unlink_urbs(usblp); usb_autopm_put_interface(usblp->intf); if (!usblp->present) /* finish cleanup from disconnect */ usblp_cleanup(usblp); /* any URBs must be dead */ mutex_unlock(&usblp_mutex); return 0; } /* No kernel lock - fine */ static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait) { struct usblp *usblp = file->private_data; __poll_t ret = 0; unsigned long flags; /* Should we check file->f_mode & FMODE_WRITE before poll_wait()? 
*/ poll_wait(file, &usblp->rwait, wait); poll_wait(file, &usblp->wwait, wait); mutex_lock(&usblp->mut); if (!usblp->present) ret |= EPOLLHUP; mutex_unlock(&usblp->mut); spin_lock_irqsave(&usblp->lock, flags); if (usblp->bidir && usblp->rcomplete) ret |= EPOLLIN | EPOLLRDNORM; if (usblp->no_paper || usblp->wcomplete) ret |= EPOLLOUT | EPOLLWRNORM; spin_unlock_irqrestore(&usblp->lock, flags); return ret; } static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct usblp *usblp = file->private_data; int length, err, i; unsigned char newChannel; int status; int twoints[2]; int retval = 0; mutex_lock(&usblp->mut); if (!usblp->present) { retval = -ENODEV; goto done; } dev_dbg(&usblp->intf->dev, "usblp_ioctl: cmd=0x%x (%c nr=%d len=%d dir=%d)\n", cmd, _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd)); if (_IOC_TYPE(cmd) == 'P') /* new-style ioctl number */ switch (_IOC_NR(cmd)) { case IOCNR_GET_DEVICE_ID: /* get the DEVICE_ID string */ if (_IOC_DIR(cmd) != _IOC_READ) { retval = -EINVAL; goto done; } length = usblp_cache_device_id_string(usblp); if (length < 0) { retval = length; goto done; } if (length > _IOC_SIZE(cmd)) length = _IOC_SIZE(cmd); /* truncate */ if (copy_to_user((void __user *) arg, usblp->device_id_string, (unsigned long) length)) { retval = -EFAULT; goto done; } break; case IOCNR_GET_PROTOCOLS: if (_IOC_DIR(cmd) != _IOC_READ || _IOC_SIZE(cmd) < sizeof(twoints)) { retval = -EINVAL; goto done; } twoints[0] = usblp->current_protocol; twoints[1] = 0; for (i = USBLP_FIRST_PROTOCOL; i <= USBLP_LAST_PROTOCOL; i++) { if (usblp->protocol[i].alt_setting >= 0) twoints[1] |= (1<<i); } if (copy_to_user((void __user *)arg, (unsigned char *)twoints, sizeof(twoints))) { retval = -EFAULT; goto done; } break; case IOCNR_SET_PROTOCOL: if (_IOC_DIR(cmd) != _IOC_WRITE) { retval = -EINVAL; goto done; } #ifdef DEBUG if (arg == -10) { usblp_dump(usblp); break; } #endif usblp_unlink_urbs(usblp); retval = usblp_set_protocol(usblp, arg); if (retval < 0) { usblp_set_protocol(usblp, usblp->current_protocol); } break; case IOCNR_HP_SET_CHANNEL: if (_IOC_DIR(cmd) != _IOC_WRITE || le16_to_cpu(usblp->dev->descriptor.idVendor) != 0x03F0 || usblp->quirks & USBLP_QUIRK_BIDIR) { retval = -EINVAL; goto done; } err = usblp_hp_channel_change_request(usblp, arg, &newChannel); if (err < 0) { dev_err(&usblp->dev->dev, "usblp%d: error = %d setting " "HP channel\n", usblp->minor, err); retval = -EIO; goto done; } dev_dbg(&usblp->intf->dev, "usblp%d requested/got HP channel %ld/%d\n", usblp->minor, arg, newChannel); break; case IOCNR_GET_BUS_ADDRESS: if (_IOC_DIR(cmd) != _IOC_READ || _IOC_SIZE(cmd) < sizeof(twoints)) { retval = -EINVAL; goto done; } twoints[0] = usblp->dev->bus->busnum; twoints[1] = usblp->dev->devnum; if (copy_to_user((void __user *)arg, (unsigned char *)twoints, sizeof(twoints))) { retval = -EFAULT; goto done; } dev_dbg(&usblp->intf->dev, "usblp%d is bus=%d, device=%d\n", usblp->minor, twoints[0], twoints[1]); break; case IOCNR_GET_VID_PID: if (_IOC_DIR(cmd) != _IOC_READ || _IOC_SIZE(cmd) < sizeof(twoints)) { retval = -EINVAL; goto done; } twoints[0] = le16_to_cpu(usblp->dev->descriptor.idVendor); twoints[1] = le16_to_cpu(usblp->dev->descriptor.idProduct); if (copy_to_user((void __user *)arg, (unsigned char *)twoints, sizeof(twoints))) { retval = -EFAULT; goto done; } dev_dbg(&usblp->intf->dev, "usblp%d is VID=0x%4.4X, PID=0x%4.4X\n", usblp->minor, twoints[0], twoints[1]); break; case IOCNR_SOFT_RESET: if (_IOC_DIR(cmd) != _IOC_NONE) { retval = -EINVAL; goto done; } 
retval = usblp_reset(usblp); break; default: retval = -ENOTTY; } else /* old-style ioctl value */ switch (cmd) { case LPGETSTATUS: retval = usblp_read_status(usblp, usblp->statusbuf); if (retval) { printk_ratelimited(KERN_ERR "usblp%d:" "failed reading printer status (%d)\n", usblp->minor, retval); retval = -EIO; goto done; } status = *usblp->statusbuf; if (copy_to_user((void __user *)arg, &status, sizeof(int))) retval = -EFAULT; break; case LPABORT: if (arg) usblp->flags |= LP_ABORT; else usblp->flags &= ~LP_ABORT; break; default: retval = -ENOTTY; } done: mutex_unlock(&usblp->mut); return retval; } static struct urb *usblp_new_writeurb(struct usblp *usblp, int transfer_length) { struct urb *urb; char *writebuf; writebuf = kmalloc(transfer_length, GFP_KERNEL); if (writebuf == NULL) return NULL; urb = usb_alloc_urb(0, GFP_KERNEL); if (urb == NULL) { kfree(writebuf); return NULL; } usb_fill_bulk_urb(urb, usblp->dev, usb_sndbulkpipe(usblp->dev, usblp->protocol[usblp->current_protocol].epwrite->bEndpointAddress), writebuf, transfer_length, usblp_bulk_write, usblp); urb->transfer_flags |= URB_FREE_BUFFER; return urb; } static ssize_t usblp_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct usblp *usblp = file->private_data; struct urb *writeurb; int rv; int transfer_length; ssize_t writecount = 0; if (mutex_lock_interruptible(&usblp->wmut)) { rv = -EINTR; goto raise_biglock; } if ((rv = usblp_wwait(usblp, !!(file->f_flags & O_NONBLOCK))) < 0) goto raise_wait; while (writecount < count) { /* * Step 1: Submit next block. */ if ((transfer_length = count - writecount) > USBLP_BUF_SIZE) transfer_length = USBLP_BUF_SIZE; rv = -ENOMEM; writeurb = usblp_new_writeurb(usblp, transfer_length); if (writeurb == NULL) goto raise_urb; usb_anchor_urb(writeurb, &usblp->urbs); if (copy_from_user(writeurb->transfer_buffer, buffer + writecount, transfer_length)) { rv = -EFAULT; goto raise_badaddr; } spin_lock_irq(&usblp->lock); usblp->wcomplete = 0; spin_unlock_irq(&usblp->lock); if ((rv = usb_submit_urb(writeurb, GFP_KERNEL)) < 0) { usblp->wstatus = 0; spin_lock_irq(&usblp->lock); usblp->no_paper = 0; usblp->wcomplete = 1; wake_up(&usblp->wwait); spin_unlock_irq(&usblp->lock); if (rv != -ENOMEM) rv = -EIO; goto raise_submit; } /* * Step 2: Wait for transfer to end, collect results. */ rv = usblp_wwait(usblp, !!(file->f_flags&O_NONBLOCK)); if (rv < 0) { if (rv == -EAGAIN) { /* Presume that it's going to complete well. */ writecount += transfer_length; } if (rv == -ENOSPC) { spin_lock_irq(&usblp->lock); usblp->no_paper = 1; /* Mark for poll(2) */ spin_unlock_irq(&usblp->lock); writecount += transfer_length; } /* Leave URB dangling, to be cleaned on close. */ goto collect_error; } if (usblp->wstatus < 0) { rv = -EIO; goto collect_error; } /* * This is critical: it must be our URB, not other writer's. * The wmut exists mainly to cover us here. */ writecount += usblp->wstatus; } mutex_unlock(&usblp->wmut); return writecount; raise_submit: raise_badaddr: usb_unanchor_urb(writeurb); usb_free_urb(writeurb); raise_urb: raise_wait: collect_error: /* Out of raise sequence */ mutex_unlock(&usblp->wmut); raise_biglock: return writecount ? writecount : rv; } /* * Notice that we fail to restart in a few cases: on EFAULT, on restart * error, etc. This is the historical behaviour. In all such cases we return * EIO, and applications loop in order to get the new read going. 
*/ static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, loff_t *ppos) { struct usblp *usblp = file->private_data; ssize_t count; ssize_t avail; int rv; if (!usblp->bidir) return -EINVAL; rv = usblp_rwait_and_lock(usblp, !!(file->f_flags & O_NONBLOCK)); if (rv < 0) return rv; if (!usblp->present) { count = -ENODEV; goto done; } if ((avail = usblp->rstatus) < 0) { printk(KERN_ERR "usblp%d: error %d reading from printer\n", usblp->minor, (int)avail); usblp_submit_read(usblp); count = -EIO; goto done; } count = len < avail - usblp->readcount ? len : avail - usblp->readcount; if (count != 0 && copy_to_user(buffer, usblp->readbuf + usblp->readcount, count)) { count = -EFAULT; goto done; } if ((usblp->readcount += count) == avail) { if (usblp_submit_read(usblp) < 0) { /* We don't want to leak USB return codes into errno. */ if (count == 0) count = -EIO; goto done; } } done: mutex_unlock(&usblp->mut); return count; } /* * Wait for the write path to come idle. * This is called under the ->wmut, so the idle path stays idle. * * Our write path has a peculiar property: it does not buffer like a tty, * but waits for the write to succeed. This allows our ->release to bug out * without waiting for writes to drain. But it obviously does not work * when O_NONBLOCK is set. So, applications setting O_NONBLOCK must use * select(2) or poll(2) to wait for the buffer to drain before closing. * Alternatively, set blocking mode with fcntl and issue a zero-size write. */ static int usblp_wwait(struct usblp *usblp, int nonblock) { DECLARE_WAITQUEUE(waita, current); int rc; int err = 0; add_wait_queue(&usblp->wwait, &waita); for (;;) { if (mutex_lock_interruptible(&usblp->mut)) { rc = -EINTR; break; } set_current_state(TASK_INTERRUPTIBLE); rc = usblp_wtest(usblp, nonblock); mutex_unlock(&usblp->mut); if (rc <= 0) break; if (schedule_timeout(msecs_to_jiffies(1500)) == 0) { if (usblp->flags & LP_ABORT) { err = usblp_check_status(usblp, err); if (err == 1) { /* Paper out */ rc = -ENOSPC; break; } } else { /* Prod the printer, Gentoo#251237. */ mutex_lock(&usblp->mut); usblp_read_status(usblp, usblp->statusbuf); mutex_unlock(&usblp->mut); } } } set_current_state(TASK_RUNNING); remove_wait_queue(&usblp->wwait, &waita); return rc; } static int usblp_wtest(struct usblp *usblp, int nonblock) { unsigned long flags; if (!usblp->present) return -ENODEV; if (signal_pending(current)) return -EINTR; spin_lock_irqsave(&usblp->lock, flags); if (usblp->wcomplete) { spin_unlock_irqrestore(&usblp->lock, flags); return 0; } spin_unlock_irqrestore(&usblp->lock, flags); if (nonblock) return -EAGAIN; return 1; } /* * Wait for read bytes to become available. This probably should have been * called usblp_r_lock_and_wait(), because we lock first. But it's a traditional * name for functions which lock and return. * * We do not use wait_event_interruptible because it makes locking iffy. 
*/ static int usblp_rwait_and_lock(struct usblp *usblp, int nonblock) { DECLARE_WAITQUEUE(waita, current); int rc; add_wait_queue(&usblp->rwait, &waita); for (;;) { if (mutex_lock_interruptible(&usblp->mut)) { rc = -EINTR; break; } set_current_state(TASK_INTERRUPTIBLE); if ((rc = usblp_rtest(usblp, nonblock)) < 0) { mutex_unlock(&usblp->mut); break; } if (rc == 0) /* Keep it locked */ break; mutex_unlock(&usblp->mut); schedule(); } set_current_state(TASK_RUNNING); remove_wait_queue(&usblp->rwait, &waita); return rc; } static int usblp_rtest(struct usblp *usblp, int nonblock) { unsigned long flags; if (!usblp->present) return -ENODEV; if (signal_pending(current)) return -EINTR; spin_lock_irqsave(&usblp->lock, flags); if (usblp->rcomplete) { spin_unlock_irqrestore(&usblp->lock, flags); return 0; } spin_unlock_irqrestore(&usblp->lock, flags); if (nonblock) return -EAGAIN; return 1; } /* * Please check ->bidir and other such things outside for now. */ static int usblp_submit_read(struct usblp *usblp) { struct urb *urb; unsigned long flags; int rc; rc = -ENOMEM; urb = usb_alloc_urb(0, GFP_KERNEL); if (urb == NULL) goto raise_urb; usb_fill_bulk_urb(urb, usblp->dev, usb_rcvbulkpipe(usblp->dev, usblp->protocol[usblp->current_protocol].epread->bEndpointAddress), usblp->readbuf, USBLP_BUF_SIZE_IN, usblp_bulk_read, usblp); usb_anchor_urb(urb, &usblp->urbs); spin_lock_irqsave(&usblp->lock, flags); usblp->readcount = 0; /* XXX Why here? */ usblp->rcomplete = 0; spin_unlock_irqrestore(&usblp->lock, flags); if ((rc = usb_submit_urb(urb, GFP_KERNEL)) < 0) { dev_dbg(&usblp->intf->dev, "error submitting urb (%d)\n", rc); spin_lock_irqsave(&usblp->lock, flags); usblp->rstatus = rc; usblp->rcomplete = 1; spin_unlock_irqrestore(&usblp->lock, flags); goto raise_submit; } return 0; raise_submit: usb_unanchor_urb(urb); usb_free_urb(urb); raise_urb: return rc; } /* * Checks for printers that have quirks, such as requiring unidirectional * communication but reporting bidirectional; currently some HP printers * have this flaw (HP 810, 880, 895, etc.), or needing an init string * sent at each open (like some Epsons). * Returns 1 if found, 0 if not found. * * HP recommended that we use the bidirectional interface but * don't attempt any bulk IN transfers from the IN endpoint. * Here's some more detail on the problem: * The problem is not that it isn't bidirectional though. The problem * is that if you request a device ID, or status information, while * the buffers are full, the return data will end up in the print data * buffer. For example if you make sure you never request the device ID * while you are sending print data, and you don't try to query the * printer status every couple of milliseconds, you will probably be OK. 
*/ static unsigned int usblp_quirks(__u16 vendor, __u16 product) { int i; for (i = 0; quirk_printers[i].vendorId; i++) { if (vendor == quirk_printers[i].vendorId && product == quirk_printers[i].productId) return quirk_printers[i].quirks; } return 0; } static const struct file_operations usblp_fops = { .owner = THIS_MODULE, .read = usblp_read, .write = usblp_write, .poll = usblp_poll, .unlocked_ioctl = usblp_ioctl, .compat_ioctl = usblp_ioctl, .open = usblp_open, .release = usblp_release, .llseek = noop_llseek, }; static char *usblp_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } static struct usb_class_driver usblp_class = { .name = "lp%d", .devnode = usblp_devnode, .fops = &usblp_fops, .minor_base = USBLP_MINOR_BASE, }; static ssize_t ieee1284_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct usblp *usblp = usb_get_intfdata(intf); if (usblp->device_id_string[0] == 0 && usblp->device_id_string[1] == 0) return 0; return sprintf(buf, "%s", usblp->device_id_string+2); } static DEVICE_ATTR_RO(ieee1284_id); static struct attribute *usblp_attrs[] = { &dev_attr_ieee1284_id.attr, NULL, }; ATTRIBUTE_GROUPS(usblp); static int usblp_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct usblp *usblp; int protocol; int retval; /* Malloc and start initializing usblp structure so we can use it * directly. */ usblp = kzalloc(sizeof(struct usblp), GFP_KERNEL); if (!usblp) { retval = -ENOMEM; goto abort_ret; } usblp->dev = dev; mutex_init(&usblp->wmut); mutex_init(&usblp->mut); spin_lock_init(&usblp->lock); init_waitqueue_head(&usblp->rwait); init_waitqueue_head(&usblp->wwait); init_usb_anchor(&usblp->urbs); usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; usblp->intf = usb_get_intf(intf); /* Malloc device ID string buffer to the largest expected length, * since we can re-query it on an ioctl and a dynamic string * could change in length. */ if (!(usblp->device_id_string = kmalloc(USBLP_DEVICE_ID_SIZE, GFP_KERNEL))) { retval = -ENOMEM; goto abort; } /* * Allocate read buffer. We somewhat wastefully * malloc both regardless of bidirectionality, because the * alternate setting can be changed later via an ioctl. */ if (!(usblp->readbuf = kmalloc(USBLP_BUF_SIZE_IN, GFP_KERNEL))) { retval = -ENOMEM; goto abort; } /* Allocate buffer for printer status */ usblp->statusbuf = kmalloc(STATUS_BUF_SIZE, GFP_KERNEL); if (!usblp->statusbuf) { retval = -ENOMEM; goto abort; } /* Lookup quirks for this printer. */ usblp->quirks = usblp_quirks( le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); /* Analyze and pick initial alternate settings and endpoints. */ protocol = usblp_select_alts(usblp); if (protocol < 0) { dev_dbg(&intf->dev, "incompatible printer-class device 0x%4.4X/0x%4.4X\n", le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); retval = -ENODEV; goto abort; } /* Setup the selected alternate setting and endpoints. */ if (usblp_set_protocol(usblp, protocol) < 0) { retval = -ENODEV; /* ->probe isn't ->ioctl */ goto abort; } /* Retrieve and store the device ID string. 
*/ usblp_cache_device_id_string(usblp); #ifdef DEBUG usblp_check_status(usblp, 0); #endif usb_set_intfdata(intf, usblp); usblp->present = 1; retval = usb_register_dev(intf, &usblp_class); if (retval) { dev_err(&intf->dev, "usblp: Not able to get a minor (base %u, slice default): %d\n", USBLP_MINOR_BASE, retval); goto abort_intfdata; } usblp->minor = intf->minor; dev_info(&intf->dev, "usblp%d: USB %sdirectional printer dev %d if %d alt %d proto %d vid 0x%4.4X pid 0x%4.4X\n", usblp->minor, usblp->bidir ? "Bi" : "Uni", dev->devnum, usblp->ifnum, usblp->protocol[usblp->current_protocol].alt_setting, usblp->current_protocol, le16_to_cpu(usblp->dev->descriptor.idVendor), le16_to_cpu(usblp->dev->descriptor.idProduct)); return 0; abort_intfdata: usb_set_intfdata(intf, NULL); abort: kfree(usblp->readbuf); kfree(usblp->statusbuf); kfree(usblp->device_id_string); usb_put_intf(usblp->intf); kfree(usblp); abort_ret: return retval; } /* * We are a "new" style driver with usb_device_id table, * but our requirements are too intricate for simple match to handle. * * The "proto_bias" option may be used to specify the preferred protocol * for all USB printers (1=USB_CLASS_PRINTER/1/1, 2=USB_CLASS_PRINTER/1/2, * 3=USB_CLASS_PRINTER/1/3). If the device supports the preferred protocol, * then we bind to it. * * The best interface for us is USB_CLASS_PRINTER/1/2, because it * is compatible with a stream of characters. If we find it, we bind to it. * * Note that the people from hpoj.sourceforge.net need to be able to * bind to USB_CLASS_PRINTER/1/3 (MLC/1284.4), so we provide them ioctls * for this purpose. * * Failing USB_CLASS_PRINTER/1/2, we look for USB_CLASS_PRINTER/1/3, * even though it's probably not stream-compatible, because this matches * the behaviour of the old code. * * If nothing else, we bind to USB_CLASS_PRINTER/1/1 * - the unidirectional interface. */ static int usblp_select_alts(struct usblp *usblp) { struct usb_interface *if_alt; struct usb_host_interface *ifd; struct usb_endpoint_descriptor *epwrite, *epread; int p, i; int res; if_alt = usblp->intf; for (p = 0; p < USBLP_MAX_PROTOCOLS; p++) usblp->protocol[p].alt_setting = -1; /* Find out what we have. */ for (i = 0; i < if_alt->num_altsetting; i++) { ifd = &if_alt->altsetting[i]; if (ifd->desc.bInterfaceClass != USB_CLASS_PRINTER || ifd->desc.bInterfaceSubClass != 1) if (!(usblp->quirks & USBLP_QUIRK_BAD_CLASS)) continue; if (ifd->desc.bInterfaceProtocol < USBLP_FIRST_PROTOCOL || ifd->desc.bInterfaceProtocol > USBLP_LAST_PROTOCOL) continue; /* Look for the expected bulk endpoints. */ if (ifd->desc.bInterfaceProtocol > 1) { res = usb_find_common_endpoints(ifd, &epread, &epwrite, NULL, NULL); } else { epread = NULL; res = usb_find_bulk_out_endpoint(ifd, &epwrite); } /* Ignore buggy hardware without the right endpoints. */ if (res) continue; /* Turn off reads for buggy bidirectional printers. */ if (usblp->quirks & USBLP_QUIRK_BIDIR) { printk(KERN_INFO "usblp%d: Disabling reads from " "problematic bidirectional printer\n", usblp->minor); epread = NULL; } usblp->protocol[ifd->desc.bInterfaceProtocol].alt_setting = ifd->desc.bAlternateSetting; usblp->protocol[ifd->desc.bInterfaceProtocol].epwrite = epwrite; usblp->protocol[ifd->desc.bInterfaceProtocol].epread = epread; } /* If our requested protocol is supported, then use it. */ if (proto_bias >= USBLP_FIRST_PROTOCOL && proto_bias <= USBLP_LAST_PROTOCOL && usblp->protocol[proto_bias].alt_setting != -1) return proto_bias; /* Ordering is important here. 
*/ if (usblp->protocol[2].alt_setting != -1) return 2; if (usblp->protocol[1].alt_setting != -1) return 1; if (usblp->protocol[3].alt_setting != -1) return 3; /* If nothing is available, then don't bind to this device. */ return -1; } static int usblp_set_protocol(struct usblp *usblp, int protocol) { int r, alts; if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) return -EINVAL; alts = usblp->protocol[protocol].alt_setting; if (alts < 0) return -EINVAL; /* Don't unnecessarily set the interface if there's a single alt. */ if (usblp->intf->num_altsetting > 1) { r = usb_set_interface(usblp->dev, usblp->ifnum, alts); if (r < 0) { printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", alts, usblp->ifnum); return r; } } usblp->bidir = (usblp->protocol[protocol].epread != NULL); usblp->current_protocol = protocol; dev_dbg(&usblp->intf->dev, "usblp%d set protocol %d\n", usblp->minor, protocol); return 0; } /* Retrieves and caches device ID string. * Returns length, including length bytes but not null terminator. * On error, returns a negative errno value. */ static int usblp_cache_device_id_string(struct usblp *usblp) { int err, length; err = usblp_get_id(usblp, 0, usblp->device_id_string, USBLP_DEVICE_ID_SIZE - 1); if (err < 0) { dev_dbg(&usblp->intf->dev, "usblp%d: error = %d reading IEEE-1284 Device ID string\n", usblp->minor, err); usblp->device_id_string[0] = usblp->device_id_string[1] = '\0'; return -EIO; } /* First two bytes are length in big-endian. * They count themselves, and we copy them into * the user's buffer. */ length = be16_to_cpu(*((__be16 *)usblp->device_id_string)); if (length < 2) length = 2; else if (length >= USBLP_DEVICE_ID_SIZE) length = USBLP_DEVICE_ID_SIZE - 1; usblp->device_id_string[length] = '\0'; dev_dbg(&usblp->intf->dev, "usblp%d Device ID string [len=%d]=\"%s\"\n", usblp->minor, length, &usblp->device_id_string[2]); return length; } static void usblp_disconnect(struct usb_interface *intf) { struct usblp *usblp = usb_get_intfdata(intf); usb_deregister_dev(intf, &usblp_class); if (!usblp || !usblp->dev) { dev_err(&intf->dev, "bogus disconnect\n"); BUG(); } mutex_lock(&usblp_mutex); mutex_lock(&usblp->mut); usblp->present = 0; wake_up(&usblp->wwait); wake_up(&usblp->rwait); usb_set_intfdata(intf, NULL); usblp_unlink_urbs(usblp); mutex_unlock(&usblp->mut); usb_poison_anchored_urbs(&usblp->urbs); if (!usblp->used) usblp_cleanup(usblp); mutex_unlock(&usblp_mutex); } static int usblp_suspend(struct usb_interface *intf, pm_message_t message) { struct usblp *usblp = usb_get_intfdata(intf); usblp_unlink_urbs(usblp); #if 0 /* XXX Do we want this? What if someone is reading, should we fail? 
*/ /* not strictly necessary, but just in case */ wake_up(&usblp->wwait); wake_up(&usblp->rwait); #endif return 0; } static int usblp_resume(struct usb_interface *intf) { struct usblp *usblp = usb_get_intfdata(intf); int r; r = handle_bidir(usblp); return r; } static const struct usb_device_id usblp_ids[] = { { USB_DEVICE_INFO(USB_CLASS_PRINTER, 1, 1) }, { USB_DEVICE_INFO(USB_CLASS_PRINTER, 1, 2) }, { USB_DEVICE_INFO(USB_CLASS_PRINTER, 1, 3) }, { USB_INTERFACE_INFO(USB_CLASS_PRINTER, 1, 1) }, { USB_INTERFACE_INFO(USB_CLASS_PRINTER, 1, 2) }, { USB_INTERFACE_INFO(USB_CLASS_PRINTER, 1, 3) }, { USB_DEVICE(0x04b8, 0x0202) }, /* Seiko Epson Receipt Printer M129C */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usblp_ids); static struct usb_driver usblp_driver = { .name = "usblp", .probe = usblp_probe, .disconnect = usblp_disconnect, .suspend = usblp_suspend, .resume = usblp_resume, .id_table = usblp_ids, .dev_groups = usblp_groups, .supports_autosuspend = 1, }; module_usb_driver(usblp_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); module_param(proto_bias, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(proto_bias, "Favourite protocol number"); MODULE_LICENSE("GPL"); |
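The ioctl surface defined at the top of usblp.c is meant to be exercised from user space (CUPS and the hpoj tools do exactly this). Below is a minimal user-space sketch that reads the IEEE-1284 DEVICE_ID string via LPIOC_GET_DEVICE_ID; the device node /dev/usb/lp0 is an assumption, and the ioctl number is re-declared locally because the driver's definitions are not in an exported header.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

/* Re-declared from usblp.c above; not exported to user space. */
#define IOCNR_GET_DEVICE_ID 1
#define LPIOC_GET_DEVICE_ID(len) _IOC(_IOC_READ, 'P', IOCNR_GET_DEVICE_ID, len)

int main(void)
{
	unsigned char id[1024];
	int fd, length;

	fd = open("/dev/usb/lp0", O_RDWR);	/* assumed device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, LPIOC_GET_DEVICE_ID(sizeof(id)), id) < 0) {
		perror("LPIOC_GET_DEVICE_ID");
		close(fd);
		return 1;
	}
	/* The first two bytes are a big-endian length that counts itself,
	 * mirroring usblp_cache_device_id_string() above. */
	length = (id[0] << 8) | id[1];
	if (length < 2)
		length = 2;
	if (length > (int)sizeof(id))
		length = (int)sizeof(id);
	printf("IEEE-1284 ID: %.*s\n", length - 2, id + 2);
	close(fd);
	return 0;
}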
// SPDX-License-Identifier: GPL-2.0 /* * bsg.c - block layer implementation of the sg v4 interface */ #include <linux/module.h> #include <linux/init.h> #include <linux/file.h> #include <linux/blkdev.h> #include <linux/cdev.h> #include <linux/jiffies.h> #include <linux/percpu.h> #include <linux/idr.h> #include <linux/bsg.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_ioctl.h> #include <scsi/sg.h> #define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver" #define BSG_VERSION "0.4" struct bsg_device { struct request_queue *queue; struct device device; struct cdev cdev; int max_queue; unsigned int timeout; unsigned int reserved_size; bsg_sg_io_fn *sg_io_fn; }; static inline struct bsg_device *to_bsg_device(struct inode *inode) { return container_of(inode->i_cdev, struct bsg_device, cdev); } #define BSG_DEFAULT_CMDS 64 #define BSG_MAX_DEVS (1 << MINORBITS) static DEFINE_IDA(bsg_minor_ida); static const struct class bsg_class; static int bsg_major; static unsigned int bsg_timeout(struct bsg_device *bd, struct sg_io_v4 *hdr) { unsigned int timeout = BLK_DEFAULT_SG_TIMEOUT; if (hdr->timeout) timeout = msecs_to_jiffies(hdr->timeout); else if (bd->timeout) timeout = bd->timeout; return max_t(unsigned int, timeout, BLK_MIN_SG_TIMEOUT); } static int bsg_sg_io(struct bsg_device *bd, bool open_for_write, void __user *uarg) { struct sg_io_v4 hdr; int ret; if (copy_from_user(&hdr, uarg, sizeof(hdr))) return -EFAULT; if (hdr.guard != 'Q') return -EINVAL; ret = bd->sg_io_fn(bd->queue, &hdr, open_for_write, bsg_timeout(bd, &hdr)); if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr))) return -EFAULT; return ret; } static int bsg_open(struct inode *inode, struct file *file) { if (!blk_get_queue(to_bsg_device(inode)->queue)) return -ENXIO; return 0; } static int bsg_release(struct inode *inode, struct file *file) { blk_put_queue(to_bsg_device(inode)->queue); return 0; } static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg) { return put_user(READ_ONCE(bd->max_queue), uarg); } static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg) { int max_queue; if (get_user(max_queue, uarg)) return -EFAULT; if (max_queue < 1) return -EINVAL; WRITE_ONCE(bd->max_queue, max_queue); return 0; } static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct bsg_device *bd = to_bsg_device(file_inode(file)); struct request_queue *q = bd->queue; void __user *uarg = (void __user *) arg; int __user *intp = uarg; int val; switch (cmd) { /* * Our own ioctls */ case
SG_GET_COMMAND_Q: return bsg_get_command_q(bd, uarg); case SG_SET_COMMAND_Q: return bsg_set_command_q(bd, uarg); /* * SCSI/sg ioctls */ case SG_GET_VERSION_NUM: return put_user(30527, intp); case SCSI_IOCTL_GET_IDLUN: return put_user(0, intp); case SCSI_IOCTL_GET_BUS_NUMBER: return put_user(0, intp); case SG_SET_TIMEOUT: if (get_user(val, intp)) return -EFAULT; bd->timeout = clock_t_to_jiffies(val); return 0; case SG_GET_TIMEOUT: return jiffies_to_clock_t(bd->timeout); case SG_GET_RESERVED_SIZE: return put_user(min(bd->reserved_size, queue_max_bytes(q)), intp); case SG_SET_RESERVED_SIZE: if (get_user(val, intp)) return -EFAULT; if (val < 0) return -EINVAL; bd->reserved_size = min_t(unsigned int, val, queue_max_bytes(q)); return 0; case SG_EMULATED_HOST: return put_user(1, intp); case SG_IO: return bsg_sg_io(bd, file->f_mode & FMODE_WRITE, uarg); case SCSI_IOCTL_SEND_COMMAND: pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n", current->comm); return -EINVAL; default: return -ENOTTY; } } static const struct file_operations bsg_fops = { .open = bsg_open, .release = bsg_release, .unlocked_ioctl = bsg_ioctl, .compat_ioctl = compat_ptr_ioctl, .owner = THIS_MODULE, .llseek = default_llseek, }; static void bsg_device_release(struct device *dev) { struct bsg_device *bd = container_of(dev, struct bsg_device, device); ida_free(&bsg_minor_ida, MINOR(bd->device.devt)); kfree(bd); } void bsg_unregister_queue(struct bsg_device *bd) { struct gendisk *disk = bd->queue->disk; if (disk && disk->queue_kobj.sd) sysfs_remove_link(&disk->queue_kobj, "bsg"); cdev_device_del(&bd->cdev, &bd->device); put_device(&bd->device); } EXPORT_SYMBOL_GPL(bsg_unregister_queue); struct bsg_device *bsg_register_queue(struct request_queue *q, struct device *parent, const char *name, bsg_sg_io_fn *sg_io_fn) { struct bsg_device *bd; int ret; bd = kzalloc(sizeof(*bd), GFP_KERNEL); if (!bd) return ERR_PTR(-ENOMEM); bd->max_queue = BSG_DEFAULT_CMDS; bd->reserved_size = INT_MAX; bd->queue = q; bd->sg_io_fn = sg_io_fn; ret = ida_alloc_max(&bsg_minor_ida, BSG_MAX_DEVS - 1, GFP_KERNEL); if (ret < 0) { if (ret == -ENOSPC) dev_err(parent, "bsg: too many bsg devices\n"); kfree(bd); return ERR_PTR(ret); } bd->device.devt = MKDEV(bsg_major, ret); bd->device.class = &bsg_class; bd->device.parent = parent; bd->device.release = bsg_device_release; dev_set_name(&bd->device, "%s", name); device_initialize(&bd->device); cdev_init(&bd->cdev, &bsg_fops); bd->cdev.owner = THIS_MODULE; ret = cdev_device_add(&bd->cdev, &bd->device); if (ret) goto out_put_device; if (q->disk && q->disk->queue_kobj.sd) { ret = sysfs_create_link(&q->disk->queue_kobj, &bd->device.kobj, "bsg"); if (ret) goto out_device_del; } return bd; out_device_del: cdev_device_del(&bd->cdev, &bd->device); out_put_device: put_device(&bd->device); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(bsg_register_queue); static char *bsg_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev)); } static const struct class bsg_class = { .name = "bsg", .devnode = bsg_devnode, }; static int __init bsg_init(void) { dev_t devid; int ret; ret = class_register(&bsg_class); if (ret) return ret; ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg"); if (ret) goto destroy_bsg_class; bsg_major = MAJOR(devid); printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION " loaded (major %d)\n", bsg_major); return 0; destroy_bsg_class: class_unregister(&bsg_class); return ret; } MODULE_AUTHOR("Jens Axboe"); MODULE_DESCRIPTION(BSG_DESCRIPTION); 
MODULE_LICENSE("GPL"); device_initcall(bsg_init);
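bsg_sg_io() above rejects any header whose guard is not 'Q' and otherwise hands the sg v4 request to the queue's sg_io_fn with a clamped timeout. Here is a minimal user-space sketch issuing a SCSI TEST UNIT READY through a bsg node; the path /dev/bsg/0:0:0:0 is an assumption and must name a SCSI device that actually exists on your system.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>		/* SG_IO */
#include <linux/bsg.h>		/* struct sg_io_v4, BSG_PROTOCOL_SCSI */

int main(void)
{
	unsigned char cdb[6] = { 0 };	/* TEST UNIT READY, all zeroes */
	unsigned char sense[32];
	struct sg_io_v4 hdr;
	int fd;

	fd = open("/dev/bsg/0:0:0:0", O_RDWR);	/* assumed device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';			/* checked by bsg_sg_io() */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request = (uintptr_t)cdb;
	hdr.request_len = sizeof(cdb);
	hdr.response = (uintptr_t)sense;
	hdr.max_response_len = sizeof(sense);
	hdr.timeout = 10000;	/* ms; 0 would fall back to the defaults
				 * applied by bsg_timeout() */

	if (ioctl(fd, SG_IO, &hdr) < 0) {
		perror("SG_IO");
		close(fd);
		return 1;
	}
	printf("device_status=%u driver_status=%u\n",
	       hdr.device_status, hdr.driver_status);
	close(fd);
	return 0;
}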
1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 | /* SPDX-License-Identifier: GPL-2.0-only */ /* * NXP Wireless LAN device driver: major data structures and prototypes * * Copyright 2011-2020 NXP */ #ifndef _MWIFIEX_MAIN_H_ #define _MWIFIEX_MAIN_H_ #include <linux/completion.h> #include <linux/kernel.h> #include <linux/kstrtox.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/semaphore.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <net/sock.h> #include <linux/vmalloc.h> #include <linux/firmware.h> #include <linux/ctype.h> #include <linux/of.h> #include <linux/idr.h> #include <linux/inetdevice.h> #include <linux/devcoredump.h> #include <linux/err.h> #include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/of_irq.h> #include <linux/workqueue.h> #include "decl.h" #include "ioctl.h" #include "util.h" #include "fw.h" #include "pcie.h" #include "usb.h" #include "sdio.h" extern const char driver_version[]; extern bool mfg_mode; extern bool aggr_ctrl; struct mwifiex_adapter; struct mwifiex_private; enum { MWIFIEX_ASYNC_CMD, MWIFIEX_SYNC_CMD }; #define MWIFIEX_DRIVER_MODE_STA BIT(0) #define MWIFIEX_DRIVER_MODE_UAP BIT(1) #define MWIFIEX_DRIVER_MODE_P2P BIT(2) #define MWIFIEX_DRIVER_MODE_BITMASK (BIT(0) | BIT(1) | BIT(2)) #define MWIFIEX_MAX_AP 64 #define MWIFIEX_MAX_PKTS_TXQ 16 #define MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT (5 * HZ) #define MWIFIEX_TIMER_10S 10000 #define MWIFIEX_TIMER_1S 1000 #define MAX_TX_PENDING 400 #define LOW_TX_PENDING 380 #define HIGH_RX_PENDING 50 #define LOW_RX_PENDING 20 #define MWIFIEX_UPLD_SIZE (2312) #define MAX_EVENT_SIZE 2048 #define MWIFIEX_FW_DUMP_SIZE (2 * 1024 * 1024) #define ARP_FILTER_MAX_BUF_SIZE 68 #define MWIFIEX_KEY_BUFFER_SIZE 16 #define MWIFIEX_DEFAULT_LISTEN_INTERVAL 10 #define MWIFIEX_MAX_REGION_CODE 9 #define DEFAULT_BCN_AVG_FACTOR 8 #define DEFAULT_DATA_AVG_FACTOR 8 #define FIRST_VALID_CHANNEL 0xff #define DEFAULT_AD_HOC_CHANNEL 6 #define DEFAULT_AD_HOC_CHANNEL_A 36 #define DEFAULT_BCN_MISS_TIMEOUT 5 #define MAX_SCAN_BEACON_BUFFER 8000 #define SCAN_BEACON_ENTRY_PAD 6 #define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 110 #define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 40 #define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 40 #define MWIFIEX_DEF_SCAN_CHAN_GAP_TIME 50 #define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI))) #define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S) #define WPA_GTK_OUI_OFFSET 2 #define RSN_GTK_OUI_OFFSET 2 #define MWIFIEX_OUI_NOT_PRESENT 0 #define MWIFIEX_OUI_PRESENT 1 #define PKT_TYPE_MGMT 0xE5 /* * Do not check for data_received for USB, as data_received * is handled in mwifiex_usb_recv for USB */ #define IS_CARD_RX_RCVD(adapter) (adapter->cmd_resp_received || \ adapter->event_received || \ adapter->data_received) #define MWIFIEX_TYPE_CMD 1 #define MWIFIEX_TYPE_DATA 0 #define MWIFIEX_TYPE_AGGR_DATA 10 #define MWIFIEX_TYPE_EVENT 3 #define MAX_BITMAP_RATES_SIZE 18 #define MAX_CHANNEL_BAND_BG 14 #define MAX_CHANNEL_BAND_A 165 #define MAX_FREQUENCY_BAND_BG 2484 #define MWIFIEX_EVENT_HEADER_LEN 4 #define MWIFIEX_UAP_EVENT_EXTRA_HEADER 2 #define MWIFIEX_TYPE_LEN 4 #define MWIFIEX_USB_TYPE_CMD 
0xF00DFACE #define MWIFIEX_USB_TYPE_DATA 0xBEADC0DE #define MWIFIEX_USB_TYPE_EVENT 0xBEEFFACE /* Threshold for tx_timeout_cnt before we trigger a card reset */ #define TX_TIMEOUT_THRESHOLD 6 #define MWIFIEX_DRV_INFO_SIZE_MAX 0x40000 /* Address alignment */ #define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1)) #define MWIFIEX_MAC_LOCAL_ADMIN_BIT 41 /** *enum mwifiex_debug_level - marvell wifi debug level */ enum MWIFIEX_DEBUG_LEVEL { MWIFIEX_DBG_MSG = 0x00000001, MWIFIEX_DBG_FATAL = 0x00000002, MWIFIEX_DBG_ERROR = 0x00000004, MWIFIEX_DBG_DATA = 0x00000008, MWIFIEX_DBG_CMD = 0x00000010, MWIFIEX_DBG_EVENT = 0x00000020, MWIFIEX_DBG_INTR = 0x00000040, MWIFIEX_DBG_IOCTL = 0x00000080, MWIFIEX_DBG_MPA_D = 0x00008000, MWIFIEX_DBG_DAT_D = 0x00010000, MWIFIEX_DBG_CMD_D = 0x00020000, MWIFIEX_DBG_EVT_D = 0x00040000, MWIFIEX_DBG_FW_D = 0x00080000, MWIFIEX_DBG_IF_D = 0x00100000, MWIFIEX_DBG_ENTRY = 0x10000000, MWIFIEX_DBG_WARN = 0x20000000, MWIFIEX_DBG_INFO = 0x40000000, MWIFIEX_DBG_DUMP = 0x80000000, MWIFIEX_DBG_ANY = 0xffffffff }; #define MWIFIEX_DEFAULT_DEBUG_MASK (MWIFIEX_DBG_MSG | \ MWIFIEX_DBG_FATAL | \ MWIFIEX_DBG_ERROR) __printf(3, 4) void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask, const char *fmt, ...); #define mwifiex_dbg(adapter, mask, fmt, ...) \ _mwifiex_dbg(adapter, MWIFIEX_DBG_##mask, fmt, ##__VA_ARGS__) #define DEBUG_DUMP_DATA_MAX_LEN 128 #define mwifiex_dbg_dump(adapter, dbg_mask, str, buf, len) \ do { \ if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask) \ print_hex_dump(KERN_DEBUG, str, \ DUMP_PREFIX_OFFSET, 16, 1, \ buf, len, false); \ } while (0) /** Min BGSCAN interval 15 second */ #define MWIFIEX_BGSCAN_INTERVAL 15000 /** default repeat count */ #define MWIFIEX_BGSCAN_REPEAT_COUNT 6 struct mwifiex_dbg { u32 num_cmd_host_to_card_failure; u32 num_cmd_sleep_cfm_host_to_card_failure; u32 num_tx_host_to_card_failure; u32 num_event_deauth; u32 num_event_disassoc; u32 num_event_link_lost; u32 num_cmd_deauth; u32 num_cmd_assoc_success; u32 num_cmd_assoc_failure; u32 num_tx_timeout; u16 timeout_cmd_id; u16 timeout_cmd_act; u16 last_cmd_id[DBG_CMD_NUM]; u16 last_cmd_act[DBG_CMD_NUM]; u16 last_cmd_index; u16 last_cmd_resp_id[DBG_CMD_NUM]; u16 last_cmd_resp_index; u16 last_event[DBG_CMD_NUM]; u16 last_event_index; u32 last_mp_wr_bitmap[MWIFIEX_DBG_SDIO_MP_NUM]; u32 last_mp_wr_ports[MWIFIEX_DBG_SDIO_MP_NUM]; u32 last_mp_wr_len[MWIFIEX_DBG_SDIO_MP_NUM]; u32 last_mp_curr_wr_port[MWIFIEX_DBG_SDIO_MP_NUM]; u8 last_sdio_mp_index; }; enum MWIFIEX_HARDWARE_STATUS { MWIFIEX_HW_STATUS_READY, MWIFIEX_HW_STATUS_INITIALIZING, MWIFIEX_HW_STATUS_RESET, MWIFIEX_HW_STATUS_NOT_READY }; enum MWIFIEX_802_11_POWER_MODE { MWIFIEX_802_11_POWER_MODE_CAM, MWIFIEX_802_11_POWER_MODE_PSP }; struct mwifiex_tx_param { u32 next_pkt_len; }; enum MWIFIEX_PS_STATE { PS_STATE_AWAKE, PS_STATE_PRE_SLEEP, PS_STATE_SLEEP_CFM, PS_STATE_SLEEP }; enum mwifiex_iface_type { MWIFIEX_SDIO, MWIFIEX_PCIE, MWIFIEX_USB }; struct mwifiex_add_ba_param { u32 tx_win_size; u32 rx_win_size; u32 timeout; u8 tx_amsdu; u8 rx_amsdu; }; struct mwifiex_tx_aggr { u8 ampdu_user; u8 ampdu_ap; u8 amsdu; }; enum mwifiex_ba_status { BA_SETUP_NONE = 0, BA_SETUP_INPROGRESS, BA_SETUP_COMPLETE }; struct mwifiex_ra_list_tbl { struct list_head list; struct sk_buff_head skb_head; u8 ra[ETH_ALEN]; u32 is_11n_enabled; u16 max_amsdu; u16 ba_pkt_count; u8 ba_packet_thr; enum mwifiex_ba_status ba_status; u8 amsdu_in_ampdu; u16 total_pkt_count; bool tdls_link; bool tx_paused; }; struct mwifiex_tid_tbl { struct list_head ra_list; }; 
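/*
 * Hedged illustration, not part of the driver: a minimal, self-contained
 * sketch of the mask-gated logging pattern used by the mwifiex_dbg() macro
 * above. The MWIFIEX_DBG_##mask token pasting lets callers write the short
 * level name (e.g. mwifiex_dbg(adapter, ERROR, ...)), which is tested
 * against adapter->debug_mask before any output is produced. All demo_*
 * names below are hypothetical; only the pattern mirrors the header.
 */
#include <stdio.h>

#define DEMO_DBG_MSG   0x00000001
#define DEMO_DBG_ERROR 0x00000004

struct demo_adapter {
	unsigned int debug_mask;	/* bitwise OR of DEMO_DBG_* levels */
};

/* Token pasting expands DEMO_DBG_##mask from the short level name. */
#define demo_dbg(adapter, mask, fmt, ...)				\
	do {								\
		if ((adapter)->debug_mask & DEMO_DBG_##mask)		\
			fprintf(stderr, fmt, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	struct demo_adapter a = { .debug_mask = DEMO_DBG_ERROR };

	demo_dbg(&a, ERROR, "printed: the ERROR bit is set\n");
	demo_dbg(&a, MSG, "suppressed: the MSG bit is not set\n");
	return 0;
}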
#define WMM_HIGHEST_PRIORITY 7 #define HIGH_PRIO_TID 7 #define LOW_PRIO_TID 0 #define NO_PKT_PRIO_TID -1 #define MWIFIEX_WMM_DRV_DELAY_MAX 510 struct mwifiex_wmm_desc { struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID]; u32 packets_out[MAX_NUM_TID]; u32 pkts_paused[MAX_NUM_TID]; /* spin lock to protect ra_list */ spinlock_t ra_list_spinlock; struct mwifiex_wmm_ac_status ac_status[IEEE80211_NUM_ACS]; enum mwifiex_wmm_ac_e ac_down_graded_vals[IEEE80211_NUM_ACS]; u32 drv_pkt_delay_max; u8 queue_priority[IEEE80211_NUM_ACS]; u32 user_pri_pkt_tx_ctrl[WMM_HIGHEST_PRIORITY + 1]; /* UP: 0 to 7 */ /* Number of transmit packets queued */ atomic_t tx_pkts_queued; /* Tracks highest priority with a packet queued */ atomic_t highest_queued_prio; }; struct mwifiex_802_11_security { u8 wpa_enabled; u8 wpa2_enabled; u8 wapi_enabled; u8 wapi_key_on; u8 wep_enabled; u32 authentication_mode; u8 is_authtype_auto; u32 encryption_mode; }; struct ieee_types_header { u8 element_id; u8 len; } __packed; struct ieee_types_vendor_specific { struct ieee_types_vendor_header vend_hdr; u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_vendor_header)]; } __packed; struct ieee_types_generic { struct ieee_types_header ieee_hdr; u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header)]; } __packed; struct ieee_types_bss_co_2040 { struct ieee_types_header ieee_hdr; u8 bss_2040co; } __packed; struct ieee_types_extcap { struct ieee_types_header ieee_hdr; u8 ext_capab[8]; } __packed; struct ieee_types_vht_cap { struct ieee_types_header ieee_hdr; struct ieee80211_vht_cap vhtcap; } __packed; struct ieee_types_vht_oper { struct ieee_types_header ieee_hdr; struct ieee80211_vht_operation vhtoper; } __packed; struct ieee_types_aid { struct ieee_types_header ieee_hdr; u16 aid; } __packed; struct mwifiex_bssdescriptor { u8 mac_address[ETH_ALEN]; struct cfg80211_ssid ssid; u32 privacy; s32 rssi; u32 channel; u32 freq; u16 beacon_period; u8 erp_flags; u32 bss_mode; u8 supported_rates[MWIFIEX_SUPPORTED_RATES]; u8 data_rates[MWIFIEX_SUPPORTED_RATES]; /* Network band. 
* BAND_B(0x01): 'b' band * BAND_G(0x02): 'g' band * BAND_A(0X04): 'a' band */ u16 bss_band; u64 fw_tsf; u64 timestamp; union ieee_types_phy_param_set phy_param_set; union ieee_types_ss_param_set ss_param_set; u16 cap_info_bitmap; struct ieee_types_wmm_parameter wmm_ie; u8 disable_11n; struct ieee80211_ht_cap *bcn_ht_cap; u16 ht_cap_offset; struct ieee80211_ht_operation *bcn_ht_oper; u16 ht_info_offset; u8 *bcn_bss_co_2040; u16 bss_co_2040_offset; u8 *bcn_ext_cap; u16 ext_cap_offset; struct ieee80211_vht_cap *bcn_vht_cap; u16 vht_cap_offset; struct ieee80211_vht_operation *bcn_vht_oper; u16 vht_info_offset; struct ieee_types_oper_mode_ntf *oper_mode; u16 oper_mode_offset; u8 disable_11ac; struct ieee_types_vendor_specific *bcn_wpa_ie; u16 wpa_offset; struct ieee_types_generic *bcn_rsn_ie; u16 rsn_offset; struct ieee_types_generic *bcn_rsnx_ie; u16 rsnx_offset; struct ieee_types_generic *bcn_wapi_ie; u16 wapi_offset; u8 *beacon_buf; u32 beacon_buf_size; u8 sensed_11h; u8 local_constraint; u8 chan_sw_ie_present; }; struct mwifiex_current_bss_params { struct mwifiex_bssdescriptor bss_descriptor; u8 wmm_enabled; u8 wmm_uapsd_enabled; u8 band; u32 num_of_rates; u8 data_rates[MWIFIEX_SUPPORTED_RATES]; }; struct mwifiex_sleep_period { u16 period; u16 reserved; }; struct mwifiex_wep_key { u32 length; u32 key_index; u32 key_length; u8 key_material[MWIFIEX_KEY_BUFFER_SIZE]; }; #define MAX_REGION_CHANNEL_NUM 2 struct mwifiex_chan_freq_power { u16 channel; u32 freq; u16 max_tx_power; u8 unsupported; }; enum state_11d_t { DISABLE_11D = 0, ENABLE_11D = 1, }; #define MWIFIEX_MAX_TRIPLET_802_11D 83 struct mwifiex_802_11d_domain_reg { u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; u8 no_of_triplet; struct ieee80211_country_ie_triplet triplet[MWIFIEX_MAX_TRIPLET_802_11D]; }; struct mwifiex_vendor_spec_cfg_ie { u16 mask; u16 flag; u8 ie[MWIFIEX_MAX_VSIE_LEN]; }; struct wps { u8 session_enable; }; struct mwifiex_roc_cfg { u64 cookie; struct ieee80211_channel chan; }; enum mwifiex_iface_work_flags { MWIFIEX_IFACE_WORK_DEVICE_DUMP, MWIFIEX_IFACE_WORK_CARD_RESET, }; enum mwifiex_adapter_work_flags { MWIFIEX_SURPRISE_REMOVED, MWIFIEX_IS_CMD_TIMEDOUT, MWIFIEX_IS_SUSPENDED, MWIFIEX_IS_HS_CONFIGURED, MWIFIEX_IS_HS_ENABLING, MWIFIEX_IS_REQUESTING_FW_VEREXT, }; struct mwifiex_band_config { u8 chan_band:2; u8 chan_width:2; u8 chan2_offset:2; u8 scan_mode:2; } __packed; struct mwifiex_channel_band { struct mwifiex_band_config band_config; u8 channel; }; struct mwifiex_private { struct mwifiex_adapter *adapter; u8 bss_type; u8 bss_role; u8 bss_priority; u8 bss_num; u8 bss_started; u8 auth_flag; u16 auth_alg; u8 frame_type; u8 curr_addr[ETH_ALEN]; u8 media_connected; u8 port_open; u8 usb_port; u32 num_tx_timeout; /* track consecutive timeout */ u8 tx_timeout_cnt; struct net_device *netdev; struct net_device_stats stats; u32 curr_pkt_filter; u32 bss_mode; u32 pkt_tx_ctrl; u16 tx_power_level; u8 max_tx_power_level; u8 min_tx_power_level; u32 tx_ant; u32 rx_ant; u8 tx_rate; u8 tx_htinfo; u8 rxpd_htinfo; u8 rxpd_rate; u16 rate_bitmap; u16 bitmap_rates[MAX_BITMAP_RATES_SIZE]; u32 data_rate; u8 is_data_rate_auto; u16 bcn_avg_factor; u16 data_avg_factor; s16 data_rssi_last; s16 data_nf_last; s16 data_rssi_avg; s16 data_nf_avg; s16 bcn_rssi_last; s16 bcn_nf_last; s16 bcn_rssi_avg; s16 bcn_nf_avg; struct mwifiex_bssdescriptor *attempted_bss_desc; struct cfg80211_ssid prev_ssid; u8 prev_bssid[ETH_ALEN]; struct mwifiex_current_bss_params curr_bss_params; u16 beacon_period; u8 dtim_period; u16 listen_interval; u16 atim_window; u8 
adhoc_channel; u8 adhoc_state; struct mwifiex_802_11_security sec_info; struct mwifiex_wep_key wep_key[NUM_WEP_KEYS]; u16 wep_key_curr_index; u8 wpa_ie[256]; u16 wpa_ie_len; u8 wpa_is_gtk_set; struct host_cmd_ds_802_11_key_material aes_key; struct host_cmd_ds_802_11_key_material_v2 aes_key_v2; u8 wapi_ie[256]; u16 wapi_ie_len; u8 *wps_ie; u16 wps_ie_len; u8 wmm_required; u8 wmm_enabled; u8 wmm_qosinfo; struct mwifiex_wmm_desc wmm; atomic_t wmm_tx_pending[IEEE80211_NUM_ACS]; struct list_head sta_list; /* spin lock for associated station/TDLS peers list */ spinlock_t sta_list_spinlock; struct list_head auto_tdls_list; /* spin lock for auto TDLS peer list */ spinlock_t auto_tdls_lock; struct list_head tx_ba_stream_tbl_ptr; /* spin lock for tx_ba_stream_tbl_ptr queue */ spinlock_t tx_ba_stream_tbl_lock; struct mwifiex_tx_aggr aggr_prio_tbl[MAX_NUM_TID]; struct mwifiex_add_ba_param add_ba_param; u16 rx_seq[MAX_NUM_TID]; u8 tos_to_tid_inv[MAX_NUM_TID]; struct list_head rx_reorder_tbl_ptr; /* spin lock for rx_reorder_tbl_ptr queue */ spinlock_t rx_reorder_tbl_lock; #define MWIFIEX_ASSOC_RSP_BUF_SIZE 500 u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE]; u32 assoc_rsp_size; struct cfg80211_bss *req_bss; #define MWIFIEX_GENIE_BUF_SIZE 256 u8 gen_ie_buf[MWIFIEX_GENIE_BUF_SIZE]; u8 gen_ie_buf_len; struct mwifiex_vendor_spec_cfg_ie vs_ie[MWIFIEX_MAX_VSIE_NUM]; #define MWIFIEX_ASSOC_TLV_BUF_SIZE 256 u8 assoc_tlv_buf[MWIFIEX_ASSOC_TLV_BUF_SIZE]; u8 assoc_tlv_buf_len; u8 *curr_bcn_buf; u32 curr_bcn_size; /* spin lock for beacon buffer */ spinlock_t curr_bcn_buf_lock; struct wireless_dev wdev; struct mwifiex_chan_freq_power cfp; u32 versionstrsel; char version_str[MWIFIEX_VERSION_STR_LENGTH]; #ifdef CONFIG_DEBUG_FS struct dentry *dfs_dev_dir; #endif u16 current_key_index; struct mutex async_mutex; struct cfg80211_scan_request *scan_request; u8 cfg_bssid[6]; struct wps wps; u8 scan_block; s32 cqm_rssi_thold; u32 cqm_rssi_hyst; u8 subsc_evt_rssi_state; struct mwifiex_ds_misc_subsc_evt async_subsc_evt_storage; struct mwifiex_ie mgmt_ie[MAX_MGMT_IE_INDEX]; u16 beacon_idx; u16 proberesp_idx; u16 assocresp_idx; u16 gen_idx; u8 ap_11n_enabled; u8 ap_11ac_enabled; bool host_mlme_reg; u32 mgmt_frame_mask; struct mwifiex_roc_cfg roc_cfg; bool scan_aborting; u8 sched_scanning; u8 csa_chan; unsigned long csa_expire_time; u8 del_list_idx; bool hs2_enabled; struct mwifiex_uap_bss_param bss_cfg; struct cfg80211_chan_def bss_chandef; struct station_parameters *sta_params; struct sk_buff_head tdls_txq; u8 check_tdls_tx; struct timer_list auto_tdls_timer; bool auto_tdls_timer_active; struct idr ack_status_frames; /* spin lock for ack status */ spinlock_t ack_status_lock; /** rx histogram data */ struct mwifiex_histogram_data *hist_data; struct cfg80211_chan_def dfs_chandef; struct workqueue_struct *dfs_cac_workqueue; struct delayed_work dfs_cac_work; struct workqueue_struct *dfs_chan_sw_workqueue; struct delayed_work dfs_chan_sw_work; struct cfg80211_beacon_data beacon_after; struct mwifiex_11h_intf_state state_11h; struct mwifiex_ds_mem_rw mem_rw; struct sk_buff_head bypass_txq; struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX]; bool ht_param_present; }; struct mwifiex_tx_ba_stream_tbl { struct list_head list; int tid; u8 ra[ETH_ALEN]; enum mwifiex_ba_status ba_status; u8 amsdu; }; struct mwifiex_rx_reorder_tbl; struct reorder_tmr_cnxt { struct timer_list timer; struct mwifiex_rx_reorder_tbl *ptr; struct mwifiex_private *priv; u8 timer_is_set; }; struct mwifiex_rx_reorder_tbl { struct list_head list; int 
tid; u8 ta[ETH_ALEN]; int init_win; int start_win; int win_size; void **rx_reorder_ptr; struct reorder_tmr_cnxt timer_context; u8 amsdu; u8 flags; }; struct mwifiex_bss_prio_node { struct list_head list; struct mwifiex_private *priv; }; struct mwifiex_bss_prio_tbl { struct list_head bss_prio_head; /* spin lock for bss priority */ spinlock_t bss_prio_lock; struct mwifiex_bss_prio_node *bss_prio_cur; }; struct cmd_ctrl_node { struct list_head list; struct mwifiex_private *priv; u32 cmd_no; u32 cmd_flag; struct sk_buff *cmd_skb; struct sk_buff *resp_skb; void *data_buf; u32 wait_q_enabled; struct sk_buff *skb; u8 *condition; u8 cmd_wait_q_woken; }; struct mwifiex_bss_priv { u8 band; u64 fw_tsf; }; struct mwifiex_tdls_capab { __le16 capab; u8 rates[32]; u8 rates_len; u8 qos_info; u8 coex_2040; u16 aid; struct ieee80211_ht_cap ht_capb; struct ieee80211_ht_operation ht_oper; struct ieee_types_extcap extcap; struct ieee_types_generic rsn_ie; struct ieee80211_vht_cap vhtcap; struct ieee80211_vht_operation vhtoper; }; struct mwifiex_station_stats { u64 last_rx; s8 rssi; u64 rx_bytes; u64 tx_bytes; u32 rx_packets; u32 tx_packets; u32 tx_failed; u8 last_tx_rate; u8 last_tx_htinfo; }; /* This is AP/TDLS specific structure which stores information * about associated/peer STA */ struct mwifiex_sta_node { struct list_head list; u8 mac_addr[ETH_ALEN]; u8 is_wmm_enabled; u8 is_11n_enabled; u8 is_11ac_enabled; u8 ampdu_sta[MAX_NUM_TID]; u16 rx_seq[MAX_NUM_TID]; u16 max_amsdu; u8 tdls_status; struct mwifiex_tdls_capab tdls_cap; struct mwifiex_station_stats stats; u8 tx_pause; }; struct mwifiex_auto_tdls_peer { struct list_head list; u8 mac_addr[ETH_ALEN]; u8 tdls_status; int rssi; unsigned long rssi_jiffies; u8 failure_count; u8 do_discover; }; #define MWIFIEX_TYPE_AGGR_DATA_V2 11 #define MWIFIEX_BUS_AGGR_MODE_LEN_V2 (2) #define MWIFIEX_BUS_AGGR_MAX_LEN 16000 #define MWIFIEX_BUS_AGGR_MAX_NUM 10 struct bus_aggr_params { u16 enable; u16 mode; u16 tx_aggr_max_size; u16 tx_aggr_max_num; u16 tx_aggr_align; }; struct mwifiex_if_ops { int (*init_if) (struct mwifiex_adapter *); void (*cleanup_if) (struct mwifiex_adapter *); int (*check_fw_status) (struct mwifiex_adapter *, u32); int (*check_winner_status)(struct mwifiex_adapter *); int (*prog_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); int (*register_dev) (struct mwifiex_adapter *); void (*unregister_dev) (struct mwifiex_adapter *); int (*enable_int) (struct mwifiex_adapter *); void (*disable_int) (struct mwifiex_adapter *); int (*process_int_status) (struct mwifiex_adapter *); int (*host_to_card) (struct mwifiex_adapter *, u8, struct sk_buff *, struct mwifiex_tx_param *); int (*wakeup) (struct mwifiex_adapter *); int (*wakeup_complete) (struct mwifiex_adapter *); /* Interface specific functions */ void (*update_mp_end_port) (struct mwifiex_adapter *, u16); void (*cleanup_mpa_buf) (struct mwifiex_adapter *); int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *); int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *); void (*init_fw_port)(struct mwifiex_adapter *adapter); int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); void (*card_reset) (struct mwifiex_adapter *); int (*reg_dump)(struct mwifiex_adapter *, char *); void (*device_dump)(struct mwifiex_adapter *); void (*clean_pcie_ring)(struct mwifiex_adapter *adapter); void (*iface_work)(struct work_struct *work); void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter); void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *); void 
(*multi_port_resync)(struct mwifiex_adapter *); bool (*is_port_ready)(struct mwifiex_private *); void (*down_dev)(struct mwifiex_adapter *); void (*up_dev)(struct mwifiex_adapter *); }; struct mwifiex_adapter { u8 iface_type; unsigned int debug_mask; struct mwifiex_iface_comb iface_limit; struct mwifiex_iface_comb curr_iface_comb; struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM]; u8 priv_num; const struct firmware *firmware; char fw_name[32]; int winner; struct device *dev; struct wiphy *wiphy; u8 perm_addr[ETH_ALEN]; unsigned long work_flags; u32 fw_release_number; u8 intf_hdr_len; void *card; struct mwifiex_if_ops if_ops; atomic_t bypass_tx_pending; atomic_t rx_pending; atomic_t tx_pending; atomic_t cmd_pending; atomic_t tx_hw_pending; struct workqueue_struct *workqueue; struct work_struct main_work; struct workqueue_struct *rx_workqueue; struct work_struct rx_work; struct workqueue_struct *host_mlme_workqueue; struct work_struct host_mlme_work; bool rx_work_enabled; bool rx_processing; bool delay_main_work; bool rx_locked; bool main_locked; struct mwifiex_bss_prio_tbl bss_prio_tbl[MWIFIEX_MAX_BSS_NUM]; /* spin lock for main process */ spinlock_t main_proc_lock; u32 mwifiex_processing; u8 more_task_flag; u16 tx_buf_size; u16 curr_tx_buf_size; /* sdio single port rx aggregation capability */ bool host_disable_sdio_rx_aggr; bool sdio_rx_aggr_enable; u16 sdio_rx_block_size; u32 ioport; enum MWIFIEX_HARDWARE_STATUS hw_status; u16 number_of_antenna; u32 fw_cap_info; /* spin lock for interrupt handling */ spinlock_t int_lock; u8 int_status; u32 event_cause; struct sk_buff *event_skb; u8 upld_buf[MWIFIEX_UPLD_SIZE]; u8 data_sent; u8 cmd_sent; u8 cmd_resp_received; u8 event_received; u8 data_received; u8 assoc_resp_received; struct mwifiex_private *priv_link_lost; u8 host_mlme_link_lost; u16 seq_num; struct cmd_ctrl_node *cmd_pool; struct cmd_ctrl_node *curr_cmd; /* spin lock for command */ spinlock_t mwifiex_cmd_lock; struct timer_list cmd_timer; struct list_head cmd_free_q; /* spin lock for cmd_free_q */ spinlock_t cmd_free_q_lock; struct list_head cmd_pending_q; /* spin lock for cmd_pending_q */ spinlock_t cmd_pending_q_lock; struct list_head scan_pending_q; /* spin lock for scan_pending_q */ spinlock_t scan_pending_q_lock; /* spin lock for RX processing routine */ spinlock_t rx_proc_lock; struct sk_buff_head tx_data_q; atomic_t tx_queued; u32 scan_processing; u16 region_code; struct mwifiex_802_11d_domain_reg domain_reg; u16 scan_probes; u32 scan_mode; u16 specific_scan_time; u16 active_scan_time; u16 passive_scan_time; u16 scan_chan_gap_time; u8 fw_bands; u8 adhoc_start_band; u8 config_bands; u8 tx_lock_flag; struct mwifiex_sleep_period sleep_period; u16 ps_mode; u32 ps_state; u8 need_to_wakeup; u16 multiple_dtim; u16 local_listen_interval; u16 null_pkt_interval; struct sk_buff *sleep_cfm; u16 bcn_miss_time_out; u16 adhoc_awake_period; u8 is_deep_sleep; u8 delay_null_pkt; u16 delay_to_ps; u16 enhanced_ps_mode; u8 pm_wakeup_card_req; u16 gen_null_pkt; u16 pps_uapsd_mode; u32 pm_wakeup_fw_try; struct timer_list wakeup_timer; struct mwifiex_hs_config_param hs_cfg; u8 hs_activated; u8 hs_activated_manually; u16 hs_activate_wait_q_woken; wait_queue_head_t hs_activate_wait_q; u8 event_body[MAX_EVENT_SIZE]; u32 hw_dot_11n_dev_cap; u8 hw_dev_mcs_support; u8 user_dev_mcs_support; u8 adhoc_11n_enabled; u8 sec_chan_offset; struct mwifiex_dbg dbg; u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE]; u32 arp_filter_size; struct mwifiex_wait_queue cmd_wait_q; u8 scan_wait_q_woken; spinlock_t queue_lock; /* lock for 
tx queues */ u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; u16 max_mgmt_ie_index; const struct firmware *cal_data; struct device_node *dt_node; /* 11AC */ u32 is_hw_11ac_capable; u32 hw_dot_11ac_dev_cap; u32 hw_dot_11ac_mcs_support; u32 usr_dot_11ac_dev_cap_bg; u32 usr_dot_11ac_dev_cap_a; u32 usr_dot_11ac_mcs_support; atomic_t pending_bridged_pkts; /* For synchronizing FW initialization with device lifecycle. */ struct completion *fw_done; bool is_up; bool ext_scan; bool host_mlme_enabled; struct ieee80211_txrx_stypes mwifiex_mgmt_stypes[NUM_NL80211_IFTYPES]; u8 fw_api_ver; u8 key_api_major_ver, key_api_minor_ver; u8 max_p2p_conn, max_sta_conn; struct memory_type_mapping *mem_type_mapping_tbl; u8 num_mem_types; bool scan_chan_gap_enabled; struct sk_buff_head rx_data_q; bool mfg_mode; struct mwifiex_chan_stats *chan_stats; u32 num_in_chan_stats; int survey_idx; bool auto_tdls; u8 coex_scan; u8 coex_min_scan_time; u8 coex_max_scan_time; u8 coex_win_size; u8 coex_tx_win_size; u8 coex_rx_win_size; bool drcs_enabled; u8 active_scan_triggered; bool usb_mc_status; bool usb_mc_setup; struct cfg80211_wowlan_nd_info *nd_info; struct ieee80211_regdomain *regd; /* Wake-on-WLAN (WoWLAN) */ int irq_wakeup; bool wake_by_wifi; /* Aggregation parameters*/ struct bus_aggr_params bus_aggr; /* Device dump data/length */ void *devdump_data; int devdump_len; struct delayed_work devdump_work; bool ignore_btcoex_events; }; void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter); int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); void mwifiex_set_trans_start(struct net_device *dev); void mwifiex_stop_net_dev_queue(struct net_device *netdev, struct mwifiex_adapter *adapter); void mwifiex_wake_up_net_dev_queue(struct net_device *netdev, struct mwifiex_adapter *adapter); int mwifiex_init_priv(struct mwifiex_private *priv); void mwifiex_free_priv(struct mwifiex_private *priv); int mwifiex_init_fw(struct mwifiex_adapter *adapter); void mwifiex_shutdown_drv(struct mwifiex_adapter *adapter); int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *); int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb); int mwifiex_uap_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb); void mwifiex_host_mlme_disconnect(struct mwifiex_private *priv, u16 reason_code, u8 *sa); int mwifiex_process_mgmt_packet(struct mwifiex_private *priv, struct sk_buff *skb); int mwifiex_process_event(struct mwifiex_adapter *adapter); int mwifiex_complete_cmd(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node); int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, u16 cmd_action, u32 cmd_oid, void *data_buf, bool sync); void mwifiex_cmd_timeout_func(struct timer_list *t); int mwifiex_get_debug_info(struct mwifiex_private *, struct mwifiex_debug_info *); int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter); void mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter); void mwifiex_free_cmd_buffers(struct mwifiex_adapter *adapter); void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter); void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter); void mwifiex_cancel_scan(struct mwifiex_adapter *adapter); void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node); void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node); int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter); int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter); void 
mwifiex_process_assoc_resp(struct mwifiex_adapter *adapter); int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb); int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, struct mwifiex_tx_param *tx_param); int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags); int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, struct sk_buff *skb, int aggr, int status); void mwifiex_clean_txrx(struct mwifiex_private *priv); u8 mwifiex_check_last_packet_indication(struct mwifiex_private *priv); void mwifiex_check_ps_cond(struct mwifiex_adapter *adapter); void mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *, u8 *, u32); int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, u16 cmd_action, uint16_t ps_bitmap, struct mwifiex_ds_auto_ds *auto_ds); int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv, struct host_cmd_ds_command *resp, struct mwifiex_ds_pm_cfg *pm_cfg); void mwifiex_process_hs_config(struct mwifiex_adapter *adapter); void mwifiex_hs_activated_event(struct mwifiex_adapter *adapter, u8 activated); int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action, int cmd_type, struct mwifiex_ds_hs_cfg *hs_cfg); int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, struct host_cmd_ds_command *resp); int mwifiex_process_rx_packet(struct mwifiex_private *priv, struct sk_buff *skb); int mwifiex_sta_prepare_cmd(struct mwifiex_private *, uint16_t cmd_no, u16 cmd_action, u32 cmd_oid, void *data_buf, void *cmd_buf); int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, u16 cmd_action, u32 cmd_oid, void *data_buf, void *cmd_buf); int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no, struct host_cmd_ds_command *resp); int mwifiex_process_sta_rx_packet(struct mwifiex_private *, struct sk_buff *skb); int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, struct sk_buff *skb); int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, struct sk_buff *skb); int mwifiex_process_sta_event(struct mwifiex_private *); int mwifiex_process_uap_event(struct mwifiex_private *); void mwifiex_delete_all_station_list(struct mwifiex_private *priv); void mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr); void mwifiex_process_sta_txpd(struct mwifiex_private *priv, struct sk_buff *skb); void mwifiex_process_uap_txpd(struct mwifiex_private *priv, struct sk_buff *skb); int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta); int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd, struct mwifiex_scan_cmd_config *scan_cfg); void mwifiex_queue_scan_cmd(struct mwifiex_private *priv, struct cmd_ctrl_node *cmd_node); int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, struct host_cmd_ds_command *resp); int mwifiex_associate(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc); int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, struct mwifiex_bssdescriptor *bss_desc); int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, struct host_cmd_ds_command *resp); void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason, bool from_ap); u8 mwifiex_band_to_radio_type(u8 band); int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac); void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter); int mwifiex_adhoc_start(struct mwifiex_private *priv, struct cfg80211_ssid *adhoc_ssid); int 
mwifiex_adhoc_join(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc); int mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, struct cfg80211_ssid *req_ssid); int mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, struct mwifiex_bssdescriptor *bss_desc); int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, struct host_cmd_ds_command *resp); int mwifiex_cmd_802_11_bg_scan_query(struct host_cmd_ds_command *cmd); struct mwifiex_chan_freq_power *mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq); u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index, u8 ht_info); u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv, u8 index, u8 ht_info); u32 mwifiex_find_freq_from_band_chan(u8, u8); int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask, u8 **buffer); u32 mwifiex_get_active_data_rates(struct mwifiex_private *priv, u8 *rates); u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates); u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv, u8 *rates, u8 radio_type); u8 mwifiex_is_rate_auto(struct mwifiex_private *priv); extern u16 region_code_index[MWIFIEX_MAX_REGION_CODE]; void mwifiex_save_curr_bcn(struct mwifiex_private *priv); void mwifiex_free_curr_bcn(struct mwifiex_private *priv); int mwifiex_cmd_get_hw_spec(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd); int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, struct host_cmd_ds_command *resp); int is_command_pending(struct mwifiex_adapter *adapter); void mwifiex_init_priv_params(struct mwifiex_private *priv, struct net_device *dev); int mwifiex_set_secure_params(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_config, struct cfg80211_ap_settings *params); void mwifiex_set_ht_params(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_ap_settings *params); void mwifiex_set_vht_params(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_ap_settings *params); void mwifiex_set_tpc_params(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_ap_settings *params); void mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_ap_settings *params); void mwifiex_set_vht_width(struct mwifiex_private *priv, enum nl80211_chan_width width, bool ap_11ac_disable); void mwifiex_set_wmm_params(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_ap_settings *params); void mwifiex_set_ba_params(struct mwifiex_private *priv); void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *pmadapter); void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv, struct sk_buff *event_skb); void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv); int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, void *data_buf); int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv, struct host_cmd_ds_command *resp); int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, void *buf); int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, void *data_buf); int mwifiex_stop_bg_scan(struct mwifiex_private *priv); /* * This function checks if the queuing is RA based or not. 
*/ static inline u8 mwifiex_queuing_ra_based(struct mwifiex_private *priv) { /* * Currently we assume if we are in Infra, then DA=RA. This might not be * true in the future */ if ((priv->bss_mode == NL80211_IFTYPE_STATION || priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) && (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)) return false; return true; } /* * This function copies rates. */ static inline u32 mwifiex_copy_rates(u8 *dest, u32 pos, u8 *src, int len) { int i; for (i = 0; i < len && src[i]; i++, pos++) { if (pos >= MWIFIEX_SUPPORTED_RATES) break; dest[pos] = src[i]; } return pos; } /* * This function returns the correct private structure pointer based * upon the BSS type and BSS number. */ static inline struct mwifiex_private * mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter, u8 bss_num, u8 bss_type) { int i; for (i = 0; i < adapter->priv_num; i++) { if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED) continue; if ((adapter->priv[i]->bss_num == bss_num) && (adapter->priv[i]->bss_type == bss_type)) break; } return ((i < adapter->priv_num) ? adapter->priv[i] : NULL); } /* * This function returns the first available private structure pointer * based upon the BSS role. */ static inline struct mwifiex_private * mwifiex_get_priv(struct mwifiex_adapter *adapter, enum mwifiex_bss_role bss_role) { int i; for (i = 0; i < adapter->priv_num; i++) { if (bss_role == MWIFIEX_BSS_ROLE_ANY || GET_BSS_ROLE(adapter->priv[i]) == bss_role) break; } return ((i < adapter->priv_num) ? adapter->priv[i] : NULL); } /* * This function checks available bss_num when adding new interface or * changing interface type. */ static inline u8 mwifiex_get_unused_bss_num(struct mwifiex_adapter *adapter, u8 bss_type) { u8 i, j; int index[MWIFIEX_MAX_BSS_NUM]; memset(index, 0, sizeof(index)); for (i = 0; i < adapter->priv_num; i++) if (adapter->priv[i]->bss_type == bss_type && !(adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)) { index[adapter->priv[i]->bss_num] = 1; } for (j = 0; j < MWIFIEX_MAX_BSS_NUM; j++) if (!index[j]) return j; return -1; } /* * This function returns the first available unused private structure pointer. */ static inline struct mwifiex_private * mwifiex_get_unused_priv_by_bss_type(struct mwifiex_adapter *adapter, u8 bss_type) { u8 i; for (i = 0; i < adapter->priv_num; i++) if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED) { adapter->priv[i]->bss_num = mwifiex_get_unused_bss_num(adapter, bss_type); break; } return ((i < adapter->priv_num) ? adapter->priv[i] : NULL); } /* * This function returns the driver private structure of a network device. */ static inline struct mwifiex_private * mwifiex_netdev_get_priv(struct net_device *dev) { return (struct mwifiex_private *) (*(unsigned long *) netdev_priv(dev)); } /* * This function checks if a skb holds a management frame. */ static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb) { return (get_unaligned_le32(skb->data) == PKT_TYPE_MGMT); } /* This function retrieves channel closed for operation by Channel * Switch Announcement. 
*/ static inline u8 mwifiex_11h_get_csa_closed_channel(struct mwifiex_private *priv) { if (!priv->csa_chan) return 0; /* Clear csa channel, if DFS channel move time has passed */ if (time_after(jiffies, priv->csa_expire_time)) { priv->csa_chan = 0; priv->csa_expire_time = 0; } return priv->csa_chan; } static inline u8 mwifiex_is_any_intf_active(struct mwifiex_private *priv) { struct mwifiex_private *priv_num; int i; for (i = 0; i < priv->adapter->priv_num; i++) { priv_num = priv->adapter->priv[i]; if (priv_num) { if ((GET_BSS_ROLE(priv_num) == MWIFIEX_BSS_ROLE_UAP && priv_num->bss_started) || (GET_BSS_ROLE(priv_num) == MWIFIEX_BSS_ROLE_STA && priv_num->media_connected)) return 1; } } return 0; } static inline u8 mwifiex_is_tdls_link_setup(u8 status) { switch (status) { case TDLS_SETUP_COMPLETE: case TDLS_CHAN_SWITCHING: case TDLS_IN_BASE_CHAN: case TDLS_IN_OFF_CHAN: return true; default: break; } return false; } /* Disable platform specific wakeup interrupt */ static inline void mwifiex_disable_wake(struct mwifiex_adapter *adapter) { if (adapter->irq_wakeup >= 0) { disable_irq_wake(adapter->irq_wakeup); disable_irq(adapter->irq_wakeup); if (adapter->wake_by_wifi) /* Undo our disable, since interrupt handler already * did this. */ enable_irq(adapter->irq_wakeup); } } /* Enable platform specific wakeup interrupt */ static inline void mwifiex_enable_wake(struct mwifiex_adapter *adapter) { /* Enable platform specific wakeup interrupt */ if (adapter->irq_wakeup >= 0) { adapter->wake_by_wifi = false; enable_irq(adapter->irq_wakeup); enable_irq_wake(adapter->irq_wakeup); } } int mwifiex_init_shutdown_fw(struct mwifiex_private *priv, u32 func_init_shutdown); int mwifiex_add_card(void *card, struct completion *fw_done, const struct mwifiex_if_ops *if_ops, u8 iface_type, struct device *dev); int mwifiex_remove_card(struct mwifiex_adapter *adapter); void mwifiex_get_version(struct mwifiex_adapter *adapter, char *version, int maxlen); int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, struct mwifiex_multicast_list *mcast_list); int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist, struct net_device *dev); int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_queued); int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, struct cfg80211_ssid *req_ssid); int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type); int mwifiex_enable_hs(struct mwifiex_adapter *adapter); int mwifiex_disable_auto_ds(struct mwifiex_private *priv); int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, u32 *rate); int mwifiex_request_scan(struct mwifiex_private *priv, struct cfg80211_ssid *req_ssid); int mwifiex_scan_networks(struct mwifiex_private *priv, const struct mwifiex_user_scan_cfg *user_scan_in); int mwifiex_set_radio(struct mwifiex_private *priv, u8 option); int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp, const u8 *key, int key_len, u8 key_index, const u8 *mac_addr, int disable); int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len); int mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel); int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action, struct ieee80211_channel *chan, unsigned int duration); int mwifiex_get_stats_info(struct mwifiex_private *priv, struct mwifiex_ds_get_stats *log); int mwifiex_reg_write(struct mwifiex_private *priv, u32 reg_type, u32 reg_offset, u32 reg_value); int mwifiex_reg_read(struct 
mwifiex_private *priv, u32 reg_type, u32 reg_offset, u32 *value); int mwifiex_eeprom_read(struct mwifiex_private *priv, u16 offset, u16 bytes, u8 *value); int mwifiex_set_11n_httx_cfg(struct mwifiex_private *priv, int data); int mwifiex_get_11n_httx_cfg(struct mwifiex_private *priv, int *data); int mwifiex_set_tx_rate_cfg(struct mwifiex_private *priv, int tx_rate_index); int mwifiex_get_tx_rate_cfg(struct mwifiex_private *priv, int *tx_rate_index); int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode); int mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version, int max_len); int mwifiex_set_tx_power(struct mwifiex_private *priv, struct mwifiex_power_cfg *power_cfg); int mwifiex_main_process(struct mwifiex_adapter *); int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb); int mwifiex_get_bss_info(struct mwifiex_private *, struct mwifiex_bss_info *); int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, struct cfg80211_bss *bss, struct mwifiex_bssdescriptor *bss_desc); int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, struct mwifiex_bssdescriptor *bss_entry); int mwifiex_check_network_compatibility(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc); u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type); u8 mwifiex_get_chan_type(struct mwifiex_private *priv); struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, enum nl80211_iftype type, struct vif_params *params); int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev); void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config); int mwifiex_add_wowlan_magic_pkt_filter(struct mwifiex_adapter *adapter); int mwifiex_set_mgmt_ies(struct mwifiex_private *priv, struct cfg80211_beacon_data *data); int mwifiex_del_mgmt_ies(struct mwifiex_private *priv); const u8 *mwifiex_11d_code_2_region(u8 code); void mwifiex_uap_set_channel(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_chan_def chandef); int mwifiex_config_start_uap(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg); void mwifiex_config_uap_11d(struct mwifiex_private *priv, struct cfg80211_beacon_data *beacon_data); void mwifiex_init_11h_params(struct mwifiex_private *priv); int mwifiex_is_11h_active(struct mwifiex_private *priv); int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag); void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer, struct mwifiex_bssdescriptor *bss_desc); int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv); int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv, struct device_node *node, const char *prefix); void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv); extern const struct ethtool_ops mwifiex_ethtool_ops; void mwifiex_del_all_sta_list(struct mwifiex_private *priv); void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac); void mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies, int ies_len, struct mwifiex_sta_node *node); struct mwifiex_sta_node * mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac); struct mwifiex_sta_node * mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac); u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv); u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv); int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, 
const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *extra_ies, size_t extra_ies_len); int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *extra_ies, size_t extra_ies_len); void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, u8 *buf, int len); int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action); int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac); int mwifiex_get_tdls_list(struct mwifiex_private *priv, struct tdls_peer_info *buf); void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv); bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv); u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band, u32 pri_chan, u8 chan_bw); int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter); int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb); void mwifiex_flush_auto_tdls_list(struct mwifiex_private *priv); void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv, const u8 *mac, u8 link_status); void mwifiex_auto_tdls_update_peer_signal(struct mwifiex_private *priv, u8 *mac, s8 snr, s8 nflr); void mwifiex_check_auto_tdls(struct timer_list *t); void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac); void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv); void mwifiex_clean_auto_tdls(struct mwifiex_private *priv); int mwifiex_config_tdls_enable(struct mwifiex_private *priv); int mwifiex_config_tdls_disable(struct mwifiex_private *priv); int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv); int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac); int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac, u8 primary_chan, u8 second_chan_offset, u8 band); int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, void *data_buf); int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv, struct sk_buff *skb); void mwifiex_parse_tx_status_event(struct mwifiex_private *priv, void *event_body); struct sk_buff * mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv, struct sk_buff *skb, u8 flag, u64 *cookie); void mwifiex_dfs_cac_work_queue(struct work_struct *work); void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work); void mwifiex_abort_cac(struct mwifiex_private *priv); int mwifiex_stop_radar_detection(struct mwifiex_private *priv, struct cfg80211_chan_def *chandef); int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv, struct sk_buff *skb); void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr, s8 nflr); void mwifiex_hist_data_reset(struct mwifiex_private *priv); void mwifiex_hist_data_add(struct mwifiex_private *priv, u8 rx_rate, s8 snr, s8 nflr); u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv, u8 rx_rate, u8 ht_info); void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter); void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter); void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter); void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); void mwifiex_fw_dump_event(struct mwifiex_private *priv); void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, int cmd_type, struct mwifiex_ds_wakeup_reason *wakeup_reason); int mwifiex_get_chan_info(struct 
mwifiex_private *priv, struct mwifiex_channel_band *channel_band); int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv, struct host_cmd_ds_command *resp, struct host_cmd_ds_wakeup_reason *wakeup_reason); void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter); void mwifiex_11n_delba(struct mwifiex_private *priv, int tid); int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy); void mwifiex_process_tx_pause_event(struct mwifiex_private *priv, struct sk_buff *event); void mwifiex_process_multi_chan_event(struct mwifiex_private *priv, struct sk_buff *event_skb); void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter); int mwifiex_set_mac_address(struct mwifiex_private *priv, struct net_device *dev, bool external, u8 *new_mac); void mwifiex_devdump_tmo_func(unsigned long function_context); #ifdef CONFIG_DEBUG_FS void mwifiex_debugfs_init(void); void mwifiex_debugfs_remove(void); void mwifiex_dev_debugfs_init(struct mwifiex_private *priv); void mwifiex_dev_debugfs_remove(struct mwifiex_private *priv); #endif int mwifiex_reinit_sw(struct mwifiex_adapter *adapter); int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter); #endif /* !_MWIFIEX_MAIN_H_ */ |
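/*
 * Hedged illustration, not from the driver: how the MWIFIEX_ALIGN_ADDR()
 * round-up macro defined earlier in this header behaves. For a power-of-two
 * alignment 'a', adding (a - 1) and masking with ~(a - 1) rounds p up to the
 * next multiple of a, while an already aligned value is returned unchanged.
 * DEMO_ALIGN_ADDR is a local copy so the sketch compiles stand-alone.
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* 0x1003 + 7 = 0x100A; masking with ~7 rounds up to 0x1008. */
	assert(DEMO_ALIGN_ADDR(0x1003, 8) == 0x1008);
	/* An address that is already 8-byte aligned is unchanged. */
	assert(DEMO_ALIGN_ADDR(0x1008, 8) == 0x1008);
	printf("alignment round-up behaves as expected\n");
	return 0;
}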
/* * * dvb_ringbuffer.c: ring buffer implementation for the dvb driver * * Copyright (C) 2003 Oliver Endriss * Copyright (C) 2004 Andrew de Quincey * * based on code originally found in av7110.c & dvb_ci.c: * Copyright (C) 1999-2003 Ralph Metzler * & Marcus Metzler for convergence integrated media GmbH * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details.
*/ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/uaccess.h> #include <media/dvb_ringbuffer.h> #define PKT_READY 0 #define PKT_DISPOSED 1 void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len) { rbuf->pread=rbuf->pwrite=0; rbuf->data=data; rbuf->size=len; rbuf->error=0; init_waitqueue_head(&rbuf->queue); spin_lock_init(&(rbuf->lock)); } int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf) { /* smp_load_acquire() to load write pointer on reader side * this pairs with smp_store_release() in dvb_ringbuffer_write(), * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset() * * for memory barriers also see Documentation/core-api/circular-buffers.rst */ return (rbuf->pread == smp_load_acquire(&rbuf->pwrite)); } ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf) { ssize_t free; /* READ_ONCE() to load read pointer on writer side * this pairs with smp_store_release() in dvb_ringbuffer_read(), * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(), * or dvb_ringbuffer_reset() */ free = READ_ONCE(rbuf->pread) - rbuf->pwrite; if (free <= 0) free += rbuf->size; return free-1; } ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf) { ssize_t avail; /* smp_load_acquire() to load write pointer on reader side * this pairs with smp_store_release() in dvb_ringbuffer_write(), * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset() */ avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread; if (avail < 0) avail += rbuf->size; return avail; } void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf) { /* dvb_ringbuffer_flush() counts as read operation * smp_load_acquire() to load write pointer * smp_store_release() to update read pointer, this ensures that the * correct pointer is visible for subsequent dvb_ringbuffer_free() * calls on other cpu cores */ smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite)); rbuf->error = 0; } EXPORT_SYMBOL(dvb_ringbuffer_flush); void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf) { /* dvb_ringbuffer_reset() counts as read and write operation * smp_store_release() to update read pointer */ smp_store_release(&rbuf->pread, 0); /* smp_store_release() to update write pointer */ smp_store_release(&rbuf->pwrite, 0); rbuf->error = 0; } void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf) { unsigned long flags; spin_lock_irqsave(&rbuf->lock, flags); dvb_ringbuffer_flush(rbuf); spin_unlock_irqrestore(&rbuf->lock, flags); wake_up(&rbuf->queue); } ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, size_t len) { size_t todo = len; size_t split; split = (rbuf->pread + len > rbuf->size) ? rbuf->size - rbuf->pread : 0; if (split > 0) { if (copy_to_user(buf, rbuf->data+rbuf->pread, split)) return -EFAULT; buf += split; todo -= split; /* smp_store_release() for read pointer update to ensure * that buf is not overwritten until read is complete, * this pairs with READ_ONCE() in dvb_ringbuffer_free() */ smp_store_release(&rbuf->pread, 0); } if (copy_to_user(buf, rbuf->data+rbuf->pread, todo)) return -EFAULT; /* smp_store_release() to update read pointer, see above */ smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size); return len; } void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len) { size_t todo = len; size_t split; split = (rbuf->pread + len > rbuf->size) ? 
rbuf->size - rbuf->pread : 0; if (split > 0) { memcpy(buf, rbuf->data+rbuf->pread, split); buf += split; todo -= split; /* smp_store_release() for read pointer update to ensure * that buf is not overwritten until read is complete, * this pairs with READ_ONCE() in dvb_ringbuffer_free() */ smp_store_release(&rbuf->pread, 0); } memcpy(buf, rbuf->data+rbuf->pread, todo); /* smp_store_release() to update read pointer, see above */ smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size); } ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t len) { size_t todo = len; size_t split; split = (rbuf->pwrite + len > rbuf->size) ? rbuf->size - rbuf->pwrite : 0; if (split > 0) { memcpy(rbuf->data+rbuf->pwrite, buf, split); buf += split; todo -= split; /* smp_store_release() for write pointer update to ensure that * written data is visible on other cpu cores before the pointer * update, this pairs with smp_load_acquire() in * dvb_ringbuffer_empty() or dvb_ringbuffer_avail() */ smp_store_release(&rbuf->pwrite, 0); } memcpy(rbuf->data+rbuf->pwrite, buf, todo); /* smp_store_release() for write pointer update, see above */ smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size); return len; } ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf, const u8 __user *buf, size_t len) { int status; size_t todo = len; size_t split; split = (rbuf->pwrite + len > rbuf->size) ? rbuf->size - rbuf->pwrite : 0; if (split > 0) { status = copy_from_user(rbuf->data+rbuf->pwrite, buf, split); if (status) return len - todo; buf += split; todo -= split; /* smp_store_release() for write pointer update to ensure that * written data is visible on other cpu cores before the pointer * update, this pairs with smp_load_acquire() in * dvb_ringbuffer_empty() or dvb_ringbuffer_avail() */ smp_store_release(&rbuf->pwrite, 0); } status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo); if (status) return len - todo; /* smp_store_release() for write pointer update, see above */ smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size); return len; } ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf, size_t len) { int status; ssize_t oldpwrite = rbuf->pwrite; DVB_RINGBUFFER_WRITE_BYTE(rbuf, len >> 8); DVB_RINGBUFFER_WRITE_BYTE(rbuf, len & 0xff); DVB_RINGBUFFER_WRITE_BYTE(rbuf, PKT_READY); status = dvb_ringbuffer_write(rbuf, buf, len); if (status < 0) rbuf->pwrite = oldpwrite; return status; } ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx, int offset, u8 __user *buf, size_t len) { size_t todo; size_t split; size_t pktlen; pktlen = rbuf->data[idx] << 8; pktlen |= rbuf->data[(idx + 1) % rbuf->size]; if (offset > pktlen) return -EINVAL; if ((offset + len) > pktlen) len = pktlen - offset; idx = (idx + DVB_RINGBUFFER_PKTHDRSIZE + offset) % rbuf->size; todo = len; split = ((idx + len) > rbuf->size) ? 
rbuf->size - idx : 0; if (split > 0) { if (copy_to_user(buf, rbuf->data+idx, split)) return -EFAULT; buf += split; todo -= split; idx = 0; } if (copy_to_user(buf, rbuf->data+idx, todo)) return -EFAULT; return len; } ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx, int offset, u8* buf, size_t len) { size_t todo; size_t split; size_t pktlen; pktlen = rbuf->data[idx] << 8; pktlen |= rbuf->data[(idx + 1) % rbuf->size]; if (offset > pktlen) return -EINVAL; if ((offset + len) > pktlen) len = pktlen - offset; idx = (idx + DVB_RINGBUFFER_PKTHDRSIZE + offset) % rbuf->size; todo = len; split = ((idx + len) > rbuf->size) ? rbuf->size - idx : 0; if (split > 0) { memcpy(buf, rbuf->data+idx, split); buf += split; todo -= split; idx = 0; } memcpy(buf, rbuf->data+idx, todo); return len; } void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx) { size_t pktlen; rbuf->data[(idx + 2) % rbuf->size] = PKT_DISPOSED; // clean up disposed packets while(dvb_ringbuffer_avail(rbuf) > DVB_RINGBUFFER_PKTHDRSIZE) { if (DVB_RINGBUFFER_PEEK(rbuf, 2) == PKT_DISPOSED) { pktlen = DVB_RINGBUFFER_PEEK(rbuf, 0) << 8; pktlen |= DVB_RINGBUFFER_PEEK(rbuf, 1); DVB_RINGBUFFER_SKIP(rbuf, pktlen + DVB_RINGBUFFER_PKTHDRSIZE); } else { // first packet is not disposed, so we stop cleaning now break; } } } ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* pktlen) { int consumed; int curpktlen; int curpktstatus; if (idx == -1) { idx = rbuf->pread; } else { curpktlen = rbuf->data[idx] << 8; curpktlen |= rbuf->data[(idx + 1) % rbuf->size]; idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size; } consumed = (idx - rbuf->pread); if (consumed < 0) consumed += rbuf->size; while((dvb_ringbuffer_avail(rbuf) - consumed) > DVB_RINGBUFFER_PKTHDRSIZE) { curpktlen = rbuf->data[idx] << 8; curpktlen |= rbuf->data[(idx + 1) % rbuf->size]; curpktstatus = rbuf->data[(idx + 2) % rbuf->size]; if (curpktstatus == PKT_READY) { *pktlen = curpktlen; return idx; } consumed += curpktlen + DVB_RINGBUFFER_PKTHDRSIZE; idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size; } // no packets available return -1; } EXPORT_SYMBOL(dvb_ringbuffer_init); EXPORT_SYMBOL(dvb_ringbuffer_empty); EXPORT_SYMBOL(dvb_ringbuffer_free); EXPORT_SYMBOL(dvb_ringbuffer_avail); EXPORT_SYMBOL(dvb_ringbuffer_flush_spinlock_wakeup); EXPORT_SYMBOL(dvb_ringbuffer_read_user); EXPORT_SYMBOL(dvb_ringbuffer_read); EXPORT_SYMBOL(dvb_ringbuffer_write); EXPORT_SYMBOL(dvb_ringbuffer_write_user);
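/*
 * Editor's addition, not part of the original file: a minimal sketch of the
 * intended byte-oriented usage of the API above. Thanks to the
 * smp_load_acquire()/smp_store_release() pairing in the helpers, a strict
 * single-producer/single-consumer pair needs no extra locking; rbuf->lock
 * exists for callers with more than one reader or writer on a side. The
 * demo_* names and the buffer size are hypothetical.
 */
#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/types.h>
#include <media/dvb_ringbuffer.h>

#define DEMO_RB_SIZE 1024

static u8 demo_storage[DEMO_RB_SIZE];
static struct dvb_ringbuffer demo_rb;

static void demo_setup(void)
{
	/* One byte always stays unused, so usable capacity is size - 1. */
	dvb_ringbuffer_init(&demo_rb, demo_storage, DEMO_RB_SIZE);
}

/* Producer context: the only place that advances pwrite. */
static ssize_t demo_produce(const u8 *buf, size_t len)
{
	/* dvb_ringbuffer_write() does not check for free space itself. */
	if (dvb_ringbuffer_free(&demo_rb) < (ssize_t)len)
		return -ENOSPC;
	return dvb_ringbuffer_write(&demo_rb, buf, len);
}

/* Consumer context: the only place that advances pread. */
static ssize_t demo_consume(u8 *buf, size_t len)
{
	ssize_t avail = dvb_ringbuffer_avail(&demo_rb);

	if (avail <= 0)
		return 0;
	len = min_t(size_t, len, avail);
	dvb_ringbuffer_read(&demo_rb, buf, len);
	return len;
}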
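/*
 * Editor's addition, not part of the original file: how the packet-level
 * calls above chain together. Each packet sits behind a 3-byte header
 * (16-bit length plus a PKT_READY/PKT_DISPOSED flag byte);
 * dvb_ringbuffer_pkt_next() skips disposed slots and returns the index of
 * the next READY packet, and dvb_ringbuffer_pkt_dispose() reclaims space
 * once every earlier packet has been disposed too. demo_drain_packets() is
 * a hypothetical name; callers typically serialize packet access, e.g. via
 * rbuf->lock or a lock of their own.
 */
static void demo_drain_packets(struct dvb_ringbuffer *rb)
{
	size_t pktlen;
	ssize_t idx;
	u8 payload[256];

	/* idx == -1 asks pkt_next() for the first READY packet after pread */
	while ((idx = dvb_ringbuffer_pkt_next(rb, -1, &pktlen)) >= 0) {
		size_t n = min_t(size_t, pktlen, sizeof(payload));

		/* copy out (the start of) the payload at offset 0 ... */
		dvb_ringbuffer_pkt_read(rb, idx, 0, payload, n);
		/* ... then mark the slot disposed so it can be reclaimed */
		dvb_ringbuffer_pkt_dispose(rb, idx);
	}
}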
// SPDX-License-Identifier: GPL-2.0 // Generated by scripts/atomic/gen-atomic-long.sh // DO NOT MODIFY THIS FILE DIRECTLY #ifndef _LINUX_ATOMIC_LONG_H #define _LINUX_ATOMIC_LONG_H #include <linux/compiler.h> #include <asm/types.h> #ifdef CONFIG_64BIT typedef atomic64_t atomic_long_t; #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) #define atomic_long_cond_read_acquire atomic64_cond_read_acquire #define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed #else typedef atomic_t atomic_long_t; #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) #define atomic_long_cond_read_acquire atomic_cond_read_acquire #define atomic_long_cond_read_relaxed atomic_cond_read_relaxed #endif /** * raw_atomic_long_read() - atomic load with relaxed ordering * @v: pointer to atomic_long_t * * Atomically loads the value of @v with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_read() elsewhere. * * Return: The value loaded from @v. */ static __always_inline long raw_atomic_long_read(const atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_read(v); #else return raw_atomic_read(v); #endif } /** * raw_atomic_long_read_acquire() - atomic load with acquire ordering * @v: pointer to atomic_long_t * * Atomically loads the value of @v with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_read_acquire() elsewhere. * * Return: The value loaded from @v. */ static __always_inline long raw_atomic_long_read_acquire(const atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_read_acquire(v); #else return raw_atomic_read_acquire(v); #endif } /** * raw_atomic_long_set() - atomic set with relaxed ordering * @v: pointer to atomic_long_t * @i: long value to assign * * Atomically sets @v to @i with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_set() elsewhere. * * Return: Nothing. */ static __always_inline void raw_atomic_long_set(atomic_long_t *v, long i) { #ifdef CONFIG_64BIT raw_atomic64_set(v, i); #else raw_atomic_set(v, i); #endif } /** * raw_atomic_long_set_release() - atomic set with release ordering * @v: pointer to atomic_long_t * @i: long value to assign * * Atomically sets @v to @i with release ordering. * * Safe to use in noinstr code; prefer atomic_long_set_release() elsewhere. * * Return: Nothing. */ static __always_inline void raw_atomic_long_set_release(atomic_long_t *v, long i) { #ifdef CONFIG_64BIT raw_atomic64_set_release(v, i); #else raw_atomic_set_release(v, i); #endif } /** * raw_atomic_long_add() - atomic add with relaxed ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with relaxed ordering.
* * Safe to use in noinstr code; prefer atomic_long_add() elsewhere. * * Return: Nothing. */ static __always_inline void raw_atomic_long_add(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT raw_atomic64_add(i, v); #else raw_atomic_add(i, v); #endif } /** * raw_atomic_long_add_return() - atomic add with full ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_add_return() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_add_return(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_add_return(i, v); #else return raw_atomic_add_return(i, v); #endif } /** * raw_atomic_long_add_return_acquire() - atomic add with acquire ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_add_return_acquire() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_add_return_acquire(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_add_return_acquire(i, v); #else return raw_atomic_add_return_acquire(i, v); #endif } /** * raw_atomic_long_add_return_release() - atomic add with release ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with release ordering. * * Safe to use in noinstr code; prefer atomic_long_add_return_release() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_add_return_release(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_add_return_release(i, v); #else return raw_atomic_add_return_release(i, v); #endif } /** * raw_atomic_long_add_return_relaxed() - atomic add with relaxed ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_add_return_relaxed() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_add_return_relaxed(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_add_return_relaxed(i, v); #else return raw_atomic_add_return_relaxed(i, v); #endif } /** * raw_atomic_long_fetch_add() - atomic add with full ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_add() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_add(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_add(i, v); #else return raw_atomic_fetch_add(i, v); #endif } /** * raw_atomic_long_fetch_add_acquire() - atomic add with acquire ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_add_acquire() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_add_acquire(i, v); #else return raw_atomic_fetch_add_acquire(i, v); #endif } /** * raw_atomic_long_fetch_add_release() - atomic add with release ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with release ordering. 
* * Safe to use in noinstr code; prefer atomic_long_fetch_add_release() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_add_release(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_add_release(i, v); #else return raw_atomic_fetch_add_release(i, v); #endif } /** * raw_atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_add_relaxed() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_add_relaxed(i, v); #else return raw_atomic_fetch_add_relaxed(i, v); #endif } /** * raw_atomic_long_sub() - atomic subtract with relaxed ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_sub() elsewhere. * * Return: Nothing. */ static __always_inline void raw_atomic_long_sub(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT raw_atomic64_sub(i, v); #else raw_atomic_sub(i, v); #endif } /** * raw_atomic_long_sub_return() - atomic subtract with full ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_sub_return() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_sub_return(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_sub_return(i, v); #else return raw_atomic_sub_return(i, v); #endif } /** * raw_atomic_long_sub_return_acquire() - atomic subtract with acquire ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_sub_return_acquire() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_sub_return_acquire(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_sub_return_acquire(i, v); #else return raw_atomic_sub_return_acquire(i, v); #endif } /** * raw_atomic_long_sub_return_release() - atomic subtract with release ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with release ordering. * * Safe to use in noinstr code; prefer atomic_long_sub_return_release() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_sub_return_release(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_sub_return_release(i, v); #else return raw_atomic_sub_return_release(i, v); #endif } /** * raw_atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_sub_return_relaxed() elsewhere. * * Return: The updated value of @v. 
*/ static __always_inline long raw_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_sub_return_relaxed(i, v); #else return raw_atomic_sub_return_relaxed(i, v); #endif } /** * raw_atomic_long_fetch_sub() - atomic subtract with full ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_sub() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_sub(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_sub(i, v); #else return raw_atomic_fetch_sub(i, v); #endif } /** * raw_atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_sub_acquire() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_sub_acquire(i, v); #else return raw_atomic_fetch_sub_acquire(i, v); #endif } /** * raw_atomic_long_fetch_sub_release() - atomic subtract with release ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with release ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_sub_release() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_sub_release(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_sub_release(i, v); #else return raw_atomic_fetch_sub_release(i, v); #endif } /** * raw_atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_sub_relaxed() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_sub_relaxed(i, v); #else return raw_atomic_fetch_sub_relaxed(i, v); #endif } /** * raw_atomic_long_inc() - atomic increment with relaxed ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_inc() elsewhere. * * Return: Nothing. */ static __always_inline void raw_atomic_long_inc(atomic_long_t *v) { #ifdef CONFIG_64BIT raw_atomic64_inc(v); #else raw_atomic_inc(v); #endif } /** * raw_atomic_long_inc_return() - atomic increment with full ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_inc_return() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_inc_return(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_inc_return(v); #else return raw_atomic_inc_return(v); #endif } /** * raw_atomic_long_inc_return_acquire() - atomic increment with acquire ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_inc_return_acquire() elsewhere. * * Return: The updated value of @v. 
*/ static __always_inline long raw_atomic_long_inc_return_acquire(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_inc_return_acquire(v); #else return raw_atomic_inc_return_acquire(v); #endif } /** * raw_atomic_long_inc_return_release() - atomic increment with release ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with release ordering. * * Safe to use in noinstr code; prefer atomic_long_inc_return_release() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_inc_return_release(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_inc_return_release(v); #else return raw_atomic_inc_return_release(v); #endif } /** * raw_atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_inc_return_relaxed() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_inc_return_relaxed(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_inc_return_relaxed(v); #else return raw_atomic_inc_return_relaxed(v); #endif } /** * raw_atomic_long_fetch_inc() - atomic increment with full ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_inc() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_inc(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_inc(v); #else return raw_atomic_fetch_inc(v); #endif } /** * raw_atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_inc_acquire() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_inc_acquire(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_inc_acquire(v); #else return raw_atomic_fetch_inc_acquire(v); #endif } /** * raw_atomic_long_fetch_inc_release() - atomic increment with release ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with release ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_inc_release() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_inc_release(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_inc_release(v); #else return raw_atomic_fetch_inc_release(v); #endif } /** * raw_atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_inc_relaxed() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_inc_relaxed(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_inc_relaxed(v); #else return raw_atomic_fetch_inc_relaxed(v); #endif } /** * raw_atomic_long_dec() - atomic decrement with relaxed ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_dec() elsewhere. * * Return: Nothing. 
*/ static __always_inline void raw_atomic_long_dec(atomic_long_t *v) { #ifdef CONFIG_64BIT raw_atomic64_dec(v); #else raw_atomic_dec(v); #endif } /** * raw_atomic_long_dec_return() - atomic decrement with full ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_dec_return() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_dec_return(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_dec_return(v); #else return raw_atomic_dec_return(v); #endif } /** * raw_atomic_long_dec_return_acquire() - atomic decrement with acquire ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_dec_return_acquire() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_dec_return_acquire(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_dec_return_acquire(v); #else return raw_atomic_dec_return_acquire(v); #endif } /** * raw_atomic_long_dec_return_release() - atomic decrement with release ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with release ordering. * * Safe to use in noinstr code; prefer atomic_long_dec_return_release() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_dec_return_release(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_dec_return_release(v); #else return raw_atomic_dec_return_release(v); #endif } /** * raw_atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_dec_return_relaxed() elsewhere. * * Return: The updated value of @v. */ static __always_inline long raw_atomic_long_dec_return_relaxed(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_dec_return_relaxed(v); #else return raw_atomic_dec_return_relaxed(v); #endif } /** * raw_atomic_long_fetch_dec() - atomic decrement with full ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_dec() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_dec(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_dec(v); #else return raw_atomic_fetch_dec(v); #endif } /** * raw_atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_dec_acquire() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_dec_acquire(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_dec_acquire(v); #else return raw_atomic_fetch_dec_acquire(v); #endif } /** * raw_atomic_long_fetch_dec_release() - atomic decrement with release ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with release ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_dec_release() elsewhere. * * Return: The original value of @v. 
*/ static __always_inline long raw_atomic_long_fetch_dec_release(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_dec_release(v); #else return raw_atomic_fetch_dec_release(v); #endif } /** * raw_atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_dec_relaxed() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_dec_relaxed(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_dec_relaxed(v); #else return raw_atomic_fetch_dec_relaxed(v); #endif } /** * raw_atomic_long_and() - atomic bitwise AND with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & @i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_and() elsewhere. * * Return: Nothing. */ static __always_inline void raw_atomic_long_and(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT raw_atomic64_and(i, v); #else raw_atomic_and(i, v); #endif } /** * raw_atomic_long_fetch_and() - atomic bitwise AND with full ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & @i) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_and() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_and(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_and(i, v); #else return raw_atomic_fetch_and(i, v); #endif } /** * raw_atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & @i) with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_and_acquire() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_and_acquire(i, v); #else return raw_atomic_fetch_and_acquire(i, v); #endif } /** * raw_atomic_long_fetch_and_release() - atomic bitwise AND with release ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & @i) with release ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_and_release() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_and_release(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_and_release(i, v); #else return raw_atomic_fetch_and_release(i, v); #endif } /** * raw_atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & @i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_and_relaxed() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_and_relaxed(i, v); #else return raw_atomic_fetch_and_relaxed(i, v); #endif } /** * raw_atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & ~@i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_andnot() elsewhere. * * Return: Nothing. 
*/ static __always_inline void raw_atomic_long_andnot(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT raw_atomic64_andnot(i, v); #else raw_atomic_andnot(i, v); #endif } /** * raw_atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & ~@i) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_andnot() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_andnot(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_andnot(i, v); #else return raw_atomic_fetch_andnot(i, v); #endif } /** * raw_atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & ~@i) with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_acquire() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_andnot_acquire(i, v); #else return raw_atomic_fetch_andnot_acquire(i, v); #endif } /** * raw_atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & ~@i) with release ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_release() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_andnot_release(i, v); #else return raw_atomic_fetch_andnot_release(i, v); #endif } /** * raw_atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & ~@i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_relaxed() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_andnot_relaxed(i, v); #else return raw_atomic_fetch_andnot_relaxed(i, v); #endif } /** * raw_atomic_long_or() - atomic bitwise OR with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v | @i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_or() elsewhere. * * Return: Nothing. */ static __always_inline void raw_atomic_long_or(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT raw_atomic64_or(i, v); #else raw_atomic_or(i, v); #endif } /** * raw_atomic_long_fetch_or() - atomic bitwise OR with full ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v | @i) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_or() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_or(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_or(i, v); #else return raw_atomic_fetch_or(i, v); #endif } /** * raw_atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v | @i) with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_or_acquire() elsewhere. * * Return: The original value of @v. 
*/ static __always_inline long raw_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_or_acquire(i, v); #else return raw_atomic_fetch_or_acquire(i, v); #endif } /** * raw_atomic_long_fetch_or_release() - atomic bitwise OR with release ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v | @i) with release ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_or_release() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_or_release(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_or_release(i, v); #else return raw_atomic_fetch_or_release(i, v); #endif } /** * raw_atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v | @i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_or_relaxed() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_or_relaxed(i, v); #else return raw_atomic_fetch_or_relaxed(i, v); #endif } /** * raw_atomic_long_xor() - atomic bitwise XOR with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v ^ @i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_xor() elsewhere. * * Return: Nothing. */ static __always_inline void raw_atomic_long_xor(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT raw_atomic64_xor(i, v); #else raw_atomic_xor(i, v); #endif } /** * raw_atomic_long_fetch_xor() - atomic bitwise XOR with full ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v ^ @i) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_xor() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_xor(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_xor(i, v); #else return raw_atomic_fetch_xor(i, v); #endif } /** * raw_atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v ^ @i) with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_xor_acquire() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_xor_acquire(i, v); #else return raw_atomic_fetch_xor_acquire(i, v); #endif } /** * raw_atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v ^ @i) with release ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_xor_release() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_xor_release(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_xor_release(i, v); #else return raw_atomic_fetch_xor_release(i, v); #endif } /** * raw_atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v ^ @i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_fetch_xor_relaxed() elsewhere. 
* * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_xor_relaxed(i, v); #else return raw_atomic_fetch_xor_relaxed(i, v); #endif } /** * raw_atomic_long_xchg() - atomic exchange with full ordering * @v: pointer to atomic_long_t * @new: long value to assign * * Atomically updates @v to @new with full ordering. * * Safe to use in noinstr code; prefer atomic_long_xchg() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_xchg(atomic_long_t *v, long new) { #ifdef CONFIG_64BIT return raw_atomic64_xchg(v, new); #else return raw_atomic_xchg(v, new); #endif } /** * raw_atomic_long_xchg_acquire() - atomic exchange with acquire ordering * @v: pointer to atomic_long_t * @new: long value to assign * * Atomically updates @v to @new with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_xchg_acquire() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_xchg_acquire(atomic_long_t *v, long new) { #ifdef CONFIG_64BIT return raw_atomic64_xchg_acquire(v, new); #else return raw_atomic_xchg_acquire(v, new); #endif } /** * raw_atomic_long_xchg_release() - atomic exchange with release ordering * @v: pointer to atomic_long_t * @new: long value to assign * * Atomically updates @v to @new with release ordering. * * Safe to use in noinstr code; prefer atomic_long_xchg_release() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_xchg_release(atomic_long_t *v, long new) { #ifdef CONFIG_64BIT return raw_atomic64_xchg_release(v, new); #else return raw_atomic_xchg_release(v, new); #endif } /** * raw_atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering * @v: pointer to atomic_long_t * @new: long value to assign * * Atomically updates @v to @new with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_xchg_relaxed() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_xchg_relaxed(atomic_long_t *v, long new) { #ifdef CONFIG_64BIT return raw_atomic64_xchg_relaxed(v, new); #else return raw_atomic_xchg_relaxed(v, new); #endif } /** * raw_atomic_long_cmpxchg() - atomic compare and exchange with full ordering * @v: pointer to atomic_long_t * @old: long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Safe to use in noinstr code; prefer atomic_long_cmpxchg() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { #ifdef CONFIG_64BIT return raw_atomic64_cmpxchg(v, old, new); #else return raw_atomic_cmpxchg(v, old, new); #endif } /** * raw_atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering * @v: pointer to atomic_long_t * @old: long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with acquire ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Safe to use in noinstr code; prefer atomic_long_cmpxchg_acquire() elsewhere. * * Return: The original value of @v. 
*/ static __always_inline long raw_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { #ifdef CONFIG_64BIT return raw_atomic64_cmpxchg_acquire(v, old, new); #else return raw_atomic_cmpxchg_acquire(v, old, new); #endif } /** * raw_atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering * @v: pointer to atomic_long_t * @old: long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with release ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Safe to use in noinstr code; prefer atomic_long_cmpxchg_release() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { #ifdef CONFIG_64BIT return raw_atomic64_cmpxchg_release(v, old, new); #else return raw_atomic_cmpxchg_release(v, old, new); #endif } /** * raw_atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering * @v: pointer to atomic_long_t * @old: long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with relaxed ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Safe to use in noinstr code; prefer atomic_long_cmpxchg_relaxed() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { #ifdef CONFIG_64BIT return raw_atomic64_cmpxchg_relaxed(v, old, new); #else return raw_atomic_cmpxchg_relaxed(v, old, new); #endif } /** * raw_atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering * @v: pointer to atomic_long_t * @old: pointer to long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with full ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg() elsewhere. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { #ifdef CONFIG_64BIT return raw_atomic64_try_cmpxchg(v, (s64 *)old, new); #else return raw_atomic_try_cmpxchg(v, (int *)old, new); #endif } /** * raw_atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering * @v: pointer to atomic_long_t * @old: pointer to long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with acquire ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_acquire() elsewhere. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { #ifdef CONFIG_64BIT return raw_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); #else return raw_atomic_try_cmpxchg_acquire(v, (int *)old, new); #endif } /** * raw_atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering * @v: pointer to atomic_long_t * @old: pointer to long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with release ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided.
* * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_release() elsewhere. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) { #ifdef CONFIG_64BIT return raw_atomic64_try_cmpxchg_release(v, (s64 *)old, new); #else return raw_atomic_try_cmpxchg_release(v, (int *)old, new); #endif } /** * raw_atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering * @v: pointer to atomic_long_t * @old: pointer to long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with relaxed ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_relaxed() elsewhere. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { #ifdef CONFIG_64BIT return raw_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); #else return raw_atomic_try_cmpxchg_relaxed(v, (int *)old, new); #endif } /** * raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_sub_and_test() elsewhere. * * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool raw_atomic_long_sub_and_test(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_sub_and_test(i, v); #else return raw_atomic_sub_and_test(i, v); #endif } /** * raw_atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_dec_and_test() elsewhere. * * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool raw_atomic_long_dec_and_test(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_dec_and_test(v); #else return raw_atomic_dec_and_test(v); #endif } /** * raw_atomic_long_inc_and_test() - atomic increment and test if zero with full ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_inc_and_test() elsewhere. * * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool raw_atomic_long_inc_and_test(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_inc_and_test(v); #else return raw_atomic_inc_and_test(v); #endif } /** * raw_atomic_long_add_negative() - atomic add and test if negative with full ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with full ordering. * * Safe to use in noinstr code; prefer atomic_long_add_negative() elsewhere. * * Return: @true if the resulting value of @v is negative, @false otherwise.
*/ static __always_inline bool raw_atomic_long_add_negative(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_add_negative(i, v); #else return raw_atomic_add_negative(i, v); #endif } /** * raw_atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with acquire ordering. * * Safe to use in noinstr code; prefer atomic_long_add_negative_acquire() elsewhere. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool raw_atomic_long_add_negative_acquire(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_add_negative_acquire(i, v); #else return raw_atomic_add_negative_acquire(i, v); #endif } /** * raw_atomic_long_add_negative_release() - atomic add and test if negative with release ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with release ordering. * * Safe to use in noinstr code; prefer atomic_long_add_negative_release() elsewhere. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool raw_atomic_long_add_negative_release(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_add_negative_release(i, v); #else return raw_atomic_add_negative_release(i, v); #endif } /** * raw_atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Safe to use in noinstr code; prefer atomic_long_add_negative_relaxed() elsewhere. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool raw_atomic_long_add_negative_relaxed(long i, atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_add_negative_relaxed(i, v); #else return raw_atomic_add_negative_relaxed(i, v); #endif } /** * raw_atomic_long_fetch_add_unless() - atomic add unless value with full ordering * @v: pointer to atomic_long_t * @a: long value to add * @u: long value to compare with * * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Safe to use in noinstr code; prefer atomic_long_fetch_add_unless() elsewhere. * * Return: The original value of @v. */ static __always_inline long raw_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { #ifdef CONFIG_64BIT return raw_atomic64_fetch_add_unless(v, a, u); #else return raw_atomic_fetch_add_unless(v, a, u); #endif } /** * raw_atomic_long_add_unless() - atomic add unless value with full ordering * @v: pointer to atomic_long_t * @a: long value to add * @u: long value to compare with * * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Safe to use in noinstr code; prefer atomic_long_add_unless() elsewhere. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool raw_atomic_long_add_unless(atomic_long_t *v, long a, long u) { #ifdef CONFIG_64BIT return raw_atomic64_add_unless(v, a, u); #else return raw_atomic_add_unless(v, a, u); #endif } /** * raw_atomic_long_inc_not_zero() - atomic increment unless zero with full ordering * @v: pointer to atomic_long_t * * If (@v != 0), atomically updates @v to (@v + 1) with full ordering. 
* Otherwise, @v is not modified and relaxed ordering is provided. * * Safe to use in noinstr code; prefer atomic_long_inc_not_zero() elsewhere. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool raw_atomic_long_inc_not_zero(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_inc_not_zero(v); #else return raw_atomic_inc_not_zero(v); #endif } /** * raw_atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering * @v: pointer to atomic_long_t * * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Safe to use in noinstr code; prefer atomic_long_inc_unless_negative() elsewhere. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool raw_atomic_long_inc_unless_negative(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_inc_unless_negative(v); #else return raw_atomic_inc_unless_negative(v); #endif } /** * raw_atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering * @v: pointer to atomic_long_t * * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Safe to use in noinstr code; prefer atomic_long_dec_unless_positive() elsewhere. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool raw_atomic_long_dec_unless_positive(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_dec_unless_positive(v); #else return raw_atomic_dec_unless_positive(v); #endif } /** * raw_atomic_long_dec_if_positive() - atomic decrement if positive with full ordering * @v: pointer to atomic_long_t * * If (@v > 0), atomically updates @v to (@v - 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Safe to use in noinstr code; prefer atomic_long_dec_if_positive() elsewhere. * * Return: The old value of (@v - 1), regardless of whether @v was updated. */ static __always_inline long raw_atomic_long_dec_if_positive(atomic_long_t *v) { #ifdef CONFIG_64BIT return raw_atomic64_dec_if_positive(v); #else return raw_atomic_dec_if_positive(v); #endif } #endif /* _LINUX_ATOMIC_LONG_H */ // eadf183c3600b8b92b91839dd3be6bcc560c752d
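/*
 * Editor's addition, not part of the generated header: a bounded-increment
 * sketch showing the try_cmpxchg() calling convention above. On failure,
 * @old is rewritten with the current value of @v, so the loop never has to
 * re-read the counter by hand. demo_long_inc_below() is a hypothetical
 * name; outside noinstr code the instrumented atomic_long_*() variants
 * would be used instead of the raw_ ones.
 */
static __always_inline bool demo_long_inc_below(atomic_long_t *v, long limit)
{
	long old = raw_atomic_long_read(v);

	do {
		if (old >= limit)
			return false;	/* would reach the bound: give up */
	} while (!raw_atomic_long_try_cmpxchg(v, &old, old + 1));

	return true;	/* exchanged old for old + 1 */
}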
/* SPDX-License-Identifier: GPL-2.0-only */ /* * kref.h - library routines for handling generic reference counted objects * * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2004 IBM Corp. * * based on kobject.h which was: * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org> * Copyright (C) 2002-2003 Open Source Development Labs */ #ifndef _KREF_H_ #define _KREF_H_ #include <linux/spinlock.h> #include <linux/refcount.h> struct kref { refcount_t refcount; }; #define KREF_INIT(n) { .refcount = REFCOUNT_INIT(n), } /** * kref_init - initialize object. * @kref: object in question. */ static inline void kref_init(struct kref *kref) { refcount_set(&kref->refcount, 1); } static inline unsigned int kref_read(const struct kref *kref) { return refcount_read(&kref->refcount); } /** * kref_get - increment refcount for object. * @kref: object. */ static inline void kref_get(struct kref *kref) { refcount_inc(&kref->refcount); } /** * kref_put - Decrement refcount for object * @kref: Object * @release: Pointer to the function that will clean up the object when the * last reference to the object is released. * * Decrement the refcount, and if 0, call @release. The caller may not * pass NULL or kfree() as the release function. * * Return: 1 if this call removed the object, otherwise return 0. Beware, * if this function returns 0, another caller may have removed the object * by the time this function returns. Only a return value of 1 is reliable: * it means this call released the object. */ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)) { if (refcount_dec_and_test(&kref->refcount)) { release(kref); return 1; } return 0; } /** * kref_put_mutex - Decrement refcount for object * @kref: Object * @release: Pointer to the function that will clean up the object when the * last reference to the object is released. * @mutex: Mutex which protects the release function. * * This variant of kref_put() calls the @release function with the @mutex * held. The @release function will release the mutex. * * Return: 1 if this call removed the object, otherwise return 0. */ static inline int kref_put_mutex(struct kref *kref, void (*release)(struct kref *kref), struct mutex *mutex) { if (refcount_dec_and_mutex_lock(&kref->refcount, mutex)) { release(kref); return 1; } return 0; } /** * kref_put_lock - Decrement refcount for object * @kref: Object * @release: Pointer to the function that will clean up the object when the * last reference to the object is released. * @lock: Spinlock which protects the release function. * * This variant of kref_put() calls the @release function with the @lock * held. The @release function will release the lock. * * Return: 1 if this call removed the object, otherwise return 0. */ static inline int kref_put_lock(struct kref *kref, void (*release)(struct kref *kref), spinlock_t *lock) { if (refcount_dec_and_lock(&kref->refcount, lock)) { release(kref); return 1; } return 0; } /** * kref_get_unless_zero - Increment refcount for object unless it is zero. * @kref: object.
* * This function is intended to simplify locking around refcounting for * objects that can be looked up from a lookup structure, and which are * removed from that lookup structure in the object destructor. * Operations on such objects require at least a read lock around * lookup + kref_get, and a write lock around kref_put + remove from lookup * structure. Furthermore, RCU implementations become extremely tricky. * With a lookup followed by a kref_get_unless_zero *with return value check* * locking in the kref_put path can be deferred to the actual removal from * the lookup structure and RCU lookups become trivial. * * Return: non-zero if the increment succeeded. Otherwise return 0. */ static inline int __must_check kref_get_unless_zero(struct kref *kref) { return refcount_inc_not_zero(&kref->refcount); } #endif /* _KREF_H_ */ |
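Editor's sketch of the lookup pattern the kref_get_unless_zero() kernel-doc above describes. The names (struct widget, widget_release, widget_tryget, widget_put) are hypothetical, and the lookup-structure locking is elided.

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct widget {
	struct kref kref;
	/* ... payload ... */
};

/* Called by kref_put() when the last reference goes away. */
static void widget_release(struct kref *kref)
{
	struct widget *w = container_of(kref, struct widget, kref);

	/* Remove w from the lookup structure here, then free it. */
	kfree(w);
}

/* Lookup side: caller holds the read-side lock (or RCU read lock). */
static struct widget *widget_tryget(struct widget *w)
{
	if (w && kref_get_unless_zero(&w->kref))
		return w;	/* reference taken, safe to use */
	return NULL;		/* already dying, do not touch */
}

/* Drop side: */
static void widget_put(struct widget *w)
{
	kref_put(&w->kref, widget_release);
}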
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef INT_BLK_MQ_H #define INT_BLK_MQ_H #include <linux/blk-mq.h> #include "blk-stat.h" struct blk_mq_tag_set; struct blk_mq_ctxs { struct kobject kobj; struct blk_mq_ctx __percpu *queue_ctx; }; /** * struct blk_mq_ctx - State for a software queue facing the submitting CPUs */ struct blk_mq_ctx { struct { spinlock_t lock; struct list_head rq_lists[HCTX_MAX_TYPES]; } ____cacheline_aligned_in_smp; unsigned int cpu; unsigned short index_hw[HCTX_MAX_TYPES]; struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; struct request_queue *queue; struct blk_mq_ctxs *ctxs; struct kobject kobj; } ____cacheline_aligned_in_smp; enum { BLK_MQ_NO_TAG = -1U, BLK_MQ_TAG_MIN = 1, BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1, }; #define BLK_MQ_CPU_WORK_BATCH (8) typedef unsigned int __bitwise blk_insert_t; #define BLK_MQ_INSERT_AT_HEAD ((__force blk_insert_t)0x01) void blk_mq_submit_bio(struct bio *bio); int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob, unsigned int flags); void blk_mq_exit_queue(struct request_queue *q); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); void blk_mq_wake_waiters(struct request_queue *q); bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *, bool); void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list); struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *start); void blk_mq_put_rq_ref(struct request *rq); /* * Internal helpers for allocating/freeing the request map */ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx); void blk_mq_free_rq_map(struct blk_mq_tags *tags); struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
unsigned int hctx_idx, unsigned int depth); void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx); /* * CPU -> queue mappings */ extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int); /* * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue * @q: request queue * @type: the hctx type index * @cpu: CPU */ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, enum hctx_type type, unsigned int cpu) { return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]); } static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf) { enum hctx_type type = HCTX_TYPE_DEFAULT; /* * The caller ensures that polling is enabled when REQ_POLLED is set. */ if (opf & REQ_POLLED) type = HCTX_TYPE_POLL; else if ((opf & REQ_OP_MASK) == REQ_OP_READ) type = HCTX_TYPE_READ; return type; } /* * blk_mq_map_queue() - map (opf,ctx) to hardware queue * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED). * @ctx: software queue cpu ctx */ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf, struct blk_mq_ctx *ctx) { return ctx->hctxs[blk_mq_get_hctx_type(opf)]; } /* * sysfs helpers */ extern void blk_mq_sysfs_init(struct request_queue *q); extern void blk_mq_sysfs_deinit(struct request_queue *q); int blk_mq_sysfs_register(struct gendisk *disk); void blk_mq_sysfs_unregister(struct gendisk *disk); int blk_mq_sysfs_register_hctxs(struct request_queue *q); void blk_mq_sysfs_unregister_hctxs(struct request_queue *q); extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); void blk_mq_free_plug_rqs(struct blk_plug *plug); void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); void blk_mq_cancel_work_sync(struct request_queue *q); void blk_mq_release(struct request_queue *q); static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, unsigned int cpu) { return per_cpu_ptr(q->queue_ctx, cpu); } /* * This assumes per-cpu software queueing queues. They could be per-node * as well, for instance. For now this is hardcoded as-is. Note that we don't * care about preemption, since we know the ctx's are persistent. This does * mean that we can't rely on ctx always matching the currently running CPU.
*/ static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) { return __blk_mq_get_ctx(q, raw_smp_processor_id()); } struct blk_mq_alloc_data { /* input parameter */ struct request_queue *q; blk_mq_req_flags_t flags; unsigned int shallow_depth; blk_opf_t cmd_flags; req_flags_t rq_flags; /* allocate multiple requests/tags in one go */ unsigned int nr_tags; struct rq_list *cached_rqs; /* input & output parameter */ struct blk_mq_ctx *ctx; struct blk_mq_hw_ctx *hctx; }; struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, unsigned int flags, int node); void blk_mq_free_tags(struct blk_mq_tags *tags); unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags, unsigned int *offset); void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, unsigned int tag); void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags); int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags **tags, unsigned int depth, bool can_grow); void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size); void blk_mq_tag_update_sched_shared_tags(struct request_queue *q); void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn, void *priv); void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, void *priv); static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt, struct blk_mq_hw_ctx *hctx) { if (!hctx) return &bt->ws[0]; return sbq_wait_ptr(bt, &hctx->wait_index); } void __blk_mq_tag_busy(struct blk_mq_hw_ctx *); void __blk_mq_tag_idle(struct blk_mq_hw_ctx *); static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) { if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) __blk_mq_tag_busy(hctx); } static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) { if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) __blk_mq_tag_idle(hctx); } static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags, unsigned int tag) { return tag < tags->nr_reserved_tags; } static inline bool blk_mq_is_shared_tags(unsigned int flags) { return flags & BLK_MQ_F_TAG_HCTX_SHARED; } static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data) { if (data->rq_flags & RQF_SCHED_TAGS) return data->hctx->sched_tags; return data->hctx->tags; } static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx) { /* Fast path: hardware queue is not stopped most of the time. */ if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state))) return false; /* * This barrier orders adding requests to the dispatch list before * the test of BLK_MQ_S_STOPPED below. It pairs with the memory barrier * in blk_mq_start_stopped_hw_queue() so that the dispatch code either * sees BLK_MQ_S_STOPPED cleared or sees a non-empty dispatch list, * and no request is left undispatched.
*/ smp_mb(); return test_bit(BLK_MQ_S_STOPPED, &hctx->state); } static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) { return hctx->nr_ctx && hctx->tags; } void blk_mq_in_driver_rw(struct block_device *part, unsigned int inflight[2]); static inline void blk_mq_put_dispatch_budget(struct request_queue *q, int budget_token) { if (q->mq_ops->put_budget) q->mq_ops->put_budget(q, budget_token); } static inline int blk_mq_get_dispatch_budget(struct request_queue *q) { if (q->mq_ops->get_budget) return q->mq_ops->get_budget(q); return 0; } static inline void blk_mq_set_rq_budget_token(struct request *rq, int token) { if (token < 0) return; if (rq->q->mq_ops->set_rq_budget_token) rq->q->mq_ops->set_rq_budget_token(rq, token); } static inline int blk_mq_get_rq_budget_token(struct request *rq) { if (rq->q->mq_ops->get_rq_budget_token) return rq->q->mq_ops->get_rq_budget_token(rq); return -1; } static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx, int val) { if (blk_mq_is_shared_tags(hctx->flags)) atomic_add(val, &hctx->queue->nr_active_requests_shared_tags); else atomic_add(val, &hctx->nr_active); } static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx) { __blk_mq_add_active_requests(hctx, 1); } static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx, int val) { if (blk_mq_is_shared_tags(hctx->flags)) atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags); else atomic_sub(val, &hctx->nr_active); } static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx) { __blk_mq_sub_active_requests(hctx, 1); } static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx, int val) { if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) __blk_mq_add_active_requests(hctx, val); } static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx) { if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) __blk_mq_inc_active_requests(hctx); } static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx, int val) { if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) __blk_mq_sub_active_requests(hctx, val); } static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx) { if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) __blk_mq_dec_active_requests(hctx); } static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx) { if (blk_mq_is_shared_tags(hctx->flags)) return atomic_read(&hctx->queue->nr_active_requests_shared_tags); return atomic_read(&hctx->nr_active); } static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq) { blk_mq_dec_active_requests(hctx); blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag); rq->tag = BLK_MQ_NO_TAG; } static inline void blk_mq_put_driver_tag(struct request *rq) { if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG) return; __blk_mq_put_driver_tag(rq->mq_hctx, rq); } bool __blk_mq_alloc_driver_tag(struct request *rq); static inline bool blk_mq_get_driver_tag(struct request *rq) { if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq)) return false; return true; } static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap) { int cpu; for_each_possible_cpu(cpu) qmap->mq_map[cpu] = 0; } /* Free all requests on the list */ static inline void blk_mq_free_requests(struct list_head *list) { while (!list_empty(list)) { struct request *rq = list_entry_rq(list->next); list_del_init(&rq->queuelist); blk_mq_free_request(rq); } } /* * For shared tag users, we track the number of currently active users * and attempt to 
provide a fair share of the tag depth for each of them. */ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt) { unsigned int depth, users; if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) return true; /* * Don't try dividing an ant */ if (bt->sb.depth == 1) return true; if (blk_mq_is_shared_tags(hctx->flags)) { struct request_queue *q = hctx->queue; if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) return true; } else { if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) return true; } users = READ_ONCE(hctx->tags->active_queues); if (!users) return true; /* * Allow at least some tags */ depth = max((bt->sb.depth + users - 1) / users, 4U); return __blk_mq_active_requests(hctx) < depth; } /* run the code block in @dispatch_ops with rcu/srcu read lock held */ #define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops) \ do { \ if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \ struct blk_mq_tag_set *__tag_set = (q)->tag_set; \ int srcu_idx; \ \ might_sleep_if(check_sleep); \ srcu_idx = srcu_read_lock(__tag_set->srcu); \ (dispatch_ops); \ srcu_read_unlock(__tag_set->srcu, srcu_idx); \ } else { \ rcu_read_lock(); \ (dispatch_ops); \ rcu_read_unlock(); \ } \ } while (0) #define blk_mq_run_dispatch_ops(q, dispatch_ops) \ __blk_mq_run_dispatch_ops(q, true, dispatch_ops) \ static inline bool blk_mq_can_poll(struct request_queue *q) { return (q->limits.features & BLK_FEAT_POLL) && q->tag_set->map[HCTX_TYPE_POLL].nr_queues; } #endif |
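Editor's sketch: a userspace model of the fair-share arithmetic in hctx_may_queue() above. Each of the active shared-tag users is budgeted ceil(depth / users) tags, with a floor of four so a queue always gets at least some tags. Plain C, compilable standalone.

#include <stdio.h>

/* ceil(depth / users), but never fewer than 4 tags per active queue */
static unsigned int fair_depth(unsigned int depth, unsigned int users)
{
	unsigned int share = (depth + users - 1) / users;

	return share > 4 ? share : 4;
}

int main(void)
{
	printf("%u\n", fair_depth(256, 3));	/* 86: ceil(256/3) */
	printf("%u\n", fair_depth(8, 7));	/* 4: the floor applies */
	return 0;
}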
// SPDX-License-Identifier: GPL-2.0+ /* * Fast-charge control for Apple "MFi" devices * * Copyright (C) 2019 Bastien Nocera <hadess@hadess.net> */ /* Standard include files */ #include <linux/module.h> #include <linux/power_supply.h> #include <linux/slab.h> #include <linux/usb.h> MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>"); MODULE_DESCRIPTION("Fast-charge control for Apple \"MFi\" devices"); MODULE_LICENSE("GPL"); #define TRICKLE_CURRENT_MA 0 #define FAST_CURRENT_MA 2500 #define APPLE_VENDOR_ID 0x05ac /* Apple */ /* The product ID is defined as starting with 0x12nn, as per the * "Choosing an Apple Device USB Configuration" section in * release R9 (2012) of the "MFi Accessory Hardware Specification" * * To distinguish an Apple device, a USB host can check the device * descriptor of attached USB devices for the following fields: * ■ Vendor ID: 0x05AC * ■ Product ID: 0x12nn * * Those checks will be done in .match() and .probe().
*/ static const struct usb_device_id mfi_fc_id_table[] = { { .idVendor = APPLE_VENDOR_ID, .match_flags = USB_DEVICE_ID_MATCH_VENDOR }, {}, }; MODULE_DEVICE_TABLE(usb, mfi_fc_id_table); /* Driver-local specific stuff */ struct mfi_device { struct usb_device *udev; struct power_supply *battery; struct power_supply_desc battery_desc; int charge_type; }; static int apple_mfi_fc_set_charge_type(struct mfi_device *mfi, const union power_supply_propval *val) { int current_ma; int retval; __u8 request_type; if (mfi->charge_type == val->intval) { dev_dbg(&mfi->udev->dev, "charge type %d already set\n", mfi->charge_type); return 0; } switch (val->intval) { case POWER_SUPPLY_CHARGE_TYPE_TRICKLE: current_ma = TRICKLE_CURRENT_MA; break; case POWER_SUPPLY_CHARGE_TYPE_FAST: current_ma = FAST_CURRENT_MA; break; default: return -EINVAL; } request_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE; retval = usb_control_msg(mfi->udev, usb_sndctrlpipe(mfi->udev, 0), 0x40, /* Vendor-defined power request */ request_type, current_ma, /* wValue, current offset */ current_ma, /* wIndex, current offset */ NULL, 0, USB_CTRL_GET_TIMEOUT); if (retval) { dev_dbg(&mfi->udev->dev, "retval = %d\n", retval); return retval; } mfi->charge_type = val->intval; return 0; } static int apple_mfi_fc_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct mfi_device *mfi = power_supply_get_drvdata(psy); dev_dbg(&mfi->udev->dev, "prop: %d\n", psp); switch (psp) { case POWER_SUPPLY_PROP_CHARGE_TYPE: val->intval = mfi->charge_type; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; default: return -ENODATA; } return 0; } static int apple_mfi_fc_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val) { struct mfi_device *mfi = power_supply_get_drvdata(psy); int ret; dev_dbg(&mfi->udev->dev, "prop: %d\n", psp); ret = pm_runtime_get_sync(&mfi->udev->dev); if (ret < 0) { pm_runtime_put_noidle(&mfi->udev->dev); return ret; } switch (psp) { case POWER_SUPPLY_PROP_CHARGE_TYPE: ret = apple_mfi_fc_set_charge_type(mfi, val); break; default: ret = -EINVAL; } pm_runtime_mark_last_busy(&mfi->udev->dev); pm_runtime_put_autosuspend(&mfi->udev->dev); return ret; } static int apple_mfi_fc_property_is_writeable(struct power_supply *psy, enum power_supply_property psp) { switch (psp) { case POWER_SUPPLY_PROP_CHARGE_TYPE: return 1; default: return 0; } } static enum power_supply_property apple_mfi_fc_properties[] = { POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_SCOPE }; static const struct power_supply_desc apple_mfi_fc_desc = { .name = "apple_mfi_fastcharge", .type = POWER_SUPPLY_TYPE_BATTERY, .properties = apple_mfi_fc_properties, .num_properties = ARRAY_SIZE(apple_mfi_fc_properties), .get_property = apple_mfi_fc_get_property, .set_property = apple_mfi_fc_set_property, .property_is_writeable = apple_mfi_fc_property_is_writeable }; static bool mfi_fc_match(struct usb_device *udev) { int idProduct; idProduct = le16_to_cpu(udev->descriptor.idProduct); /* See comment above mfi_fc_id_table[] */ return (idProduct >= 0x1200 && idProduct <= 0x12ff); } static int mfi_fc_probe(struct usb_device *udev) { struct power_supply_config battery_cfg = {}; struct mfi_device *mfi = NULL; char *battery_name; int err; if (!mfi_fc_match(udev)) return -ENODEV; mfi = kzalloc(sizeof(struct mfi_device), GFP_KERNEL); if (!mfi) return -ENOMEM; battery_name = kasprintf(GFP_KERNEL, "apple_mfi_fastcharge_%d-%d", udev->bus->busnum,
udev->devnum); if (!battery_name) { err = -ENOMEM; goto err_free_mfi; } mfi->battery_desc = apple_mfi_fc_desc; mfi->battery_desc.name = battery_name; battery_cfg.drv_data = mfi; mfi->charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; mfi->battery = power_supply_register(&udev->dev, &mfi->battery_desc, &battery_cfg); if (IS_ERR(mfi->battery)) { dev_err(&udev->dev, "Can't register battery\n"); err = PTR_ERR(mfi->battery); goto err_free_name; } mfi->udev = usb_get_dev(udev); dev_set_drvdata(&udev->dev, mfi); return 0; err_free_name: kfree(battery_name); err_free_mfi: kfree(mfi); return err; } static void mfi_fc_disconnect(struct usb_device *udev) { struct mfi_device *mfi; mfi = dev_get_drvdata(&udev->dev); if (mfi->battery) power_supply_unregister(mfi->battery); kfree(mfi->battery_desc.name); dev_set_drvdata(&udev->dev, NULL); usb_put_dev(mfi->udev); kfree(mfi); } static struct usb_device_driver mfi_fc_driver = { .name = "apple-mfi-fastcharge", .probe = mfi_fc_probe, .disconnect = mfi_fc_disconnect, .id_table = mfi_fc_id_table, .match = mfi_fc_match, .generic_subclass = 1, }; static int __init mfi_fc_driver_init(void) { return usb_register_device_driver(&mfi_fc_driver, THIS_MODULE); } static void __exit mfi_fc_driver_exit(void) { usb_deregister_device_driver(&mfi_fc_driver); } module_init(mfi_fc_driver_init); module_exit(mfi_fc_driver_exit); |
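Editor's usage sketch (not from the driver): once probed, the device appears as a power_supply whose writable charge_type property toggles between the trickle and fast currents defined above. The instance name comes from the "apple_mfi_fastcharge_%d-%d" format in mfi_fc_probe(), so the exact path below, with its 1-4 bus/device suffix, is illustrative only.

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; substitute the actual busnum-devnum pair. */
	const char *path =
		"/sys/class/power_supply/apple_mfi_fastcharge_1-4/charge_type";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* power_supply sysfs accepts the textual charge-type names. */
	fputs("Fast", f);	/* or "Trickle", matching the two cases above */
	return fclose(f) ? 1 : 0;
}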
// SPDX-License-Identifier: GPL-2.0-or-later /* Aquantia Corp. Aquantia AQtion USB to 5GbE Controller * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com> * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net> * Copyright (C) 2002-2003 TiVo Inc. * Copyright (C) 2017-2018 ASIX * Copyright (C) 2018 Aquantia Corp.
*/ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/crc32.h> #include <linux/if_vlan.h> #include <linux/usb/cdc.h> #include <linux/usb/usbnet.h> #include <linux/linkmode.h> #include "aqc111.h" #define DRIVER_NAME "aqc111" static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data) { int ret; ret = usbnet_read_cmd_nopm(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, size); if (unlikely(ret < size)) { netdev_warn(dev->net, "Failed to read(0x%x) reg index 0x%04x: %d\n", cmd, index, ret); ret = ret < 0 ? ret : -ENODATA; } return ret; } static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data) { int ret; ret = usbnet_read_cmd(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, size); if (unlikely(ret < size)) { netdev_warn(dev->net, "Failed to read(0x%x) reg index 0x%04x: %d\n", cmd, index, ret); ret = ret < 0 ? ret : -ENODATA; } return ret; } static int aqc111_read16_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 *data) { int ret = 0; ret = aqc111_read_cmd_nopm(dev, cmd, value, index, sizeof(*data), data); le16_to_cpus(data); return ret; } static int aqc111_read16_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 *data) { int ret = 0; ret = aqc111_read_cmd(dev, cmd, value, index, sizeof(*data), data); le16_to_cpus(data); return ret; } static int __aqc111_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, u16 value, u16 index, u16 size, const void *data) { int err = -ENOMEM; void *buf = NULL; netdev_dbg(dev->net, "%s cmd=%#x reqtype=%#x value=%#x index=%#x size=%d\n", __func__, cmd, reqtype, value, index, size); if (data) { buf = kmemdup(data, size, GFP_KERNEL); if (!buf) goto out; } err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), cmd, reqtype, value, index, buf, size, (cmd == AQ_PHY_POWER) ? 
AQ_USB_PHY_SET_TIMEOUT : AQ_USB_SET_TIMEOUT); if (unlikely(err < 0)) netdev_warn(dev->net, "Failed to write(0x%x) reg index 0x%04x: %d\n", cmd, index, err); kfree(buf); out: return err; } static int aqc111_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data) { int ret; ret = __aqc111_write_cmd(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, size, data); return ret; } static int aqc111_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, const void *data) { int ret; if (usb_autopm_get_interface(dev->intf) < 0) return -ENODEV; ret = __aqc111_write_cmd(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, size, data); usb_autopm_put_interface(dev->intf); return ret; } static int aqc111_write16_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 *data) { u16 tmp = *data; cpu_to_le16s(&tmp); return aqc111_write_cmd_nopm(dev, cmd, value, index, sizeof(tmp), &tmp); } static int aqc111_write16_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 *data) { u16 tmp = *data; cpu_to_le16s(&tmp); return aqc111_write_cmd(dev, cmd, value, index, sizeof(tmp), &tmp); } static int aqc111_write32_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, u16 index, u32 *data) { u32 tmp = *data; cpu_to_le32s(&tmp); return aqc111_write_cmd_nopm(dev, cmd, value, index, sizeof(tmp), &tmp); } static int aqc111_write32_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u32 *data) { u32 tmp = *data; cpu_to_le32s(&tmp); return aqc111_write_cmd(dev, cmd, value, index, sizeof(tmp), &tmp); } static int aqc111_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data) { return usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, size); } static int aqc111_write16_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 *data) { u16 tmp = *data; cpu_to_le16s(&tmp); return aqc111_write_cmd_async(dev, cmd, value, index, sizeof(tmp), &tmp); } static void aqc111_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; /* Inherit standard device info */ usbnet_get_drvinfo(net, info); strscpy(info->driver, DRIVER_NAME, sizeof(info->driver)); snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u", aqc111_data->fw_ver.major, aqc111_data->fw_ver.minor, aqc111_data->fw_ver.rev); info->eedump_len = 0x00; info->regdump_len = 0x00; } static void aqc111_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; wolinfo->supported = WAKE_MAGIC; wolinfo->wolopts = 0; if (aqc111_data->wol_flags & AQ_WOL_FLAG_MP) wolinfo->wolopts |= WAKE_MAGIC; } static int aqc111_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; if (wolinfo->wolopts & ~WAKE_MAGIC) return -EINVAL; aqc111_data->wol_flags = 0; if (wolinfo->wolopts & WAKE_MAGIC) aqc111_data->wol_flags |= AQ_WOL_FLAG_MP; return 0; } static void aqc111_speed_to_link_mode(u32 speed, struct ethtool_link_ksettings *elk) { switch (speed) { case SPEED_5000: ethtool_link_ksettings_add_link_mode(elk, advertising, 5000baseT_Full); break; case SPEED_2500: ethtool_link_ksettings_add_link_mode(elk, advertising, 2500baseT_Full); break; case SPEED_1000: ethtool_link_ksettings_add_link_mode(elk, advertising, 
1000baseT_Full); break; case SPEED_100: ethtool_link_ksettings_add_link_mode(elk, advertising, 100baseT_Full); break; } } static int aqc111_get_link_ksettings(struct net_device *net, struct ethtool_link_ksettings *elk) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; enum usb_device_speed usb_speed = dev->udev->speed; u32 speed = SPEED_UNKNOWN; ethtool_link_ksettings_zero_link_mode(elk, supported); ethtool_link_ksettings_add_link_mode(elk, supported, 100baseT_Full); ethtool_link_ksettings_add_link_mode(elk, supported, 1000baseT_Full); if (usb_speed == USB_SPEED_SUPER) { ethtool_link_ksettings_add_link_mode(elk, supported, 2500baseT_Full); ethtool_link_ksettings_add_link_mode(elk, supported, 5000baseT_Full); } ethtool_link_ksettings_add_link_mode(elk, supported, TP); ethtool_link_ksettings_add_link_mode(elk, supported, Autoneg); elk->base.port = PORT_TP; elk->base.transceiver = XCVR_INTERNAL; elk->base.mdio_support = 0x00; /*Not supported*/ if (aqc111_data->autoneg) linkmode_copy(elk->link_modes.advertising, elk->link_modes.supported); else aqc111_speed_to_link_mode(aqc111_data->advertised_speed, elk); elk->base.autoneg = aqc111_data->autoneg; switch (aqc111_data->link_speed) { case AQ_INT_SPEED_5G: speed = SPEED_5000; break; case AQ_INT_SPEED_2_5G: speed = SPEED_2500; break; case AQ_INT_SPEED_1G: speed = SPEED_1000; break; case AQ_INT_SPEED_100M: speed = SPEED_100; break; } elk->base.duplex = DUPLEX_FULL; elk->base.speed = speed; return 0; } static void aqc111_set_phy_speed(struct usbnet *dev, u8 autoneg, u16 speed) { struct aqc111_data *aqc111_data = dev->driver_priv; aqc111_data->phy_cfg &= ~AQ_ADV_MASK; aqc111_data->phy_cfg |= AQ_PAUSE; aqc111_data->phy_cfg |= AQ_ASYM_PAUSE; aqc111_data->phy_cfg |= AQ_DOWNSHIFT; aqc111_data->phy_cfg &= ~AQ_DSH_RETRIES_MASK; aqc111_data->phy_cfg |= (3 << AQ_DSH_RETRIES_SHIFT) & AQ_DSH_RETRIES_MASK; if (autoneg == AUTONEG_ENABLE) { switch (speed) { case SPEED_5000: aqc111_data->phy_cfg |= AQ_ADV_5G; fallthrough; case SPEED_2500: aqc111_data->phy_cfg |= AQ_ADV_2G5; fallthrough; case SPEED_1000: aqc111_data->phy_cfg |= AQ_ADV_1G; fallthrough; case SPEED_100: aqc111_data->phy_cfg |= AQ_ADV_100M; /* fall-through */ } } else { switch (speed) { case SPEED_5000: aqc111_data->phy_cfg |= AQ_ADV_5G; break; case SPEED_2500: aqc111_data->phy_cfg |= AQ_ADV_2G5; break; case SPEED_1000: aqc111_data->phy_cfg |= AQ_ADV_1G; break; case SPEED_100: aqc111_data->phy_cfg |= AQ_ADV_100M; break; } } aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); } static int aqc111_set_link_ksettings(struct net_device *net, const struct ethtool_link_ksettings *elk) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; enum usb_device_speed usb_speed = dev->udev->speed; u8 autoneg = elk->base.autoneg; u32 speed = elk->base.speed; if (autoneg == AUTONEG_ENABLE) { if (aqc111_data->autoneg != AUTONEG_ENABLE) { aqc111_data->autoneg = AUTONEG_ENABLE; aqc111_data->advertised_speed = (usb_speed == USB_SPEED_SUPER) ? 
SPEED_5000 : SPEED_1000; aqc111_set_phy_speed(dev, aqc111_data->autoneg, aqc111_data->advertised_speed); } } else { if (speed != SPEED_100 && speed != SPEED_1000 && speed != SPEED_2500 && speed != SPEED_5000 && speed != SPEED_UNKNOWN) return -EINVAL; if (elk->base.duplex != DUPLEX_FULL) return -EINVAL; if (usb_speed != USB_SPEED_SUPER && speed > SPEED_1000) return -EINVAL; aqc111_data->autoneg = AUTONEG_DISABLE; if (speed != SPEED_UNKNOWN) aqc111_data->advertised_speed = speed; aqc111_set_phy_speed(dev, aqc111_data->autoneg, aqc111_data->advertised_speed); } return 0; } static const struct ethtool_ops aqc111_ethtool_ops = { .get_drvinfo = aqc111_get_drvinfo, .get_wol = aqc111_get_wol, .set_wol = aqc111_set_wol, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_link = ethtool_op_get_link, .get_link_ksettings = aqc111_get_link_ksettings, .set_link_ksettings = aqc111_set_link_ksettings }; static int aqc111_change_mtu(struct net_device *net, int new_mtu) { struct usbnet *dev = netdev_priv(net); u16 reg16 = 0; u8 buf[5]; WRITE_ONCE(net->mtu, new_mtu); dev->hard_mtu = net->mtu + net->hard_header_len; aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); if (net->mtu > 1500) reg16 |= SFR_MEDIUM_JUMBO_EN; else reg16 &= ~SFR_MEDIUM_JUMBO_EN; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); if (dev->net->mtu > 12500) { memcpy(buf, &AQC111_BULKIN_SIZE[2], 5); /* RX bulk configuration */ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL, 5, 5, buf); } /* Set high low water level */ if (dev->net->mtu <= 4500) reg16 = 0x0810; else if (dev->net->mtu <= 9500) reg16 = 0x1020; else if (dev->net->mtu <= 12500) reg16 = 0x1420; else reg16 = 0x1A20; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW, 2, &reg16); return 0; } static int aqc111_set_mac_addr(struct net_device *net, void *p) { struct usbnet *dev = netdev_priv(net); int ret = 0; ret = eth_mac_addr(net, p); if (ret < 0) return ret; /* Set the MAC address */ return aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN, ETH_ALEN, net->dev_addr); } static int aqc111_vlan_rx_kill_vid(struct net_device *net, __be16 proto, u16 vid) { struct usbnet *dev = netdev_priv(net); u8 vlan_ctrl = 0; u16 reg16 = 0; u8 reg8 = 0; aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); vlan_ctrl = reg8; /* Address */ reg8 = (vid / 16); aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8); /* Data */ reg8 = vlan_ctrl | SFR_VLAN_CONTROL_RD; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); reg16 &= ~(1 << (vid % 16)); aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); reg8 = vlan_ctrl | SFR_VLAN_CONTROL_WE; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); return 0; } static int aqc111_vlan_rx_add_vid(struct net_device *net, __be16 proto, u16 vid) { struct usbnet *dev = netdev_priv(net); u8 vlan_ctrl = 0; u16 reg16 = 0; u8 reg8 = 0; aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); vlan_ctrl = reg8; /* Address */ reg8 = (vid / 16); aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8); /* Data */ reg8 = vlan_ctrl | SFR_VLAN_CONTROL_RD; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); reg16 |= (1 << (vid % 16)); aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); reg8 = vlan_ctrl | SFR_VLAN_CONTROL_WE;
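/* Commit the staged VLAN-table word: writing VLAN_ID_CONTROL with the WE bit latches DATA0 back into the entry selected via VLAN_ID_ADDRESS */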
aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); return 0; } static void aqc111_set_rx_mode(struct net_device *net) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; int mc_count = 0; mc_count = netdev_mc_count(net); aqc111_data->rxctl &= ~(SFR_RX_CTL_PRO | SFR_RX_CTL_AMALL | SFR_RX_CTL_AM); if (net->flags & IFF_PROMISC) { aqc111_data->rxctl |= SFR_RX_CTL_PRO; } else if ((net->flags & IFF_ALLMULTI) || mc_count > AQ_MAX_MCAST) { aqc111_data->rxctl |= SFR_RX_CTL_AMALL; } else if (!netdev_mc_empty(net)) { u8 m_filter[AQ_MCAST_FILTER_SIZE] = { 0 }; struct netdev_hw_addr *ha = NULL; u32 crc_bits = 0; netdev_for_each_mc_addr(ha, net) { crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26; m_filter[crc_bits >> 3] |= BIT(crc_bits & 7); } aqc111_write_cmd_async(dev, AQ_ACCESS_MAC, SFR_MULTI_FILTER_ARRY, AQ_MCAST_FILTER_SIZE, AQ_MCAST_FILTER_SIZE, m_filter); aqc111_data->rxctl |= SFR_RX_CTL_AM; } aqc111_write16_cmd_async(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &aqc111_data->rxctl); } static int aqc111_set_features(struct net_device *net, netdev_features_t features) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; netdev_features_t changed = net->features ^ features; u16 reg16 = 0; u8 reg8 = 0; if (changed & NETIF_F_IP_CSUM) { aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8); reg8 ^= SFR_TXCOE_TCP | SFR_TXCOE_UDP; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8); } if (changed & NETIF_F_IPV6_CSUM) { aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8); reg8 ^= SFR_TXCOE_TCPV6 | SFR_TXCOE_UDPV6; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8); } if (changed & NETIF_F_RXCSUM) { aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8); if (features & NETIF_F_RXCSUM) { aqc111_data->rx_checksum = 1; reg8 &= ~(SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP | SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6); } else { aqc111_data->rx_checksum = 0; reg8 |= SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP | SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6; } aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8); } if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { u16 i = 0; for (i = 0; i < 256; i++) { /* Address */ reg8 = i; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8); /* Data */ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); reg8 = SFR_VLAN_CONTROL_WE; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); } aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); reg8 |= SFR_VLAN_CONTROL_VFE; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); } else { aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); reg8 &= ~SFR_VLAN_CONTROL_VFE; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); } } return 0; } static const struct net_device_ops aqc111_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_get_stats64 = dev_get_tstats64, .ndo_change_mtu = aqc111_change_mtu, .ndo_set_mac_address = aqc111_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = aqc111_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = aqc111_vlan_rx_kill_vid, .ndo_set_rx_mode = aqc111_set_rx_mode, .ndo_set_features = aqc111_set_features, }; static int aqc111_read_perm_mac(struct usbnet *dev) { u8 buf[ETH_ALEN]; int ret; ret = aqc111_read_cmd(dev,
AQ_FLASH_PARAMETERS, 0, 0, ETH_ALEN, buf); if (ret < 0) goto out; ether_addr_copy(dev->net->perm_addr, buf); return 0; out: return ret; } static void aqc111_read_fw_version(struct usbnet *dev, struct aqc111_data *aqc111_data) { aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_MAJOR, 1, 1, &aqc111_data->fw_ver.major); aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_MINOR, 1, 1, &aqc111_data->fw_ver.minor); aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_REV, 1, 1, &aqc111_data->fw_ver.rev); if (aqc111_data->fw_ver.major & 0x80) aqc111_data->fw_ver.major &= ~0x80; } static int aqc111_bind(struct usbnet *dev, struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); enum usb_device_speed usb_speed = udev->speed; struct aqc111_data *aqc111_data; int ret; /* Check if vendor configuration */ if (udev->actconfig->desc.bConfigurationValue != 1) { usb_driver_set_configuration(udev, 1); return -ENODEV; } usb_reset_configuration(dev->udev); ret = usbnet_get_endpoints(dev, intf); if (ret < 0) { netdev_dbg(dev->net, "usbnet_get_endpoints failed"); return ret; } aqc111_data = kzalloc(sizeof(*aqc111_data), GFP_KERNEL); if (!aqc111_data) return -ENOMEM; /* store aqc111_data pointer in device data field */ dev->driver_priv = aqc111_data; /* Init the MAC address */ ret = aqc111_read_perm_mac(dev); if (ret) goto out; eth_hw_addr_set(dev->net, dev->net->perm_addr); /* Set Rx urb size */ dev->rx_urb_size = URB_SIZE; /* Set TX needed headroom & tailroom */ dev->net->needed_headroom += sizeof(u64); dev->net->needed_tailroom += sizeof(u64); dev->net->max_mtu = 16334; dev->net->netdev_ops = &aqc111_netdev_ops; dev->net->ethtool_ops = &aqc111_ethtool_ops; if (usb_device_no_sg_constraint(dev->udev)) dev->can_dma_sg = 1; dev->net->hw_features |= AQ_SUPPORT_HW_FEATURE; dev->net->features |= AQ_SUPPORT_FEATURE; dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE; netif_set_tso_max_size(dev->net, 65535); aqc111_read_fw_version(dev, aqc111_data); aqc111_data->autoneg = AUTONEG_ENABLE; aqc111_data->advertised_speed = (usb_speed == USB_SPEED_SUPER) ? 
SPEED_5000 : SPEED_1000; return 0; out: kfree(aqc111_data); return ret; } static void aqc111_unbind(struct usbnet *dev, struct usb_interface *intf) { struct aqc111_data *aqc111_data = dev->driver_priv; u16 reg16; /* Force bz */ reg16 = SFR_PHYPWR_RSTCTL_BZ; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL, 2, &reg16); reg16 = 0; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL, 2, &reg16); /* Power down ethernet PHY */ aqc111_data->phy_cfg &= ~AQ_ADV_MASK; aqc111_data->phy_cfg |= AQ_LOW_POWER; aqc111_data->phy_cfg &= ~AQ_PHY_POWER_EN; aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); kfree(aqc111_data); } static void aqc111_status(struct usbnet *dev, struct urb *urb) { struct aqc111_data *aqc111_data = dev->driver_priv; u64 *event_data = NULL; int link = 0; if (urb->actual_length < sizeof(*event_data)) return; event_data = urb->transfer_buffer; le64_to_cpus(event_data); if (*event_data & AQ_LS_MASK) link = 1; else link = 0; aqc111_data->link_speed = (*event_data & AQ_SPEED_MASK) >> AQ_SPEED_SHIFT; aqc111_data->link = link; if (netif_carrier_ok(dev->net) != link) usbnet_defer_kevent(dev, EVENT_LINK_RESET); } static void aqc111_configure_rx(struct usbnet *dev, struct aqc111_data *aqc111_data) { enum usb_device_speed usb_speed = dev->udev->speed; u16 link_speed = 0, usb_host = 0; u8 buf[5] = { 0 }; u8 queue_num = 0; u16 reg16 = 0; u8 reg8 = 0; buf[0] = 0x00; buf[1] = 0xF8; buf[2] = 0x07; switch (aqc111_data->link_speed) { case AQ_INT_SPEED_5G: link_speed = 5000; reg8 = 0x05; reg16 = 0x001F; break; case AQ_INT_SPEED_2_5G: link_speed = 2500; reg16 = 0x003F; break; case AQ_INT_SPEED_1G: link_speed = 1000; reg16 = 0x009F; break; case AQ_INT_SPEED_100M: link_speed = 100; queue_num = 1; reg16 = 0x063F; buf[1] = 0xFB; buf[2] = 0x4; break; } aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_INTER_PACKET_GAP_0, 1, 1, &reg8); aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TX_PAUSE_RESEND_T, 3, 3, buf); switch (usb_speed) { case USB_SPEED_SUPER: usb_host = 3; break; case USB_SPEED_HIGH: usb_host = 2; break; case USB_SPEED_FULL: case USB_SPEED_LOW: usb_host = 1; queue_num = 0; break; default: usb_host = 0; break; } if (dev->net->mtu > 12500 && dev->net->mtu <= 16334) queue_num = 2; /* For Jumbo packet 16KB */ memcpy(buf, &AQC111_BULKIN_SIZE[queue_num], 5); /* RX bulk configuration */ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL, 5, 5, buf); /* Set high low water level */ if (dev->net->mtu <= 4500) reg16 = 0x0810; else if (dev->net->mtu <= 9500) reg16 = 0x1020; else if (dev->net->mtu <= 12500) reg16 = 0x1420; else if (dev->net->mtu <= 16334) reg16 = 0x1A20; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW, 2, &reg16); netdev_info(dev->net, "Link Speed %d, USB %d", link_speed, usb_host); } static void aqc111_configure_csum_offload(struct usbnet *dev) { u8 reg8 = 0; if (dev->net->features & NETIF_F_RXCSUM) { reg8 |= SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP | SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6; } aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8); reg8 = 0; if (dev->net->features & NETIF_F_IP_CSUM) reg8 |= SFR_TXCOE_IP | SFR_TXCOE_TCP | SFR_TXCOE_UDP; if (dev->net->features & NETIF_F_IPV6_CSUM) reg8 |= SFR_TXCOE_TCPV6 | SFR_TXCOE_UDPV6; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8); } static int aqc111_link_reset(struct usbnet *dev) { struct aqc111_data *aqc111_data = dev->driver_priv; u16 reg16 = 0; u8 reg8 = 0; if (aqc111_data->link == 1) { /* Link up */ aqc111_configure_rx(dev, aqc111_data); /* Vlan Tag Filter */ reg8 = SFR_VLAN_CONTROL_VSO; if (dev->net->features & NETIF_F_HW_VLAN_CTAG_FILTER) reg8 |= SFR_VLAN_CONTROL_VFE; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); reg8 = 0x0; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL, 1, 1, &reg8); aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMTX_DMA_CONTROL, 1, 1, &reg8); aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_ARC_CTRL, 1, 1, &reg8); reg16 = SFR_RX_CTL_IPE | SFR_RX_CTL_AB; aqc111_data->rxctl = reg16; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); reg8 = SFR_RX_PATH_READY; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH, 1, 1, &reg8); reg8 = SFR_BULK_OUT_EFF_EN; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL, 1, 1, &reg8); reg16 = 0; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 = SFR_MEDIUM_XGMIIMODE | SFR_MEDIUM_FULL_DUPLEX; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); aqc111_configure_csum_offload(dev); aqc111_set_rx_mode(dev->net); aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); if (dev->net->mtu > 1500) reg16 |= SFR_MEDIUM_JUMBO_EN; reg16 |= SFR_MEDIUM_RECEIVE_EN | SFR_MEDIUM_RXFLOW_CTRLEN | SFR_MEDIUM_TXFLOW_CTRLEN; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); aqc111_data->rxctl |= SFR_RX_CTL_START; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &aqc111_data->rxctl); netif_carrier_on(dev->net); } else { aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 &= ~SFR_MEDIUM_RECEIVE_EN; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); aqc111_data->rxctl &= ~SFR_RX_CTL_START; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &aqc111_data->rxctl); reg8 = SFR_BULK_OUT_FLUSH_EN | SFR_BULK_OUT_EFF_EN; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL, 1, 1, &reg8); reg8 = SFR_BULK_OUT_EFF_EN; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL, 1, 1, &reg8); netif_carrier_off(dev->net); } return 0; } static int aqc111_reset(struct usbnet *dev) { struct aqc111_data *aqc111_data = dev->driver_priv; u8 reg8 = 0; dev->rx_urb_size = URB_SIZE; if (usb_device_no_sg_constraint(dev->udev)) dev->can_dma_sg = 1; dev->net->hw_features |= AQ_SUPPORT_HW_FEATURE; dev->net->features |= AQ_SUPPORT_FEATURE; dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE; /* Power up ethernet PHY */ aqc111_data->phy_cfg = AQ_PHY_POWER_EN; aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); /* Set the MAC address */ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN, ETH_ALEN, dev->net->dev_addr); reg8 = 0xFF; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK, 1, 1, &reg8); reg8 = 0x0; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_SWP_CTRL, 1, 1, &reg8); aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_MONITOR_MODE, 1, 1, &reg8); reg8 &= ~(SFR_MONITOR_MODE_EPHYRW | SFR_MONITOR_MODE_RWLC | SFR_MONITOR_MODE_RWMP | SFR_MONITOR_MODE_RWWF | SFR_MONITOR_MODE_RW_FLAG); aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_MONITOR_MODE, 1, 1, &reg8); netif_carrier_off(dev->net); /* Phy advertise */ aqc111_set_phy_speed(dev, aqc111_data->autoneg, aqc111_data->advertised_speed); return 0; } static int aqc111_stop(struct usbnet *dev) { struct aqc111_data *aqc111_data = dev->driver_priv; u16 reg16 = 0; aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 &= ~SFR_MEDIUM_RECEIVE_EN; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 = 0; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); /* Put PHY to low power */ aqc111_data->phy_cfg |= AQ_LOW_POWER;
aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); netif_carrier_off(dev->net); return 0; } static void aqc111_rx_checksum(struct sk_buff *skb, u64 pkt_desc) { u32 pkt_type = 0; skb->ip_summed = CHECKSUM_NONE; /* checksum error bit is set */ if (pkt_desc & AQ_RX_PD_L4_ERR || pkt_desc & AQ_RX_PD_L3_ERR) return; pkt_type = pkt_desc & AQ_RX_PD_L4_TYPE_MASK; /* It must be a TCP or UDP packet with a valid checksum */ if (pkt_type == AQ_RX_PD_L4_TCP || pkt_type == AQ_RX_PD_L4_UDP) skb->ip_summed = CHECKSUM_UNNECESSARY; } static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { struct aqc111_data *aqc111_data = dev->driver_priv; struct sk_buff *new_skb = NULL; u32 pkt_total_offset = 0; u64 *pkt_desc_ptr = NULL; u32 start_of_descs = 0; u32 desc_offset = 0; /*RX Header Offset*/ u16 pkt_count = 0; u64 desc_hdr = 0; u16 vlan_tag = 0; u32 skb_len; if (!skb) goto err; skb_len = skb->len; if (skb_len < sizeof(desc_hdr)) goto err; /* RX Descriptor Header */ skb_trim(skb, skb_len - sizeof(desc_hdr)); desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb)); /* Check these packets */ desc_offset = (desc_hdr & AQ_RX_DH_DESC_OFFSET_MASK) >> AQ_RX_DH_DESC_OFFSET_SHIFT; pkt_count = desc_hdr & AQ_RX_DH_PKT_CNT_MASK; start_of_descs = skb_len - ((pkt_count + 1) * sizeof(desc_hdr)); /* self check descs position */ if (start_of_descs != desc_offset) goto err; /* self check desc_offset from header and make sure that the * bounds of the metadata array are inside the SKB */ if (pkt_count * 2 + desc_offset >= skb_len) goto err; /* Packets must not overlap the metadata array */ skb_trim(skb, desc_offset); if (pkt_count == 0) goto err; /* Get the first RX packet descriptor */ pkt_desc_ptr = (u64 *)(skb->data + desc_offset); while (pkt_count--) { u64 pkt_desc = le64_to_cpup(pkt_desc_ptr); u32 pkt_len_with_padd = 0; u32 pkt_len = 0; pkt_len = (u32)((pkt_desc & AQ_RX_PD_LEN_MASK) >> AQ_RX_PD_LEN_SHIFT); pkt_len_with_padd = ((pkt_len + 7) & 0x7FFF8); pkt_total_offset += pkt_len_with_padd; if (pkt_total_offset > desc_offset || (pkt_count == 0 && pkt_total_offset != desc_offset)) { goto err; } if (pkt_desc & AQ_RX_PD_DROP || !(pkt_desc & AQ_RX_PD_RX_OK) || pkt_len > (dev->hard_mtu + AQ_RX_HW_PAD)) { skb_pull(skb, pkt_len_with_padd); /* Next RX Packet Descriptor */ pkt_desc_ptr++; continue; } new_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len); if (!new_skb) goto err; skb_put(new_skb, pkt_len); memcpy(new_skb->data, skb->data, pkt_len); skb_pull(new_skb, AQ_RX_HW_PAD); if (aqc111_data->rx_checksum) aqc111_rx_checksum(new_skb, pkt_desc); if (pkt_desc & AQ_RX_PD_VLAN) { vlan_tag = pkt_desc >> AQ_RX_PD_VLAN_SHIFT; __vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q), vlan_tag & VLAN_VID_MASK); } usbnet_skb_return(dev, new_skb); if (pkt_count == 0) break; skb_pull(skb, pkt_len_with_padd); /* Next RX Packet Header */ pkt_desc_ptr++; new_skb = NULL; } return 1; err: return 0; } static struct sk_buff *aqc111_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { int frame_size = dev->maxpacket; struct sk_buff *new_skb = NULL; u64 *tx_desc_ptr = NULL; int padding_size = 0; int headroom = 0; int tailroom = 0; u64 tx_desc = 0; u16 tci = 0; /*Length of actual data*/ tx_desc |= skb->len & AQ_TX_DESC_LEN_MASK; /* TSO MSS */ tx_desc |= ((u64)(skb_shinfo(skb)->gso_size & AQ_TX_DESC_MSS_MASK)) << AQ_TX_DESC_MSS_SHIFT; headroom = (skb->len + sizeof(tx_desc)) % 8; if (headroom != 0) padding_size = 8 - headroom; if (((skb->len + sizeof(tx_desc) + padding_size) % frame_size) == 0) { padding_size += 8; tx_desc |= 
AQ_TX_DESC_DROP_PADD; } /* Vlan Tag */ if (vlan_get_tag(skb, &tci) >= 0) { tx_desc |= AQ_TX_DESC_VLAN; tx_desc |= ((u64)tci & AQ_TX_DESC_VLAN_MASK) << AQ_TX_DESC_VLAN_SHIFT; } if (!dev->can_dma_sg && (dev->net->features & NETIF_F_SG) && skb_linearize(skb)) return NULL; headroom = skb_headroom(skb); tailroom = skb_tailroom(skb); if (!(headroom >= sizeof(tx_desc) && tailroom >= padding_size)) { new_skb = skb_copy_expand(skb, sizeof(tx_desc), padding_size, flags); dev_kfree_skb_any(skb); skb = new_skb; if (!skb) return NULL; } if (padding_size != 0) skb_put_zero(skb, padding_size); /* Copy TX header */ tx_desc_ptr = skb_push(skb, sizeof(tx_desc)); *tx_desc_ptr = cpu_to_le64(tx_desc); usbnet_set_skb_tx_stats(skb, 1, 0); return skb; } static const struct driver_info aqc111_info = { .description = "Aquantia AQtion USB to 5GbE Controller", .bind = aqc111_bind, .unbind = aqc111_unbind, .status = aqc111_status, .link_reset = aqc111_link_reset, .reset = aqc111_reset, .stop = aqc111_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, .rx_fixup = aqc111_rx_fixup, .tx_fixup = aqc111_tx_fixup, }; #define ASIX111_DESC \ "ASIX USB 3.1 Gen1 to 5G Multi-Gigabit Ethernet Adapter" static const struct driver_info asix111_info = { .description = ASIX111_DESC, .bind = aqc111_bind, .unbind = aqc111_unbind, .status = aqc111_status, .link_reset = aqc111_link_reset, .reset = aqc111_reset, .stop = aqc111_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, .rx_fixup = aqc111_rx_fixup, .tx_fixup = aqc111_tx_fixup, }; #undef ASIX111_DESC #define ASIX112_DESC \ "ASIX USB 3.1 Gen1 to 2.5G Multi-Gigabit Ethernet Adapter" static const struct driver_info asix112_info = { .description = ASIX112_DESC, .bind = aqc111_bind, .unbind = aqc111_unbind, .status = aqc111_status, .link_reset = aqc111_link_reset, .reset = aqc111_reset, .stop = aqc111_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, .rx_fixup = aqc111_rx_fixup, .tx_fixup = aqc111_tx_fixup, }; #undef ASIX112_DESC static const struct driver_info trendnet_info = { .description = "USB-C 3.1 to 5GBASE-T Ethernet Adapter", .bind = aqc111_bind, .unbind = aqc111_unbind, .status = aqc111_status, .link_reset = aqc111_link_reset, .reset = aqc111_reset, .stop = aqc111_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, .rx_fixup = aqc111_rx_fixup, .tx_fixup = aqc111_tx_fixup, }; static const struct driver_info qnap_info = { .description = "QNAP QNA-UC5G1T USB to 5GbE Adapter", .bind = aqc111_bind, .unbind = aqc111_unbind, .status = aqc111_status, .link_reset = aqc111_link_reset, .reset = aqc111_reset, .stop = aqc111_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, .rx_fixup = aqc111_rx_fixup, .tx_fixup = aqc111_tx_fixup, }; static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); struct aqc111_data *aqc111_data = dev->driver_priv; u16 temp_rx_ctrl = 0x00; u16 reg16; u8 reg8; usbnet_suspend(intf, message); aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, ®16); temp_rx_ctrl = reg16; /* Stop RX operations*/ reg16 &= ~SFR_RX_CTL_START; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, ®16); /* Force bz */ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL, 2, ®16); reg16 |= SFR_PHYPWR_RSTCTL_BZ; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL, 2, ®16); reg8 = SFR_BULK_OUT_EFF_EN; 
aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL, 1, 1, &reg8); temp_rx_ctrl &= ~(SFR_RX_CTL_START | SFR_RX_CTL_RF_WAK | SFR_RX_CTL_AP | SFR_RX_CTL_AM); aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &temp_rx_ctrl); reg8 = 0x00; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH, 1, 1, &reg8); if (aqc111_data->wol_flags) { struct aqc111_wol_cfg wol_cfg; memset(&wol_cfg, 0, sizeof(struct aqc111_wol_cfg)); aqc111_data->phy_cfg |= AQ_WOL; ether_addr_copy(wol_cfg.hw_addr, dev->net->dev_addr); wol_cfg.flags = aqc111_data->wol_flags; temp_rx_ctrl |= (SFR_RX_CTL_AB | SFR_RX_CTL_START); aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &temp_rx_ctrl); reg8 = 0x00; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK, 1, 1, &reg8); reg8 = SFR_BMRX_DMA_EN; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL, 1, 1, &reg8); reg8 = SFR_RX_PATH_READY; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH, 1, 1, &reg8); reg8 = 0x07; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL, 1, 1, &reg8); reg8 = 0x00; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QTIMR_LOW, 1, 1, &reg8); aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QTIMR_HIGH, 1, 1, &reg8); reg8 = 0xFF; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QSIZE, 1, 1, &reg8); aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QIFG, 1, 1, &reg8); aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 |= SFR_MEDIUM_RECEIVE_EN; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); aqc111_write_cmd(dev, AQ_WOL_CFG, 0, 0, WOL_CFG_SIZE, &wol_cfg); aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); } else { aqc111_data->phy_cfg |= AQ_LOW_POWER; aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); /* Disable RX path */ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 &= ~SFR_MEDIUM_RECEIVE_EN; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); } return 0; } static int aqc111_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); struct aqc111_data *aqc111_data = dev->driver_priv; u16 reg16; u8 reg8; netif_carrier_off(dev->net); /* Power up ethernet PHY */ aqc111_data->phy_cfg |= AQ_PHY_POWER_EN; aqc111_data->phy_cfg &= ~AQ_LOW_POWER; aqc111_data->phy_cfg &= ~AQ_WOL; reg8 = 0xFF; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK, 1, 1, &reg8); /* Configure RX control register => start operation */ reg16 = aqc111_data->rxctl; reg16 &= ~SFR_RX_CTL_START; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); reg16 |= SFR_RX_CTL_START; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); aqc111_set_phy_speed(dev, aqc111_data->autoneg, aqc111_data->advertised_speed); aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 |= SFR_MEDIUM_RECEIVE_EN; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg8 = SFR_RX_PATH_READY; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH, 1, 1, &reg8); reg8 = 0x0; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL, 1, 1, &reg8); return usbnet_resume(intf); } #define AQC111_USB_ETH_DEV(vid, pid, table) \ USB_DEVICE_INTERFACE_CLASS((vid), (pid), USB_CLASS_VENDOR_SPEC), \ .driver_info = (unsigned long)&(table) \ }, \ { \ USB_DEVICE_AND_INTERFACE_INFO((vid), (pid), \ USB_CLASS_COMM, \ USB_CDC_SUBCLASS_ETHERNET, \ USB_CDC_PROTO_NONE), \ .driver_info = (unsigned long)&(table), static const struct usb_device_id
products[] = { {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)}, {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)}, {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)}, { },/* END */ }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver aq_driver = { .name = "aqc111", .id_table = products, .probe = usbnet_probe, .suspend = aqc111_suspend, .resume = aqc111_resume, .disconnect = usbnet_disconnect, }; module_usb_driver(aq_driver); MODULE_DESCRIPTION("Aquantia AQtion USB to 5/2.5GbE Controllers"); MODULE_LICENSE("GPL");
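/*
 * Standalone illustration (not part of the driver): the padding logic in
 * aqc111_tx_fixup() above is the subtle part of the TX path. Each frame is
 * prefixed with an 8-byte descriptor and padded so that descriptor + frame
 * is a multiple of 8; if the padded buffer would end exactly on a bulk
 * endpoint boundary, 8 extra bytes are appended and AQ_TX_DESC_DROP_PADD
 * tells the device to discard them, avoiding a zero-length USB packet.
 * A minimal sketch of the same arithmetic follows; the helper name is
 * hypothetical, only the math mirrors the driver.
 */
#include <stdint.h>
#include <stddef.h>

static size_t aqc111_tx_padding_sketch(size_t skb_len, size_t maxpacket,
				       int *drop_padd)
{
	const size_t desc_len = sizeof(uint64_t);	/* 8-byte TX descriptor */
	size_t rem = (skb_len + desc_len) % 8;
	size_t pad = rem ? 8 - rem : 0;			/* align to 8 bytes */

	/* An exact multiple of the endpoint size would require a
	 * zero-length packet; append 8 droppable bytes instead.
	 */
	*drop_padd = ((skb_len + desc_len + pad) % maxpacket) == 0;
	if (*drop_padd)
		pad += 8;
	return pad;
}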
// SPDX-License-Identifier: ISC /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> */ #include <linux/sched.h> #include <linux/of.h> #include "mt76.h" #define CHAN2G(_idx, _freq) { \ .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 30, \ } #define CHAN5G(_idx, _freq) { \ .band = NL80211_BAND_5GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 30, \ } #define CHAN6G(_idx, _freq) { \ .band = NL80211_BAND_6GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 30, \ } static const struct ieee80211_channel mt76_channels_2ghz[] = { CHAN2G(1, 2412), CHAN2G(2, 2417), CHAN2G(3, 2422), CHAN2G(4, 2427), CHAN2G(5, 2432), CHAN2G(6, 2437), CHAN2G(7, 2442), CHAN2G(8, 2447), CHAN2G(9, 2452), CHAN2G(10, 2457), CHAN2G(11, 2462), CHAN2G(12, 2467), CHAN2G(13, 2472), CHAN2G(14, 2484), }; static const struct ieee80211_channel mt76_channels_5ghz[] = { CHAN5G(36, 5180), CHAN5G(40, 5200), CHAN5G(44, 5220), CHAN5G(48, 5240), CHAN5G(52, 5260), CHAN5G(56, 5280), CHAN5G(60, 5300), CHAN5G(64, 5320), CHAN5G(100, 5500), CHAN5G(104, 5520), CHAN5G(108, 5540), CHAN5G(112, 5560), CHAN5G(116, 5580), CHAN5G(120, 5600), CHAN5G(124, 5620), CHAN5G(128, 5640), CHAN5G(132, 5660), CHAN5G(136, 5680), CHAN5G(140, 5700), CHAN5G(144, 5720), CHAN5G(149, 5745), CHAN5G(153, 5765), CHAN5G(157, 5785), CHAN5G(161, 5805), CHAN5G(165, 5825), CHAN5G(169, 5845), CHAN5G(173, 5865), CHAN5G(177, 5885), }; static const struct ieee80211_channel mt76_channels_6ghz[] = { /* UNII-5 */ CHAN6G(1, 5955), CHAN6G(5, 5975), CHAN6G(9, 5995), CHAN6G(13, 6015), CHAN6G(17, 6035), CHAN6G(21, 6055),
CHAN6G(25, 6075), CHAN6G(29, 6095), CHAN6G(33, 6115), CHAN6G(37, 6135), CHAN6G(41, 6155), CHAN6G(45, 6175), CHAN6G(49, 6195), CHAN6G(53, 6215), CHAN6G(57, 6235), CHAN6G(61, 6255), CHAN6G(65, 6275), CHAN6G(69, 6295), CHAN6G(73, 6315), CHAN6G(77, 6335), CHAN6G(81, 6355), CHAN6G(85, 6375), CHAN6G(89, 6395), CHAN6G(93, 6415), /* UNII-6 */ CHAN6G(97, 6435), CHAN6G(101, 6455), CHAN6G(105, 6475), CHAN6G(109, 6495), CHAN6G(113, 6515), CHAN6G(117, 6535), /* UNII-7 */ CHAN6G(121, 6555), CHAN6G(125, 6575), CHAN6G(129, 6595), CHAN6G(133, 6615), CHAN6G(137, 6635), CHAN6G(141, 6655), CHAN6G(145, 6675), CHAN6G(149, 6695), CHAN6G(153, 6715), CHAN6G(157, 6735), CHAN6G(161, 6755), CHAN6G(165, 6775), CHAN6G(169, 6795), CHAN6G(173, 6815), CHAN6G(177, 6835), CHAN6G(181, 6855), CHAN6G(185, 6875), /* UNII-8 */ CHAN6G(189, 6895), CHAN6G(193, 6915), CHAN6G(197, 6935), CHAN6G(201, 6955), CHAN6G(205, 6975), CHAN6G(209, 6995), CHAN6G(213, 7015), CHAN6G(217, 7035), CHAN6G(221, 7055), CHAN6G(225, 7075), CHAN6G(229, 7095), CHAN6G(233, 7115), }; static const struct ieee80211_tpt_blink mt76_tpt_blink[] = { { .throughput = 0 * 1024, .blink_time = 334 }, { .throughput = 1 * 1024, .blink_time = 260 }, { .throughput = 5 * 1024, .blink_time = 220 }, { .throughput = 10 * 1024, .blink_time = 190 }, { .throughput = 20 * 1024, .blink_time = 170 }, { .throughput = 50 * 1024, .blink_time = 150 }, { .throughput = 70 * 1024, .blink_time = 130 }, { .throughput = 100 * 1024, .blink_time = 110 }, { .throughput = 200 * 1024, .blink_time = 80 }, { .throughput = 300 * 1024, .blink_time = 50 }, }; struct ieee80211_rate mt76_rates[] = { CCK_RATE(0, 10), CCK_RATE(1, 20), CCK_RATE(2, 55), CCK_RATE(3, 110), OFDM_RATE(11, 60), OFDM_RATE(15, 90), OFDM_RATE(10, 120), OFDM_RATE(14, 180), OFDM_RATE(9, 240), OFDM_RATE(13, 360), OFDM_RATE(8, 480), OFDM_RATE(12, 540), }; EXPORT_SYMBOL_GPL(mt76_rates); static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = { { .start_freq = 2402, .end_freq = 2494, }, { .start_freq = 5150, .end_freq = 5350, }, { .start_freq = 5350, .end_freq = 5470, }, { .start_freq = 5470, .end_freq = 5725, }, { .start_freq = 5725, .end_freq = 5950, }, { .start_freq = 5945, .end_freq = 6165, }, { .start_freq = 6165, .end_freq = 6405, }, { .start_freq = 6405, .end_freq = 6525, }, { .start_freq = 6525, .end_freq = 6705, }, { .start_freq = 6705, .end_freq = 6865, }, { .start_freq = 6865, .end_freq = 7125, }, }; static const struct cfg80211_sar_capa mt76_sar_capa = { .type = NL80211_SAR_TYPE_POWER, .num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges), .freq_ranges = &mt76_sar_freq_ranges[0], }; static int mt76_led_init(struct mt76_phy *phy) { struct mt76_dev *dev = phy->dev; struct ieee80211_hw *hw = phy->hw; struct device_node *np = dev->dev->of_node; if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set) return 0; np = of_get_child_by_name(np, "led"); if (np) { if (!of_device_is_available(np)) { of_node_put(np); dev_info(dev->dev, "led registration was explicitly disabled by dts\n"); return 0; } if (phy == &dev->phy) { int led_pin; if (!of_property_read_u32(np, "led-sources", &led_pin)) phy->leds.pin = led_pin; phy->leds.al = of_property_read_bool(np, "led-active-low"); } of_node_put(np); } snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s", wiphy_name(hw->wiphy)); phy->leds.cdev.name = phy->leds.name; phy->leds.cdev.default_trigger = ieee80211_create_tpt_led_trigger(hw, IEEE80211_TPT_LEDTRIG_FL_RADIO, mt76_tpt_blink, ARRAY_SIZE(mt76_tpt_blink)); dev_info(dev->dev, "registering led '%s'\n", phy->leds.name); 
return led_classdev_register(dev->dev, &phy->leds.cdev); } static void mt76_led_cleanup(struct mt76_phy *phy) { if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set) return; led_classdev_unregister(&phy->leds.cdev); } static void mt76_init_stream_cap(struct mt76_phy *phy, struct ieee80211_supported_band *sband, bool vht) { struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap; int i, nstream = hweight8(phy->antenna_mask); struct ieee80211_sta_vht_cap *vht_cap; u16 mcs_map = 0; if (nstream > 1) ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC; else ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC; for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0; if (!vht) return; vht_cap = &sband->vht_cap; if (nstream > 1) vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; else vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC; vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN; for (i = 0; i < 8; i++) { if (i < nstream) mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2)); else mcs_map |= (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2)); } vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW)) vht_cap->vht_mcs.tx_highest |= cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); } void mt76_set_stream_caps(struct mt76_phy *phy, bool vht) { if (phy->cap.has_2ghz) mt76_init_stream_cap(phy, &phy->sband_2g.sband, false); if (phy->cap.has_5ghz) mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht); if (phy->cap.has_6ghz) mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht); } EXPORT_SYMBOL_GPL(mt76_set_stream_caps); static int mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband, const struct ieee80211_channel *chan, int n_chan, struct ieee80211_rate *rates, int n_rates, bool ht, bool vht) { struct ieee80211_supported_band *sband = &msband->sband; struct ieee80211_sta_vht_cap *vht_cap; struct ieee80211_sta_ht_cap *ht_cap; struct mt76_dev *dev = phy->dev; void *chanlist; int size; size = n_chan * sizeof(*chan); chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL); if (!chanlist) return -ENOMEM; msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan), GFP_KERNEL); if (!msband->chan) return -ENOMEM; sband->channels = chanlist; sband->n_channels = n_chan; sband->bitrates = rates; sband->n_bitrates = n_rates; if (!ht) return 0; ht_cap = &sband->ht_cap; ht_cap->ht_supported = true; ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; mt76_init_stream_cap(phy, sband, vht); if (!vht) return 0; vht_cap = &sband->vht_cap; vht_cap->vht_supported = true; vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_RXSTBC_1 | IEEE80211_VHT_CAP_SHORT_GI_80 | (3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT); return 0; } static int mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates, int n_rates) { phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband; return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz, ARRAY_SIZE(mt76_channels_2ghz), rates, n_rates, true, false); } static int mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates, int n_rates, bool vht) { phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband; return mt76_init_sband(phy, &phy->sband_5g, 
mt76_channels_5ghz, ARRAY_SIZE(mt76_channels_5ghz), rates, n_rates, true, vht); } static int mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates, int n_rates) { phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband; return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz, ARRAY_SIZE(mt76_channels_6ghz), rates, n_rates, false, false); } static void mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband, enum nl80211_band band) { struct ieee80211_supported_band *sband = &msband->sband; bool found = false; int i; if (!sband) return; for (i = 0; i < sband->n_channels; i++) { if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED) continue; found = true; break; } if (found) { cfg80211_chandef_create(&phy->chandef, &sband->channels[0], NL80211_CHAN_HT20); phy->chan_state = &msband->chan[0]; phy->dev->band_phys[band] = phy; return; } sband->n_channels = 0; if (phy->hw->wiphy->bands[band] == sband) phy->hw->wiphy->bands[band] = NULL; } static int mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw) { struct mt76_dev *dev = phy->dev; struct wiphy *wiphy = hw->wiphy; INIT_LIST_HEAD(&phy->tx_list); spin_lock_init(&phy->tx_lock); INIT_DELAYED_WORK(&phy->roc_work, mt76_roc_complete_work); if ((void *)phy != hw->priv) return 0; SET_IEEE80211_DEV(hw, dev->dev); SET_IEEE80211_PERM_ADDR(hw, phy->macaddr); wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE; wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH | WIPHY_FLAG_SUPPORTS_TDLS | WIPHY_FLAG_AP_UAPSD; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL); if (!wiphy->available_antennas_tx) wiphy->available_antennas_tx = phy->antenna_mask; if (!wiphy->available_antennas_rx) wiphy->available_antennas_rx = phy->antenna_mask; wiphy->sar_capa = &mt76_sar_capa; phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges, sizeof(struct mt76_freq_range_power), GFP_KERNEL); if (!phy->frp) return -ENOMEM; hw->txq_data_size = sizeof(struct mt76_txq); hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL; if (!hw->max_tx_fragments) hw->max_tx_fragments = 16; ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); ieee80211_hw_set(hw, SPECTRUM_MGMT); if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) && hw->max_tx_fragments > 1) { ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, TX_FRAG_LIST); } ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, AP_LINK_PS); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); return 0; } struct mt76_phy * mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size, u8 band_idx) { struct ieee80211_hw *hw = dev->phy.hw; unsigned int phy_size; struct mt76_phy *phy; phy_size = ALIGN(sizeof(*phy), 8); phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL); if (!phy) return NULL; phy->dev = dev; phy->hw = hw; phy->priv = (void *)phy + phy_size; phy->band_idx = band_idx; return phy; } EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy); struct mt76_phy * mt76_alloc_phy(struct mt76_dev *dev, unsigned int size, const struct ieee80211_ops *ops, u8 band_idx) { struct ieee80211_hw *hw; unsigned int phy_size; struct mt76_phy *phy; phy_size = ALIGN(sizeof(*phy), 8); hw = 
ieee80211_alloc_hw(size + phy_size, ops); if (!hw) return NULL; phy = hw->priv; phy->dev = dev; phy->hw = hw; phy->priv = hw->priv + phy_size; phy->band_idx = band_idx; hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | #ifdef CONFIG_MAC80211_MESH BIT(NL80211_IFTYPE_MESH_POINT) | #endif BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_ADHOC); return phy; } EXPORT_SYMBOL_GPL(mt76_alloc_phy); int mt76_register_phy(struct mt76_phy *phy, bool vht, struct ieee80211_rate *rates, int n_rates) { int ret; ret = mt76_phy_init(phy, phy->hw); if (ret) return ret; if (phy->cap.has_2ghz) { ret = mt76_init_sband_2g(phy, rates, n_rates); if (ret) return ret; } if (phy->cap.has_5ghz) { ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht); if (ret) return ret; } if (phy->cap.has_6ghz) { ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4); if (ret) return ret; } if (IS_ENABLED(CONFIG_MT76_LEDS)) { ret = mt76_led_init(phy); if (ret) return ret; } wiphy_read_of_freq_limits(phy->hw->wiphy); mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ); mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ); mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ); if ((void *)phy == phy->hw->priv) { ret = ieee80211_register_hw(phy->hw); if (ret) return ret; } set_bit(MT76_STATE_REGISTERED, &phy->state); phy->dev->phys[phy->band_idx] = phy; return 0; } EXPORT_SYMBOL_GPL(mt76_register_phy); void mt76_unregister_phy(struct mt76_phy *phy) { struct mt76_dev *dev = phy->dev; if (!test_bit(MT76_STATE_REGISTERED, &phy->state)) return; if (IS_ENABLED(CONFIG_MT76_LEDS)) mt76_led_cleanup(phy); mt76_tx_status_check(dev, true); ieee80211_unregister_hw(phy->hw); dev->phys[phy->band_idx] = NULL; } EXPORT_SYMBOL_GPL(mt76_unregister_phy); int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q) { bool is_qrx = mt76_queue_is_rx(dev, q); struct page_pool_params pp_params = { .order = 0, .flags = 0, .nid = NUMA_NO_NODE, .dev = dev->dma_dev, }; int idx = is_qrx ? 
q - dev->q_rx : -1; /* Allocate page_pools just for rx/wed_tx_free queues */ if (!is_qrx && !mt76_queue_is_wed_tx_free(q)) return 0; switch (idx) { case MT_RXQ_MAIN: case MT_RXQ_BAND1: case MT_RXQ_BAND2: pp_params.pool_size = 256; break; default: pp_params.pool_size = 16; break; } if (mt76_is_mmio(dev)) { /* rely on page_pool for DMA mapping */ pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; pp_params.dma_dir = DMA_FROM_DEVICE; pp_params.max_len = PAGE_SIZE; pp_params.offset = 0; /* NAPI is available just for rx queues */ if (idx >= 0 && idx < ARRAY_SIZE(dev->napi)) pp_params.napi = &dev->napi[idx]; } q->page_pool = page_pool_create(&pp_params); if (IS_ERR(q->page_pool)) { int err = PTR_ERR(q->page_pool); q->page_pool = NULL; return err; } return 0; } EXPORT_SYMBOL_GPL(mt76_create_page_pool); struct mt76_dev * mt76_alloc_device(struct device *pdev, unsigned int size, const struct ieee80211_ops *ops, const struct mt76_driver_ops *drv_ops) { struct ieee80211_hw *hw; struct mt76_phy *phy; struct mt76_dev *dev; int i; hw = ieee80211_alloc_hw(size, ops); if (!hw) return NULL; dev = hw->priv; dev->hw = hw; dev->dev = pdev; dev->drv = drv_ops; dev->dma_dev = pdev; phy = &dev->phy; phy->dev = dev; phy->hw = hw; phy->band_idx = MT_BAND0; dev->phys[phy->band_idx] = phy; spin_lock_init(&dev->rx_lock); spin_lock_init(&dev->lock); spin_lock_init(&dev->cc_lock); spin_lock_init(&dev->status_lock); spin_lock_init(&dev->wed_lock); mutex_init(&dev->mutex); init_waitqueue_head(&dev->tx_wait); skb_queue_head_init(&dev->mcu.res_q); init_waitqueue_head(&dev->mcu.wait); mutex_init(&dev->mcu.mutex); dev->tx_worker.fn = mt76_tx_worker; hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | #ifdef CONFIG_MAC80211_MESH BIT(NL80211_IFTYPE_MESH_POINT) | #endif BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_ADHOC); spin_lock_init(&dev->token_lock); idr_init(&dev->token); spin_lock_init(&dev->rx_token_lock); idr_init(&dev->rx_token); INIT_LIST_HEAD(&dev->wcid_list); INIT_LIST_HEAD(&dev->sta_poll_list); spin_lock_init(&dev->sta_poll_lock); INIT_LIST_HEAD(&dev->txwi_cache); INIT_LIST_HEAD(&dev->rxwi_cache); dev->token_size = dev->drv->token_size; INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work); for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) skb_queue_head_init(&dev->rx_skb[i]); dev->wq = alloc_ordered_workqueue("mt76", 0); if (!dev->wq) { ieee80211_free_hw(hw); return NULL; } return dev; } EXPORT_SYMBOL_GPL(mt76_alloc_device); int mt76_register_device(struct mt76_dev *dev, bool vht, struct ieee80211_rate *rates, int n_rates) { struct ieee80211_hw *hw = dev->hw; struct mt76_phy *phy = &dev->phy; int ret; dev_set_drvdata(dev->dev, dev); mt76_wcid_init(&dev->global_wcid, phy->band_idx); ret = mt76_phy_init(phy, hw); if (ret) return ret; if (phy->cap.has_2ghz) { ret = mt76_init_sband_2g(phy, rates, n_rates); if (ret) return ret; } if (phy->cap.has_5ghz) { ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht); if (ret) return ret; } if (phy->cap.has_6ghz) { ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4); if (ret) return ret; } wiphy_read_of_freq_limits(hw->wiphy); mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ); mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ); mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ); if (IS_ENABLED(CONFIG_MT76_LEDS)) { ret = mt76_led_init(phy); if (ret) return ret; } ret = ieee80211_register_hw(hw); if (ret) return ret; 
WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx")); set_bit(MT76_STATE_REGISTERED, &phy->state); sched_set_fifo_low(dev->tx_worker.task); return 0; } EXPORT_SYMBOL_GPL(mt76_register_device); void mt76_unregister_device(struct mt76_dev *dev) { struct ieee80211_hw *hw = dev->hw; if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state)) return; if (IS_ENABLED(CONFIG_MT76_LEDS)) mt76_led_cleanup(&dev->phy); mt76_tx_status_check(dev, true); mt76_wcid_cleanup(dev, &dev->global_wcid); ieee80211_unregister_hw(hw); } EXPORT_SYMBOL_GPL(mt76_unregister_device); void mt76_free_device(struct mt76_dev *dev) { mt76_worker_teardown(&dev->tx_worker); if (dev->wq) { destroy_workqueue(dev->wq); dev->wq = NULL; } ieee80211_free_hw(dev->hw); } EXPORT_SYMBOL_GPL(mt76_free_device); struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv; struct mt76_chanctx *ctx; if (!hw->wiphy->n_radio) return hw->priv; if (!mlink->ctx) return NULL; ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv; return ctx->phy; } EXPORT_SYMBOL_GPL(mt76_vif_phy); static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q) { struct sk_buff *skb = phy->rx_amsdu[q].head; struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct mt76_dev *dev = phy->dev; phy->rx_amsdu[q].head = NULL; phy->rx_amsdu[q].tail = NULL; /* * Validate if the amsdu has a proper first subframe. * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU * flag of the QoS header gets flipped. In such cases, the first * subframe has a LLC/SNAP header in the location of the destination * address. */ if (skb_shinfo(skb)->frag_list) { int offset = 0; if (!(status->flag & RX_FLAG_8023)) { offset = ieee80211_get_hdrlen_from_skb(skb); if ((status->flag & (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) == RX_FLAG_DECRYPTED) offset += 8; } if (ether_addr_equal(skb->data + offset, rfc1042_header)) { dev_kfree_skb(skb); return; } } __skb_queue_tail(&dev->rx_skb[q], skb); } static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; if (phy->rx_amsdu[q].head && (!status->amsdu || status->first_amsdu || status->seqno != phy->rx_amsdu[q].seqno)) mt76_rx_release_amsdu(phy, q); if (!phy->rx_amsdu[q].head) { phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list; phy->rx_amsdu[q].seqno = status->seqno; phy->rx_amsdu[q].head = skb; } else { *phy->rx_amsdu[q].tail = skb; phy->rx_amsdu[q].tail = &skb->next; } if (!status->amsdu || status->last_amsdu) mt76_rx_release_amsdu(phy, q); } void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx); if (!test_bit(MT76_STATE_RUNNING, &phy->state)) { dev_kfree_skb(skb); return; } #ifdef CONFIG_NL80211_TESTMODE if (phy->test.state == MT76_TM_STATE_RX_FRAMES) { phy->test.rx_stats.packets[q]++; if (status->flag & RX_FLAG_FAILED_FCS_CRC) phy->test.rx_stats.fcs_error[q]++; } #endif mt76_rx_release_burst(phy, q, skb); } EXPORT_SYMBOL_GPL(mt76_rx); bool mt76_has_tx_pending(struct mt76_phy *phy) { struct mt76_queue *q; int i; for (i = 0; i < __MT_TXQ_MAX; i++) { q = phy->q_tx[i]; if (q && q->queued) return true; } return false; } EXPORT_SYMBOL_GPL(mt76_has_tx_pending); static struct mt76_channel_state * mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c) { struct 
mt76_sband *msband; int idx; if (c->band == NL80211_BAND_2GHZ) msband = &phy->sband_2g; else if (c->band == NL80211_BAND_6GHZ) msband = &phy->sband_6g; else msband = &phy->sband_5g; idx = c - &msband->sband.channels[0]; return &msband->chan[idx]; } void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time) { struct mt76_channel_state *state = phy->chan_state; state->cc_active += ktime_to_us(ktime_sub(time, phy->survey_time)); phy->survey_time = time; } EXPORT_SYMBOL_GPL(mt76_update_survey_active_time); void mt76_update_survey(struct mt76_phy *phy) { struct mt76_dev *dev = phy->dev; ktime_t cur_time; if (dev->drv->update_survey) dev->drv->update_survey(phy); cur_time = ktime_get_boottime(); mt76_update_survey_active_time(phy, cur_time); if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) { struct mt76_channel_state *state = phy->chan_state; spin_lock_bh(&dev->cc_lock); state->cc_bss_rx += dev->cur_cc_bss_rx; dev->cur_cc_bss_rx = 0; spin_unlock_bh(&dev->cc_lock); } } EXPORT_SYMBOL_GPL(mt76_update_survey); int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef, bool offchannel) { struct mt76_dev *dev = phy->dev; int timeout = HZ / 5; int ret; set_bit(MT76_RESET, &phy->state); mt76_worker_disable(&dev->tx_worker); wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout); mt76_update_survey(phy); if (phy->chandef.chan->center_freq != chandef->chan->center_freq || phy->chandef.width != chandef->width) phy->dfs_state = MT_DFS_STATE_UNKNOWN; phy->chandef = *chandef; phy->chan_state = mt76_channel_state(phy, chandef->chan); phy->offchannel = offchannel; if (!offchannel) phy->main_chandef = *chandef; if (chandef->chan != phy->main_chandef.chan) memset(phy->chan_state, 0, sizeof(*phy->chan_state)); ret = dev->drv->set_channel(phy); clear_bit(MT76_RESET, &phy->state); mt76_worker_enable(&dev->tx_worker); mt76_worker_schedule(&dev->tx_worker); return ret; } int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef, bool offchannel) { struct mt76_dev *dev = phy->dev; int ret; cancel_delayed_work_sync(&phy->mac_work); mutex_lock(&dev->mutex); ret = __mt76_set_channel(phy, chandef, offchannel); mutex_unlock(&dev->mutex); return ret; } int mt76_update_channel(struct mt76_phy *phy) { struct ieee80211_hw *hw = phy->hw; struct cfg80211_chan_def *chandef = &hw->conf.chandef; bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL; phy->radar_enabled = hw->conf.radar_enabled; return mt76_set_channel(phy, chandef, offchannel); } EXPORT_SYMBOL_GPL(mt76_update_channel); static struct mt76_sband * mt76_get_survey_sband(struct mt76_phy *phy, int *idx) { if (*idx < phy->sband_2g.sband.n_channels) return &phy->sband_2g; *idx -= phy->sband_2g.sband.n_channels; if (*idx < phy->sband_5g.sband.n_channels) return &phy->sband_5g; *idx -= phy->sband_5g.sband.n_channels; if (*idx < phy->sband_6g.sband.n_channels) return &phy->sband_6g; *idx -= phy->sband_6g.sband.n_channels; return NULL; } int mt76_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { struct mt76_phy *phy = hw->priv; struct mt76_dev *dev = phy->dev; struct mt76_sband *sband = NULL; struct ieee80211_channel *chan; struct mt76_channel_state *state; int phy_idx = 0; int ret = 0; mutex_lock(&dev->mutex); for (phy_idx = 0; phy_idx < ARRAY_SIZE(dev->phys); phy_idx++) { sband = NULL; phy = dev->phys[phy_idx]; if (!phy || phy->hw != hw) continue; sband = mt76_get_survey_sband(phy, &idx); if (idx == 0 && phy->dev->drv->update_survey) mt76_update_survey(phy); if (sband || 
!hw->wiphy->n_radio) break; } if (!sband) { ret = -ENOENT; goto out; } chan = &sband->sband.channels[idx]; state = mt76_channel_state(phy, chan); memset(survey, 0, sizeof(*survey)); survey->channel = chan; survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY; survey->filled |= dev->drv->survey_flags; if (state->noise) survey->filled |= SURVEY_INFO_NOISE_DBM; if (chan == phy->main_chandef.chan) { survey->filled |= SURVEY_INFO_IN_USE; if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) survey->filled |= SURVEY_INFO_TIME_BSS_RX; } survey->time_busy = div_u64(state->cc_busy, 1000); survey->time_rx = div_u64(state->cc_rx, 1000); survey->time = div_u64(state->cc_active, 1000); survey->noise = state->noise; spin_lock_bh(&dev->cc_lock); survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000); survey->time_tx = div_u64(state->cc_tx, 1000); spin_unlock_bh(&dev->cc_lock); out: mutex_unlock(&dev->mutex); return ret; } EXPORT_SYMBOL_GPL(mt76_get_survey); void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, struct ieee80211_key_conf *key) { struct ieee80211_key_seq seq; int i; wcid->rx_check_pn = false; if (!key) return; if (key->cipher != WLAN_CIPHER_SUITE_CCMP) return; wcid->rx_check_pn = true; /* data frame */ for (i = 0; i < IEEE80211_NUM_TIDS; i++) { ieee80211_get_key_rx_seq(key, i, &seq); memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn)); } /* robust management frame */ ieee80211_get_key_rx_seq(key, -1, &seq); memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn)); } EXPORT_SYMBOL(mt76_wcid_key_setup); int mt76_rx_signal(u8 chain_mask, s8 *chain_signal) { int signal = -128; u8 chains; for (chains = chain_mask; chains; chains >>= 1, chain_signal++) { int cur, diff; cur = *chain_signal; if (!(chains & BIT(0)) || cur > 0) continue; if (cur > signal) swap(cur, signal); diff = signal - cur; if (diff == 0) signal += 3; else if (diff <= 2) signal += 2; else if (diff <= 6) signal += 1; } return signal; } EXPORT_SYMBOL(mt76_rx_signal); static void mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_hw **hw, struct ieee80211_sta **sta) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); struct mt76_rx_status mstat; mstat = *((struct mt76_rx_status *)skb->cb); memset(status, 0, sizeof(*status)); status->flag = mstat.flag; status->freq = mstat.freq; status->enc_flags = mstat.enc_flags; status->encoding = mstat.encoding; status->bw = mstat.bw; if (status->encoding == RX_ENC_EHT) { status->eht.ru = mstat.eht.ru; status->eht.gi = mstat.eht.gi; } else { status->he_ru = mstat.he_ru; status->he_gi = mstat.he_gi; status->he_dcm = mstat.he_dcm; } status->rate_idx = mstat.rate_idx; status->nss = mstat.nss; status->band = mstat.band; status->signal = mstat.signal; status->chains = mstat.chains; status->ampdu_reference = mstat.ampdu_ref; status->device_timestamp = mstat.timestamp; status->mactime = mstat.timestamp; status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal); if (status->signal <= -128) status->flag |= RX_FLAG_NO_SIGNAL_VAL; if (ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control)) status->boottime_ns = ktime_get_boottime_ns(); BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb)); BUILD_BUG_ON(sizeof(status->chain_signal) != sizeof(mstat.chain_signal)); memcpy(status->chain_signal, mstat.chain_signal, sizeof(mstat.chain_signal)); if (mstat.wcid) { status->link_valid = mstat.wcid->link_valid; status->link_id = mstat.wcid->link_id; } *sta = 
wcid_to_sta(mstat.wcid); *hw = mt76_phy_hw(dev, mstat.phy_idx); } static void mt76_check_ccmp_pn(struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct mt76_wcid *wcid = status->wcid; struct ieee80211_hdr *hdr; int security_idx; int ret; if (!(status->flag & RX_FLAG_DECRYPTED)) return; if (status->flag & RX_FLAG_ONLY_MONITOR) return; if (!wcid || !wcid->rx_check_pn) return; security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; if (status->flag & RX_FLAG_8023) goto skip_hdr_check; hdr = mt76_skb_get_hdr(skb); if (!(status->flag & RX_FLAG_IV_STRIPPED)) { /* * Validate the first fragment both here and in mac80211 * All further fragments will be validated by mac80211 only. */ if (ieee80211_is_frag(hdr) && !ieee80211_is_first_frag(hdr->frame_control)) return; } /* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c): * * the recipient shall maintain a single replay counter for received * individually addressed robust Management frames that are received * with the To DS subfield equal to 0, [...] */ if (ieee80211_is_mgmt(hdr->frame_control) && !ieee80211_has_tods(hdr->frame_control)) security_idx = IEEE80211_NUM_TIDS; skip_hdr_check: BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0])); ret = memcmp(status->iv, wcid->rx_key_pn[security_idx], sizeof(status->iv)); if (ret <= 0) { status->flag |= RX_FLAG_ONLY_MONITOR; return; } memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv)); if (status->flag & RX_FLAG_IV_STRIPPED) status->flag |= RX_FLAG_PN_VALIDATED; } static void mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status, int len) { struct mt76_wcid *wcid = status->wcid; struct ieee80211_rx_status info = { .enc_flags = status->enc_flags, .rate_idx = status->rate_idx, .encoding = status->encoding, .band = status->band, .nss = status->nss, .bw = status->bw, }; struct ieee80211_sta *sta; u32 airtime; u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len); spin_lock(&dev->cc_lock); dev->cur_cc_bss_rx += airtime; spin_unlock(&dev->cc_lock); if (!wcid || !wcid->sta) return; sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv); ieee80211_sta_register_airtime(sta, tidno, 0, airtime); } static void mt76_airtime_flush_ampdu(struct mt76_dev *dev) { struct mt76_wcid *wcid; int wcid_idx; if (!dev->rx_ampdu_len) return; wcid_idx = dev->rx_ampdu_status.wcid_idx; if (wcid_idx < ARRAY_SIZE(dev->wcid)) wcid = rcu_dereference(dev->wcid[wcid_idx]); else wcid = NULL; dev->rx_ampdu_status.wcid = wcid; mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len); dev->rx_ampdu_len = 0; dev->rx_ampdu_ref = 0; } static void mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct mt76_wcid *wcid = status->wcid; if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)) return; if (!wcid || !wcid->sta) { struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); if (status->flag & RX_FLAG_8023) return; if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr)) return; wcid = NULL; } if (!(status->flag & RX_FLAG_AMPDU_DETAILS) || status->ampdu_ref != dev->rx_ampdu_ref) mt76_airtime_flush_ampdu(dev); if (status->flag & RX_FLAG_AMPDU_DETAILS) { if (!dev->rx_ampdu_len || status->ampdu_ref != dev->rx_ampdu_ref) { dev->rx_ampdu_status = *status; dev->rx_ampdu_status.wcid_idx = wcid ? 
wcid->idx : 0xff; dev->rx_ampdu_ref = status->ampdu_ref; } dev->rx_ampdu_len += skb->len; return; } mt76_airtime_report(dev, status, skb->len); } static void mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); struct ieee80211_sta *sta; struct ieee80211_hw *hw; struct mt76_wcid *wcid = status->wcid; u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK; bool ps; hw = mt76_phy_hw(dev, status->phy_idx); if (ieee80211_is_pspoll(hdr->frame_control) && !wcid && !(status->flag & RX_FLAG_8023)) { sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL); if (sta) wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv; } mt76_airtime_check(dev, skb); if (!wcid || !wcid->sta) return; sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv); if (status->signal <= 0) ewma_signal_add(&wcid->rssi, -status->signal); wcid->inactive_count = 0; if (status->flag & RX_FLAG_8023) return; if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags)) return; if (ieee80211_is_pspoll(hdr->frame_control)) { ieee80211_sta_pspoll(sta); return; } if (ieee80211_has_morefrags(hdr->frame_control) || !(ieee80211_is_mgmt(hdr->frame_control) || ieee80211_is_data(hdr->frame_control))) return; ps = ieee80211_has_pm(hdr->frame_control); if (ps && (ieee80211_is_data_qos(hdr->frame_control) || ieee80211_is_qos_nullfunc(hdr->frame_control))) ieee80211_sta_uapsd_trigger(sta, tidno); if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps) return; if (ps) set_bit(MT_WCID_FLAG_PS, &wcid->flags); if (dev->drv->sta_ps) dev->drv->sta_ps(dev, sta, ps); if (!ps) clear_bit(MT_WCID_FLAG_PS, &wcid->flags); ieee80211_sta_ps_transition(sta, ps); } void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, struct napi_struct *napi) { struct ieee80211_sta *sta; struct ieee80211_hw *hw; struct sk_buff *skb, *tmp; LIST_HEAD(list); spin_lock(&dev->rx_lock); while ((skb = __skb_dequeue(frames)) != NULL) { struct sk_buff *nskb = skb_shinfo(skb)->frag_list; mt76_check_ccmp_pn(skb); skb_shinfo(skb)->frag_list = NULL; mt76_rx_convert(dev, skb, &hw, &sta); ieee80211_rx_list(hw, sta, skb, &list); /* subsequent amsdu frames */ while (nskb) { skb = nskb; nskb = nskb->next; skb->next = NULL; mt76_rx_convert(dev, skb, &hw, &sta); ieee80211_rx_list(hw, sta, skb, &list); } } spin_unlock(&dev->rx_lock); if (!napi) { netif_receive_skb_list(&list); return; } list_for_each_entry_safe(skb, tmp, &list, list) { skb_list_del_init(skb); napi_gro_receive(napi, skb); } } void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q, struct napi_struct *napi) { struct sk_buff_head frames; struct sk_buff *skb; __skb_queue_head_init(&frames); while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) { mt76_check_sta(dev, skb); if (mtk_wed_device_active(&dev->mmio.wed)) __skb_queue_tail(&frames, skb); else mt76_rx_aggr_reorder(skb, &frames); } mt76_rx_complete(dev, &frames, napi); } EXPORT_SYMBOL_GPL(mt76_rx_poll_complete); static int mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; struct mt76_dev *dev = phy->dev; int ret; int i; mutex_lock(&dev->mutex); ret = dev->drv->sta_add(dev, vif, sta); if (ret) goto out; for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { struct mt76_txq *mtxq; if (!sta->txq[i]) continue; mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv; mtxq->wcid = wcid->idx; } ewma_signal_init(&wcid->rssi); 
rcu_assign_pointer(dev->wcid[wcid->idx], wcid); phy->num_sta++; mt76_wcid_init(wcid, phy->band_idx); out: mutex_unlock(&dev->mutex); return ret; } void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct mt76_dev *dev = phy->dev; struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; int i, idx = wcid->idx; for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++) mt76_rx_aggr_stop(dev, wcid, i); if (dev->drv->sta_remove) dev->drv->sta_remove(dev, vif, sta); mt76_wcid_cleanup(dev, wcid); mt76_wcid_mask_clear(dev->wcid_mask, idx); phy->num_sta--; } EXPORT_SYMBOL_GPL(__mt76_sta_remove); static void mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct mt76_dev *dev = phy->dev; mutex_lock(&dev->mutex); __mt76_sta_remove(phy, vif, sta); mutex_unlock(&dev->mutex); } int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) { struct mt76_phy *phy = hw->priv; struct mt76_dev *dev = phy->dev; enum mt76_sta_event ev; phy = mt76_vif_phy(hw, vif); if (!phy) return -EINVAL; if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) return mt76_sta_add(phy, vif, sta); if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) mt76_sta_remove(phy, vif, sta); if (!dev->drv->sta_event) return 0; if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) ev = MT76_STA_EVENT_ASSOC; else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) ev = MT76_STA_EVENT_AUTHORIZE; else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) ev = MT76_STA_EVENT_DISASSOC; else return 0; return dev->drv->sta_event(dev, vif, sta, ev); } EXPORT_SYMBOL_GPL(mt76_sta_state); void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct mt76_phy *phy = hw->priv; struct mt76_dev *dev = phy->dev; struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; mutex_lock(&dev->mutex); spin_lock_bh(&dev->status_lock); rcu_assign_pointer(dev->wcid[wcid->idx], NULL); spin_unlock_bh(&dev->status_lock); mutex_unlock(&dev->mutex); } EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove); void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx) { wcid->hw_key_idx = -1; wcid->phy_idx = band_idx; INIT_LIST_HEAD(&wcid->tx_list); skb_queue_head_init(&wcid->tx_pending); skb_queue_head_init(&wcid->tx_offchannel); INIT_LIST_HEAD(&wcid->list); idr_init(&wcid->pktid); INIT_LIST_HEAD(&wcid->poll_list); } EXPORT_SYMBOL_GPL(mt76_wcid_init); void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid) { struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx); struct ieee80211_hw *hw; struct sk_buff_head list; struct sk_buff *skb; mt76_tx_status_lock(dev, &list); mt76_tx_status_skb_get(dev, wcid, -1, &list); mt76_tx_status_unlock(dev, &list); idr_destroy(&wcid->pktid); spin_lock_bh(&phy->tx_lock); if (!list_empty(&wcid->tx_list)) list_del_init(&wcid->tx_list); spin_lock(&wcid->tx_pending.lock); skb_queue_splice_tail_init(&wcid->tx_pending, &list); spin_unlock(&wcid->tx_pending.lock); spin_unlock_bh(&phy->tx_lock); while ((skb = __skb_dequeue(&list)) != NULL) { hw = mt76_tx_status_get_hw(dev, skb); ieee80211_free_txskb(hw, skb); } } EXPORT_SYMBOL_GPL(mt76_wcid_cleanup); void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid) { if (test_bit(MT76_MCU_RESET, &dev->phy.state)) return; 
spin_lock_bh(&dev->sta_poll_lock); if (list_empty(&wcid->poll_list)) list_add_tail(&wcid->poll_list, &dev->sta_poll_list); spin_unlock_bh(&dev->sta_poll_lock); } EXPORT_SYMBOL_GPL(mt76_wcid_add_poll); s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower) { int n_chains = hweight16(phy->chainmask); txpower = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2); txpower -= mt76_tx_power_path_delta(n_chains); return txpower; } EXPORT_SYMBOL_GPL(mt76_get_power_bound); int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, int *dbm) { struct mt76_phy *phy = mt76_vif_phy(hw, vif); int n_chains, delta; if (!phy) return -EINVAL; n_chains = hweight16(phy->chainmask); delta = mt76_tx_power_path_delta(n_chains); *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2); return 0; } EXPORT_SYMBOL_GPL(mt76_get_txpower); int mt76_init_sar_power(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar) { struct mt76_phy *phy = hw->priv; const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa; int i; if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs) return -EINVAL; for (i = 0; i < sar->num_sub_specs; i++) { u32 index = sar->sub_specs[i].freq_range_index; /* SAR specifies power limitation in 0.25 dBm units */ s32 power = sar->sub_specs[i].power >> 1; if (power > 127 || power < -127) power = 127; phy->frp[index].range = &capa->freq_ranges[index]; phy->frp[index].power = power; } return 0; } EXPORT_SYMBOL_GPL(mt76_init_sar_power); int mt76_get_sar_power(struct mt76_phy *phy, struct ieee80211_channel *chan, int power) { const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa; int freq, i; if (!capa || !phy->frp) return power; if (power > 127 || power < -127) power = 127; freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band); for (i = 0; i < capa->num_freq_ranges; i++) { if (phy->frp[i].range && freq >= phy->frp[i].range->start_freq && freq < phy->frp[i].range->end_freq) { power = min_t(int, phy->frp[i].power, power); break; } } return power; } EXPORT_SYMBOL_GPL(mt76_get_sar_power); static void __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) { if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0)) ieee80211_csa_finish(vif, 0); } void mt76_csa_finish(struct mt76_dev *dev) { if (!dev->csa_complete) return; ieee80211_iterate_active_interfaces_atomic(dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL, __mt76_csa_finish, dev); dev->csa_complete = 0; } EXPORT_SYMBOL_GPL(mt76_csa_finish); static void __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif) { struct mt76_dev *dev = priv; if (!vif->bss_conf.csa_active) return; dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0); } void mt76_csa_check(struct mt76_dev *dev) { ieee80211_iterate_active_interfaces_atomic(dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL, __mt76_csa_check, dev); } EXPORT_SYMBOL_GPL(mt76_csa_check); int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) { return 0; } EXPORT_SYMBOL_GPL(mt76_set_tim); void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; int hdr_len = ieee80211_get_hdrlen_from_skb(skb); u8 *hdr, *pn = status->iv; __skb_push(skb, 8); memmove(skb->data, skb->data + 8, hdr_len); hdr = skb->data + hdr_len; hdr[0] = pn[5]; hdr[1] = pn[4]; hdr[2] = 0; hdr[3] = 0x20 | (key_id << 6); hdr[4] = pn[3]; hdr[5] = pn[2]; hdr[6] = pn[1]; hdr[7] = pn[0]; status->flag &= ~RX_FLAG_IV_STRIPPED; } EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
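/*
 * Standalone illustration (not part of the driver): mt76_insert_ccmp_hdr()
 * above rebuilds the 8-byte CCMP header that the hardware stripped on
 * decryption, from the packet number saved in status->iv (stored most
 * significant byte first, i.e. iv[0] = PN5 ... iv[5] = PN0). The on-air
 * layout is PN0, PN1, a reserved byte, the Key ID octet with the ExtIV
 * bit (0x20) set, then PN2..PN5. A minimal sketch of just the byte
 * shuffle, with a hypothetical name:
 */
#include <stdint.h>

static void ccmp_hdr_sketch(const uint8_t pn[6], uint8_t key_id,
			    uint8_t hdr[8])
{
	hdr[0] = pn[5];			/* PN0 */
	hdr[1] = pn[4];			/* PN1 */
	hdr[2] = 0;			/* reserved */
	hdr[3] = 0x20 | (key_id << 6);	/* ExtIV flag + Key ID */
	hdr[4] = pn[3];			/* PN2 */
	hdr[5] = pn[2];			/* PN3 */
	hdr[6] = pn[1];			/* PN4 */
	hdr[7] = pn[0];			/* PN5 */
}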
int mt76_get_rate(struct mt76_dev *dev, struct ieee80211_supported_band *sband, int idx, bool cck) { bool is_2g = sband->band == NL80211_BAND_2GHZ; int i, offset = 0, len = sband->n_bitrates; if (cck) { if (!is_2g) return 0; idx &= ~BIT(2); /* short preamble */ } else if (is_2g) { offset = 4; } for (i = offset; i < len; i++) { if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx) return i; } return 0; } EXPORT_SYMBOL_GPL(mt76_get_rate); void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac) { struct mt76_phy *phy = hw->priv; set_bit(MT76_SCANNING, &phy->state); } EXPORT_SYMBOL_GPL(mt76_sw_scan); void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt76_phy *phy = hw->priv; clear_bit(MT76_SCANNING, &phy->state); } EXPORT_SYMBOL_GPL(mt76_sw_scan_complete); int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) { struct mt76_phy *phy = hw->priv; struct mt76_dev *dev = phy->dev; int i; mutex_lock(&dev->mutex); *tx_ant = 0; for (i = 0; i < ARRAY_SIZE(dev->phys); i++) if (dev->phys[i] && dev->phys[i]->hw == hw) *tx_ant |= dev->phys[i]->chainmask; *rx_ant = *tx_ant; mutex_unlock(&dev->mutex); return 0; } EXPORT_SYMBOL_GPL(mt76_get_antenna); struct mt76_queue * mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, int ring_base, void *wed, u32 flags) { struct mt76_queue *hwq; int err; hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL); if (!hwq) return ERR_PTR(-ENOMEM); hwq->flags = flags; hwq->wed = wed; err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base); if (err < 0) return ERR_PTR(err); return hwq; } EXPORT_SYMBOL_GPL(mt76_init_queue); void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi, struct mt76_sta_stats *stats, bool eht) { int i, ei = wi->initial_stat_idx; u64 *data = wi->data; wi->sta_count++; data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK]; data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM]; data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT]; data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF]; data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT]; data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU]; data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU]; data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB]; data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU]; if (eht) { data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU]; data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG]; data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU]; } for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++) data[ei++] += stats->tx_bw[i]; for (i = 0; i < (eht ? 
14 : 12); i++) data[ei++] += stats->tx_mcs[i]; for (i = 0; i < 4; i++) data[ei++] += stats->tx_nss[i]; wi->worker_stat_count = ei - wi->initial_stat_idx; } EXPORT_SYMBOL_GPL(mt76_ethtool_worker); void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index) { #ifdef CONFIG_PAGE_POOL_STATS struct page_pool_stats stats = {}; int i; mt76_for_each_q_rx(dev, i) page_pool_get_stats(dev->q_rx[i].page_pool, &stats); page_pool_ethtool_stats_get(data, &stats); *index += page_pool_ethtool_stats_get_count(); #endif } EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats); enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy) { struct ieee80211_hw *hw = phy->hw; struct mt76_dev *dev = phy->dev; if (dev->region == NL80211_DFS_UNSET || test_bit(MT76_SCANNING, &phy->state)) return MT_DFS_STATE_DISABLED; if (!phy->radar_enabled) { if ((hw->conf.flags & IEEE80211_CONF_MONITOR) && (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR)) return MT_DFS_STATE_ACTIVE; return MT_DFS_STATE_DISABLED; } if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP)) return MT_DFS_STATE_CAC; return MT_DFS_STATE_ACTIVE; } EXPORT_SYMBOL_GPL(mt76_phy_dfs_state); void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif) { struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv; struct mt76_vif_data *mvif = mlink->mvif; rcu_assign_pointer(mvif->link[0], NULL); mt76_abort_scan(dev); if (mvif->roc_phy) mt76_abort_roc(mvif->roc_phy); } EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
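/*
 * Usage sketch for mt76_init_queue() above (illustrative only, not
 * taken from a specific driver): the function returns an ERR_PTR()
 * value rather than NULL on failure, so callers are expected to use
 * IS_ERR()/PTR_ERR() rather than a NULL check:
 *
 *	struct mt76_queue *q;
 *
 *	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, NULL, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 */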
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DEVICE_DEVRES_H_ #define _DEVICE_DEVRES_H_ #include <linux/err.h> #include <linux/gfp_types.h> #include <linux/numa.h> #include <linux/overflow.h> #include <linux/stdarg.h> #include <linux/types.h> #include <asm/bug.h> struct device; struct device_node; struct resource; /* device resource management */ typedef void (*dr_release_t)(struct device *dev, void *res); typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); void * __malloc __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const char *name); #define devres_alloc(release, size, gfp) \ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) #define devres_alloc_node(release, size, gfp, nid) \ __devres_alloc_node(release, size, gfp, nid, #release) void devres_for_each_res(struct device *dev, dr_release_t release, dr_match_t match, void *match_data, void (*fn)(struct device *, void *, void *), void *data); void devres_free(void *res); void devres_add(struct device *dev, void *res); void *devres_find(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); void *devres_get(struct device *dev, void *new_res, dr_match_t match, void *match_data); void *devres_remove(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); int devres_destroy(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); int devres_release(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); /* devres group */ void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp); void devres_close_group(struct device *dev, void *id); void devres_remove_group(struct device *dev, void *id); int devres_release_group(struct device *dev, void *id); /* managed devm_k.alloc/kfree for device drivers */ void * __alloc_size(2) devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); void * __must_check __realloc_size(3) devm_krealloc(struct device *dev, void *ptr, size_t size, gfp_t gfp); static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) { return devm_kmalloc(dev, size, gfp | __GFP_ZERO); } static inline void *devm_kmalloc_array(struct device *dev, size_t n, size_t size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; return devm_kmalloc(dev, bytes, flags); } static inline void *devm_kcalloc(struct device *dev, size_t n, size_t size, gfp_t flags) { return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); } static inline __realloc_size(3, 4) void * __must_check devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(new_n, new_size, &bytes))) return NULL; return devm_krealloc(dev, p, bytes, flags); } void devm_kfree(struct device *dev, const void *p); void * __realloc_size(3) devm_kmemdup(struct device *dev, const void *src, size_t len,
gfp_t gfp); static inline void *devm_kmemdup_array(struct device *dev, const void *src, size_t n, size_t size, gfp_t flags) { return devm_kmemdup(dev, src, size_mul(size, n), flags); } char * __malloc devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp); char * __printf(3, 0) __malloc devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap); char * __printf(3, 4) __malloc devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order); void devm_free_pages(struct device *dev, unsigned long addr); #ifdef CONFIG_HAS_IOMEM void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res); void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res); void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index, resource_size_t *size); #else static inline void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res) { return IOMEM_ERR_PTR(-EINVAL); } static inline void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res) { return IOMEM_ERR_PTR(-EINVAL); } static inline void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index, resource_size_t *size) { return IOMEM_ERR_PTR(-EINVAL); } #endif /* allows adding/removing a custom action to/from the devres stack */ int devm_remove_action_nowarn(struct device *dev, void (*action)(void *), void *data); /** * devm_remove_action() - removes previously added custom action * @dev: Device that owns the action * @action: Function implementing the action * @data: Pointer to data passed to @action implementation * * Removes instance of @action previously added by devm_add_action(). * Both action and data should match one of the existing entries. */ static inline void devm_remove_action(struct device *dev, void (*action)(void *), void *data) { WARN_ON(devm_remove_action_nowarn(dev, action, data)); } void devm_release_action(struct device *dev, void (*action)(void *), void *data); int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name); #define devm_add_action(dev, action, data) \ __devm_add_action(dev, action, data, #action) static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *), void *data, const char *name) { int ret; ret = __devm_add_action(dev, action, data, name); if (ret) action(data); return ret; } #define devm_add_action_or_reset(dev, action, data) \ __devm_add_action_or_reset(dev, action, data, #action) bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data); #endif /* _DEVICE_DEVRES_H_ */
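/*
 * Usage sketch (illustrative only, not part of this interface): a
 * probe routine combining devm_kzalloc() with
 * devm_add_action_or_reset(), so the cleanup action runs automatically
 * on driver detach, and runs immediately if registration fails.
 * struct my_hw and my_hw_disable() are hypothetical names:
 *
 *	struct my_hw *hw;
 *	int ret;
 *
 *	hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
 *	if (!hw)
 *		return -ENOMEM;
 *
 *	ret = devm_add_action_or_reset(dev, my_hw_disable, hw);
 *	if (ret)
 *		return ret;
 */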
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable * * Current development and maintenance by: * (c) 2000, 2001 Robert Baruch (autophile@starband.net) * (c) 2004, 2005 Daniel Drake <dsd@gentoo.org> * * Developed with the assistance of: * (c) 2002 Alan Stern <stern@rowland.org> * * Flash support based on earlier work by: * (c) 2002 Thomas Kreiling <usbdev@sm04.de> * * Many originally ATAPI devices were slightly modified to meet the USB * market by using some kind of translation from ATAPI to USB on the host, * and the peripheral would translate from USB back to ATAPI. * * SCM Microsystems (www.scmmicro.com) makes a device, sold to OEMs only, * which does the USB-to-ATAPI conversion. By obtaining the data sheet on * their device under nondisclosure agreement, I have been able to write * this driver for Linux. * * The chip used in the device can also be used for EPP and ISA translation. * This driver is only guaranteed to work with the ATAPI translation. * * See the Kconfig help text for a list of devices known to be supported by * this driver. */ #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/string_choices.h> #include <linux/cdrom.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" #include "scsiglue.h" #define DRV_NAME "ums-usbat" MODULE_DESCRIPTION("Driver for SCM Microsystems (a.k.a.
Shuttle) USB-ATAPI cable"); MODULE_AUTHOR("Daniel Drake <dsd@gentoo.org>, Robert Baruch <autophile@starband.net>"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS("USB_STORAGE"); /* Supported device types */ #define USBAT_DEV_HP8200 0x01 #define USBAT_DEV_FLASH 0x02 #define USBAT_EPP_PORT 0x10 #define USBAT_EPP_REGISTER 0x30 #define USBAT_ATA 0x40 #define USBAT_ISA 0x50 /* Commands (need to be logically OR'd with an access type) */ #define USBAT_CMD_READ_REG 0x00 #define USBAT_CMD_WRITE_REG 0x01 #define USBAT_CMD_READ_BLOCK 0x02 #define USBAT_CMD_WRITE_BLOCK 0x03 #define USBAT_CMD_COND_READ_BLOCK 0x04 #define USBAT_CMD_COND_WRITE_BLOCK 0x05 #define USBAT_CMD_WRITE_REGS 0x07 /* Commands (these don't need an access type) */ #define USBAT_CMD_EXEC_CMD 0x80 #define USBAT_CMD_SET_FEAT 0x81 #define USBAT_CMD_UIO 0x82 /* Methods of accessing UIO register */ #define USBAT_UIO_READ 1 #define USBAT_UIO_WRITE 0 /* Qualifier bits */ #define USBAT_QUAL_FCQ 0x20 /* full compare */ #define USBAT_QUAL_ALQ 0x10 /* auto load subcount */ /* USBAT Flash Media status types */ #define USBAT_FLASH_MEDIA_NONE 0 #define USBAT_FLASH_MEDIA_CF 1 /* USBAT Flash Media change types */ #define USBAT_FLASH_MEDIA_SAME 0 #define USBAT_FLASH_MEDIA_CHANGED 1 /* USBAT ATA registers */ #define USBAT_ATA_DATA 0x10 /* read/write data (R/W) */ #define USBAT_ATA_FEATURES 0x11 /* set features (W) */ #define USBAT_ATA_ERROR 0x11 /* error (R) */ #define USBAT_ATA_SECCNT 0x12 /* sector count (R/W) */ #define USBAT_ATA_SECNUM 0x13 /* sector number (R/W) */ #define USBAT_ATA_LBA_ME 0x14 /* cylinder low (R/W) */ #define USBAT_ATA_LBA_HI 0x15 /* cylinder high (R/W) */ #define USBAT_ATA_DEVICE 0x16 /* head/device selection (R/W) */ #define USBAT_ATA_STATUS 0x17 /* device status (R) */ #define USBAT_ATA_CMD 0x17 /* device command (W) */ #define USBAT_ATA_ALTSTATUS 0x0E /* status (no clear IRQ) (R) */ /* USBAT User I/O Data registers */ #define USBAT_UIO_EPAD 0x80 /* Enable Peripheral Control Signals */ #define USBAT_UIO_CDT 0x40 /* Card Detect (Read Only) */ /* CDT = ACKD & !UI1 & !UI0 */ #define USBAT_UIO_1 0x20 /* I/O 1 */ #define USBAT_UIO_0 0x10 /* I/O 0 */ #define USBAT_UIO_EPP_ATA 0x08 /* 1=EPP mode, 0=ATA mode */ #define USBAT_UIO_UI1 0x04 /* Input 1 */ #define USBAT_UIO_UI0 0x02 /* Input 0 */ #define USBAT_UIO_INTR_ACK 0x01 /* Interrupt (ATA/ISA)/Acknowledge (EPP) */ /* USBAT User I/O Enable registers */ #define USBAT_UIO_DRVRST 0x80 /* Reset Peripheral */ #define USBAT_UIO_ACKD 0x40 /* Enable Card Detect */ #define USBAT_UIO_OE1 0x20 /* I/O 1 set=output/clr=input */ /* If ACKD=1, set OE1 to 1 also.
*/ #define USBAT_UIO_OE0 0x10 /* I/O 0 set=output/clr=input */ #define USBAT_UIO_ADPRST 0x01 /* Reset SCM chip */ /* USBAT Features */ #define USBAT_FEAT_ETEN 0x80 /* External trigger enable */ #define USBAT_FEAT_U1 0x08 #define USBAT_FEAT_U0 0x04 #define USBAT_FEAT_ET1 0x02 #define USBAT_FEAT_ET2 0x01 struct usbat_info { int devicetype; /* Used for Flash readers only */ unsigned long sectors; /* total sector count */ unsigned long ssize; /* sector size in bytes */ unsigned char sense_key; unsigned long sense_asc; /* additional sense code */ unsigned long sense_ascq; /* additional sense code qualifier */ }; #define short_pack(LSB,MSB) ( ((u16)(LSB)) | ( ((u16)(MSB))<<8 ) ) #define LSB_of(s) ((s)&0xFF) #define MSB_of(s) ((s)>>8) static int transferred = 0; static int usbat_flash_transport(struct scsi_cmnd * srb, struct us_data *us); static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us); static int init_usbat_cd(struct us_data *us); static int init_usbat_flash(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } static const struct usb_device_id usbat_usb_ids[] = { # include "unusual_usbat.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usbat_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static const struct us_unusual_dev usbat_unusual_dev_list[] = { # include "unusual_usbat.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV /* * Convenience function to produce an ATA read/write sectors command * Use cmd=0x20 for read, cmd=0x30 for write */ static void usbat_pack_ata_sector_cmd(unsigned char *buf, unsigned char thistime, u32 sector, unsigned char cmd) { buf[0] = 0; buf[1] = thistime; buf[2] = sector & 0xFF; buf[3] = (sector >> 8) & 0xFF; buf[4] = (sector >> 16) & 0xFF; buf[5] = 0xE0 | ((sector >> 24) & 0x0F); buf[6] = cmd; } /* * Convenience function to get the device type (flash or hp8200) */ static int usbat_get_device_type(struct us_data *us) { return ((struct usbat_info*)us->extra)->devicetype; } /* * Read a register from the device */ static int usbat_read(struct us_data *us, unsigned char access, unsigned char reg, unsigned char *content) { return usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe, access | USBAT_CMD_READ_REG, 0xC0, (u16)reg, 0, content, 1); } /* * Write to a register on the device */ static int usbat_write(struct us_data *us, unsigned char access, unsigned char reg, unsigned char content) { return usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, access | USBAT_CMD_WRITE_REG, 0x40, short_pack(reg, content), 0, NULL, 0); } /* * Convenience function to perform a bulk read */ static int usbat_bulk_read(struct us_data *us, void* buf, unsigned int len, int use_sg) { if (len == 0) return USB_STOR_XFER_GOOD; usb_stor_dbg(us, "len = %d\n", len); return usb_stor_bulk_transfer_sg(us, us->recv_bulk_pipe, buf, len, use_sg, NULL); } /* * Convenience function to perform a bulk write */ static int usbat_bulk_write(struct us_data *us, void* buf, unsigned int len, int use_sg) { if (len == 0) return 
USB_STOR_XFER_GOOD; usb_stor_dbg(us, "len = %d\n", len); return usb_stor_bulk_transfer_sg(us, us->send_bulk_pipe, buf, len, use_sg, NULL); } /* * Some USBAT-specific commands can only be executed over a command transport * This transport allows one (len=8) or two (len=16) vendor-specific commands * to be executed. */ static int usbat_execute_command(struct us_data *us, unsigned char *commands, unsigned int len) { return usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, USBAT_CMD_EXEC_CMD, 0x40, 0, 0, commands, len); } /* * Read the status register */ static int usbat_get_status(struct us_data *us, unsigned char *status) { int rc; rc = usbat_read(us, USBAT_ATA, USBAT_ATA_STATUS, status); usb_stor_dbg(us, "0x%02X\n", *status); return rc; } /* * Check the device status */ static int usbat_check_status(struct us_data *us) { unsigned char *reply = us->iobuf; int rc; rc = usbat_get_status(us, reply); if (rc != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_FAILED; /* error/check condition (0x51 is ok) */ if (*reply & 0x01 && *reply != 0x51) return USB_STOR_TRANSPORT_FAILED; /* device fault */ if (*reply & 0x20) return USB_STOR_TRANSPORT_FAILED; return USB_STOR_TRANSPORT_GOOD; } /* * Stores critical information in internal registers in preparation for the execution * of a conditional usbat_read_blocks or usbat_write_blocks call. */ static int usbat_set_shuttle_features(struct us_data *us, unsigned char external_trigger, unsigned char epp_control, unsigned char mask_byte, unsigned char test_pattern, unsigned char subcountH, unsigned char subcountL) { unsigned char *command = us->iobuf; command[0] = 0x40; command[1] = USBAT_CMD_SET_FEAT; /* * The only bit relevant to ATA access is bit 6 * which defines 8 bit data access (set) or 16 bit (unset) */ command[2] = epp_control; /* * If FCQ is set in the qualifier (defined in R/W cmd), then bits U0, U1, * ET1 and ET2 define an external event to be checked for on event of a * _read_blocks or _write_blocks operation. The read/write will not take * place unless the defined trigger signal is active. */ command[3] = external_trigger; /* * The resultant byte of the mask operation (see mask_byte) is compared for * equivalence with this test pattern. If equal, the read/write will take * place. */ command[4] = test_pattern; /* * This value is logically ANDed with the status register field specified * in the read/write command. */ command[5] = mask_byte; /* * If ALQ is set in the qualifier, this field contains the address of the * registers where the byte count should be read for transferring the data. * If ALQ is not set, then this field contains the number of bytes to be * transferred. */ command[6] = subcountL; command[7] = subcountH; return usbat_execute_command(us, command, 8); } /* * Block, waiting for an ATA device to become not busy or to report * an error condition. */ static int usbat_wait_not_busy(struct us_data *us, int minutes) { int i; int result; unsigned char *status = us->iobuf; /* * Synchronizing cache on a CDR could take a heck of a long time, * but probably not more than 10 minutes or so. On the other hand, * doing a full blank on a CDRW at speed 1 will take about 75 * minutes! 
*/ for (i=0; i<1200+minutes*60; i++) { result = usbat_get_status(us, status); if (result!=USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (*status & 0x01) { /* check condition */ result = usbat_read(us, USBAT_ATA, 0x10, status); return USB_STOR_TRANSPORT_FAILED; } if (*status & 0x20) /* device fault */ return USB_STOR_TRANSPORT_FAILED; if ((*status & 0x80)==0x00) { /* not busy */ usb_stor_dbg(us, "Waited not busy for %d steps\n", i); return USB_STOR_TRANSPORT_GOOD; } if (i<500) msleep(10); /* 5 seconds */ else if (i<700) msleep(50); /* 10 seconds */ else if (i<1200) msleep(100); /* 50 seconds */ else msleep(1000); /* X minutes */ } usb_stor_dbg(us, "Waited not busy for %d minutes, timing out\n", minutes); return USB_STOR_TRANSPORT_FAILED; } /* * Read block data from the data register */ static int usbat_read_block(struct us_data *us, void* buf, unsigned short len, int use_sg) { int result; unsigned char *command = us->iobuf; if (!len) return USB_STOR_TRANSPORT_GOOD; command[0] = 0xC0; command[1] = USBAT_ATA | USBAT_CMD_READ_BLOCK; command[2] = USBAT_ATA_DATA; command[3] = 0; command[4] = 0; command[5] = 0; command[6] = LSB_of(len); command[7] = MSB_of(len); result = usbat_execute_command(us, command, 8); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; result = usbat_bulk_read(us, buf, len, use_sg); return (result == USB_STOR_XFER_GOOD ? USB_STOR_TRANSPORT_GOOD : USB_STOR_TRANSPORT_ERROR); } /* * Write block data via the data register */ static int usbat_write_block(struct us_data *us, unsigned char access, void* buf, unsigned short len, int minutes, int use_sg) { int result; unsigned char *command = us->iobuf; if (!len) return USB_STOR_TRANSPORT_GOOD; command[0] = 0x40; command[1] = access | USBAT_CMD_WRITE_BLOCK; command[2] = USBAT_ATA_DATA; command[3] = 0; command[4] = 0; command[5] = 0; command[6] = LSB_of(len); command[7] = MSB_of(len); result = usbat_execute_command(us, command, 8); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; result = usbat_bulk_write(us, buf, len, use_sg); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; return usbat_wait_not_busy(us, minutes); } /* * Process read and write requests */ static int usbat_hp8200e_rw_block_test(struct us_data *us, unsigned char access, unsigned char *registers, unsigned char *data_out, unsigned short num_registers, unsigned char data_reg, unsigned char status_reg, unsigned char timeout, unsigned char qualifier, int direction, void *buf, unsigned short len, int use_sg, int minutes) { int result; unsigned int pipe = (direction == DMA_FROM_DEVICE) ? us->recv_bulk_pipe : us->send_bulk_pipe; unsigned char *command = us->iobuf; int i, j; int cmdlen; unsigned char *data = us->iobuf; unsigned char *status = us->iobuf; BUG_ON(num_registers > US_IOBUF_SIZE/2); for (i=0; i<20; i++) { /* * The first time we send the full command, which consists * of downloading the SCSI command followed by downloading * the data via a write-and-test. Any other time we only * send the command to download the data -- the SCSI command * is still 'active' in some sense in the device. * * We're only going to try sending the data 20 times. After * that, we just return a failure. */ if (i==0) { cmdlen = 16; /* * Write to multiple registers * Not really sure the 0x07, 0x17, 0xfc, 0xe7 is * necessary here, but that's what came out of the * trace every single time.
*/ command[0] = 0x40; command[1] = access | USBAT_CMD_WRITE_REGS; command[2] = 0x07; command[3] = 0x17; command[4] = 0xFC; command[5] = 0xE7; command[6] = LSB_of(num_registers*2); command[7] = MSB_of(num_registers*2); } else cmdlen = 8; /* Conditionally read or write blocks */ command[cmdlen-8] = (direction==DMA_TO_DEVICE ? 0x40 : 0xC0); command[cmdlen-7] = access | (direction==DMA_TO_DEVICE ? USBAT_CMD_COND_WRITE_BLOCK : USBAT_CMD_COND_READ_BLOCK); command[cmdlen-6] = data_reg; command[cmdlen-5] = status_reg; command[cmdlen-4] = timeout; command[cmdlen-3] = qualifier; command[cmdlen-2] = LSB_of(len); command[cmdlen-1] = MSB_of(len); result = usbat_execute_command(us, command, cmdlen); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (i==0) { for (j=0; j<num_registers; j++) { data[j<<1] = registers[j]; data[1+(j<<1)] = data_out[j]; } result = usbat_bulk_write(us, data, num_registers*2, 0); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; } result = usb_stor_bulk_transfer_sg(us, pipe, buf, len, use_sg, NULL); /* * If we get a stall on the bulk download, we'll retry * the bulk download -- but not the SCSI command because * in some sense the SCSI command is still 'active' and * waiting for the data. Don't ask me why this should be; * I'm only following what the Windoze driver did. * * Note that a stall for the test-and-read/write command means * that the test failed. In this case we're testing to make * sure that the device is error-free * (i.e. bit 0 -- CHK -- of status is 0). The most likely * hypothesis is that the USBAT chip somehow knows what * the device will accept, but doesn't give the device any * data until all data is received. Thus, the device would * still be waiting for the first byte of data if a stall * occurs, even if the stall implies that some data was * transferred. */ if (result == USB_STOR_XFER_SHORT || result == USB_STOR_XFER_STALLED) { /* * If we're reading and we stalled, then clear * the bulk output pipe only the first time. */ if (direction==DMA_FROM_DEVICE && i==0) { if (usb_stor_clear_halt(us, us->send_bulk_pipe) < 0) return USB_STOR_TRANSPORT_ERROR; } /* * Read status: is the device angry, or just busy? */ result = usbat_read(us, USBAT_ATA, direction==DMA_TO_DEVICE ? USBAT_ATA_STATUS : USBAT_ATA_ALTSTATUS, status); if (result!=USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (*status & 0x01) /* check condition */ return USB_STOR_TRANSPORT_FAILED; if (*status & 0x20) /* device fault */ return USB_STOR_TRANSPORT_FAILED; usb_stor_dbg(us, "Redoing %s\n", str_write_read(direction == DMA_TO_DEVICE)); } else if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; else return usbat_wait_not_busy(us, minutes); } usb_stor_dbg(us, "Bummer! %s bulk data 20 times failed\n", direction == DMA_TO_DEVICE ? "Writing" : "Reading"); return USB_STOR_TRANSPORT_FAILED; } /* * Write to multiple registers: * Allows us to write specific data to any registers. The data to be written * gets packed in this sequence: reg0, data0, reg1, data1, ..., regN, dataN * which gets sent through bulk out. * Not designed for large transfers of data! 
*/ static int usbat_multiple_write(struct us_data *us, unsigned char *registers, unsigned char *data_out, unsigned short num_registers) { int i, result; unsigned char *data = us->iobuf; unsigned char *command = us->iobuf; BUG_ON(num_registers > US_IOBUF_SIZE/2); /* Write to multiple registers, ATA access */ command[0] = 0x40; command[1] = USBAT_ATA | USBAT_CMD_WRITE_REGS; /* No relevance */ command[2] = 0; command[3] = 0; command[4] = 0; command[5] = 0; /* Number of bytes to be transferred (incl. addresses and data) */ command[6] = LSB_of(num_registers*2); command[7] = MSB_of(num_registers*2); /* The setup command */ result = usbat_execute_command(us, command, 8); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* Create the reg/data, reg/data sequence */ for (i=0; i<num_registers; i++) { data[i<<1] = registers[i]; data[1+(i<<1)] = data_out[i]; } /* Send the data */ result = usbat_bulk_write(us, data, num_registers*2, 0); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (usbat_get_device_type(us) == USBAT_DEV_HP8200) return usbat_wait_not_busy(us, 0); else return USB_STOR_TRANSPORT_GOOD; } /* * Conditionally read blocks from device: * Allows us to read blocks from a specific data register, based upon the * condition that a status register can be successfully masked with a status * qualifier. If this condition is not initially met, the read will wait * up until a maximum amount of time has elapsed, as specified by timeout. * The read will start when the condition is met; otherwise the command aborts. * * The qualifier defined here is not the value that is masked; it defines * conditions for the read to take place. The actual masked qualifier (and * other related details) are defined beforehand with _set_shuttle_features(). */ static int usbat_read_blocks(struct us_data *us, void* buffer, int len, int use_sg) { int result; unsigned char *command = us->iobuf; command[0] = 0xC0; command[1] = USBAT_ATA | USBAT_CMD_COND_READ_BLOCK; command[2] = USBAT_ATA_DATA; command[3] = USBAT_ATA_STATUS; command[4] = 0xFD; /* Timeout (ms) */ command[5] = USBAT_QUAL_FCQ; command[6] = LSB_of(len); command[7] = MSB_of(len); /* Multiple block read setup command */ result = usbat_execute_command(us, command, 8); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_FAILED; /* Read the blocks we just asked for */ result = usbat_bulk_read(us, buffer, len, use_sg); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_FAILED; return USB_STOR_TRANSPORT_GOOD; } /* * Conditionally write blocks to device: * Allows us to write blocks to a specific data register, based upon the * condition that a status register can be successfully masked with a status * qualifier. If this condition is not initially met, the write will wait * up until a maximum amount of time has elapsed, as specified by timeout. * The write will start when the condition is met; otherwise the command aborts. * * The qualifier defined here is not the value that is masked; it defines * conditions for the write to take place. The actual masked qualifier (and * other related details) are defined beforehand with _set_shuttle_features().
*/ static int usbat_write_blocks(struct us_data *us, void* buffer, int len, int use_sg) { int result; unsigned char *command = us->iobuf; command[0] = 0x40; command[1] = USBAT_ATA | USBAT_CMD_COND_WRITE_BLOCK; command[2] = USBAT_ATA_DATA; command[3] = USBAT_ATA_STATUS; command[4] = 0xFD; /* Timeout (ms) */ command[5] = USBAT_QUAL_FCQ; command[6] = LSB_of(len); command[7] = MSB_of(len); /* Multiple block write setup command */ result = usbat_execute_command(us, command, 8); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_FAILED; /* Write the data */ result = usbat_bulk_write(us, buffer, len, use_sg); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_FAILED; return USB_STOR_TRANSPORT_GOOD; } /* * Read the User IO register */ static int usbat_read_user_io(struct us_data *us, unsigned char *data_flags) { int result; result = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe, USBAT_CMD_UIO, 0xC0, 0, 0, data_flags, USBAT_UIO_READ); usb_stor_dbg(us, "UIO register reads %02X\n", *data_flags); return result; } /* * Write to the User IO register */ static int usbat_write_user_io(struct us_data *us, unsigned char enable_flags, unsigned char data_flags) { return usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, USBAT_CMD_UIO, 0x40, short_pack(enable_flags, data_flags), 0, NULL, USBAT_UIO_WRITE); } /* * Reset the device * Often needed on media change. */ static int usbat_device_reset(struct us_data *us) { int rc; /* * Reset peripheral, enable peripheral control signals * (bring reset signal up) */ rc = usbat_write_user_io(us, USBAT_UIO_DRVRST | USBAT_UIO_OE1 | USBAT_UIO_OE0, USBAT_UIO_EPAD | USBAT_UIO_1); if (rc != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* * Enable peripheral control signals * (bring reset signal down) */ rc = usbat_write_user_io(us, USBAT_UIO_OE1 | USBAT_UIO_OE0, USBAT_UIO_EPAD | USBAT_UIO_1); if (rc != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; return USB_STOR_TRANSPORT_GOOD; } /* * Enable card detect */ static int usbat_device_enable_cdt(struct us_data *us) { int rc; /* Enable peripheral control signals and card detect */ rc = usbat_write_user_io(us, USBAT_UIO_ACKD | USBAT_UIO_OE1 | USBAT_UIO_OE0, USBAT_UIO_EPAD | USBAT_UIO_1); if (rc != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; return USB_STOR_TRANSPORT_GOOD; } /* * Determine if media is present. 
*/ static int usbat_flash_check_media_present(struct us_data *us, unsigned char *uio) { if (*uio & USBAT_UIO_UI0) { usb_stor_dbg(us, "no media detected\n"); return USBAT_FLASH_MEDIA_NONE; } return USBAT_FLASH_MEDIA_CF; } /* * Determine if media has changed since last operation */ static int usbat_flash_check_media_changed(struct us_data *us, unsigned char *uio) { if (*uio & USBAT_UIO_0) { usb_stor_dbg(us, "media change detected\n"); return USBAT_FLASH_MEDIA_CHANGED; } return USBAT_FLASH_MEDIA_SAME; } /* * Check for media change / no media and handle the situation appropriately */ static int usbat_flash_check_media(struct us_data *us, struct usbat_info *info) { int rc; unsigned char *uio = us->iobuf; rc = usbat_read_user_io(us, uio); if (rc != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* Check for media existence */ rc = usbat_flash_check_media_present(us, uio); if (rc == USBAT_FLASH_MEDIA_NONE) { info->sense_key = 0x02; info->sense_asc = 0x3A; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; } /* Check for media change */ rc = usbat_flash_check_media_changed(us, uio); if (rc == USBAT_FLASH_MEDIA_CHANGED) { /* Reset and re-enable card detect */ rc = usbat_device_reset(us); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; rc = usbat_device_enable_cdt(us); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; msleep(50); rc = usbat_read_user_io(us, uio); if (rc != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; info->sense_key = UNIT_ATTENTION; info->sense_asc = 0x28; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; } return USB_STOR_TRANSPORT_GOOD; } /* * Determine whether we are controlling a flash-based reader/writer, * or an HP8200-based CD drive. * Sets transport functions as appropriate. */ static int usbat_identify_device(struct us_data *us, struct usbat_info *info) { int rc; unsigned char status; if (!us || !info) return USB_STOR_TRANSPORT_ERROR; rc = usbat_device_reset(us); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; msleep(500); /* * In an attempt to distinguish between HP CDRWs and flash readers, we now * execute the IDENTIFY PACKET DEVICE command. On ATA devices (i.e. flash * readers), this command should fail with an error. On ATAPI devices (i.e. * CDROM drives), it should succeed.
*/ rc = usbat_write(us, USBAT_ATA, USBAT_ATA_CMD, 0xA1); if (rc != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; rc = usbat_get_status(us, &status); if (rc != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* Check for error bit, or if the command 'fell through' */ if (status == 0xA1 || !(status & 0x01)) { /* Device is HP 8200 */ usb_stor_dbg(us, "Detected HP8200 CDRW\n"); info->devicetype = USBAT_DEV_HP8200; } else { /* Device is a CompactFlash reader/writer */ usb_stor_dbg(us, "Detected Flash reader/writer\n"); info->devicetype = USBAT_DEV_FLASH; } return USB_STOR_TRANSPORT_GOOD; } /* * Set the transport function based on the device type */ static int usbat_set_transport(struct us_data *us, struct usbat_info *info, int devicetype) { if (!info->devicetype) info->devicetype = devicetype; if (!info->devicetype) usbat_identify_device(us, info); switch (info->devicetype) { default: return USB_STOR_TRANSPORT_ERROR; case USBAT_DEV_HP8200: us->transport = usbat_hp8200e_transport; break; case USBAT_DEV_FLASH: us->transport = usbat_flash_transport; break; } return 0; } /* * Read the media capacity */ static int usbat_flash_get_sector_count(struct us_data *us, struct usbat_info *info) { unsigned char registers[3] = { USBAT_ATA_SECCNT, USBAT_ATA_DEVICE, USBAT_ATA_CMD, }; unsigned char command[3] = { 0x01, 0xA0, 0xEC }; unsigned char *reply; unsigned char status; int rc; if (!us || !info) return USB_STOR_TRANSPORT_ERROR; reply = kmalloc(512, GFP_NOIO); if (!reply) return USB_STOR_TRANSPORT_ERROR; /* ATA command: IDENTIFY DEVICE */ rc = usbat_multiple_write(us, registers, command, 3); if (rc != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Gah! identify_device failed\n"); rc = USB_STOR_TRANSPORT_ERROR; goto leave; } /* Read device status */ if (usbat_get_status(us, &status) != USB_STOR_XFER_GOOD) { rc = USB_STOR_TRANSPORT_ERROR; goto leave; } msleep(100); /* Read the device identification data */ rc = usbat_read_block(us, reply, 512, 0); if (rc != USB_STOR_TRANSPORT_GOOD) goto leave; info->sectors = ((u32)(reply[117]) << 24) | ((u32)(reply[116]) << 16) | ((u32)(reply[115]) << 8) | ((u32)(reply[114]) ); rc = USB_STOR_TRANSPORT_GOOD; leave: kfree(reply); return rc; } /* * Read data from device */ static int usbat_flash_read_data(struct us_data *us, struct usbat_info *info, u32 sector, u32 sectors) { unsigned char registers[7] = { USBAT_ATA_FEATURES, USBAT_ATA_SECCNT, USBAT_ATA_SECNUM, USBAT_ATA_LBA_ME, USBAT_ATA_LBA_HI, USBAT_ATA_DEVICE, USBAT_ATA_STATUS, }; unsigned char command[7]; unsigned char *buffer; unsigned char thistime; unsigned int totallen, alloclen; int len, result; unsigned int sg_offset = 0; struct scatterlist *sg = NULL; result = usbat_flash_check_media(us, info); if (result != USB_STOR_TRANSPORT_GOOD) return result; /* * We're working in LBA mode. According to the ATA spec, * we can support up to 28-bit addressing. I don't know if the device * supports beyond 24-bit addressing. It's kind of hard to test * since it requires a > 8 GB CF card. */ if (sector > 0x0FFFFFFF) return USB_STOR_TRANSPORT_ERROR; totallen = sectors * info->ssize; /* * Since we don't read more than 64 KB at a time, we have to create * a bounce buffer and move the data a piece at a time between the * bounce buffer and the actual transfer buffer.
*/ alloclen = min(totallen, 65536u); buffer = kmalloc(alloclen, GFP_NOIO); if (buffer == NULL) return USB_STOR_TRANSPORT_ERROR; do { /* * loop, never allocate or transfer more than 64k at once * (min(128k, 255*info->ssize) is the real limit) */ len = min(totallen, alloclen); thistime = (len / info->ssize) & 0xff; /* ATA command 0x20 (READ SECTORS) */ usbat_pack_ata_sector_cmd(command, thistime, sector, 0x20); /* Write/execute ATA read command */ result = usbat_multiple_write(us, registers, command, 7); if (result != USB_STOR_TRANSPORT_GOOD) goto leave; /* Read the data we just requested */ result = usbat_read_blocks(us, buffer, len, 0); if (result != USB_STOR_TRANSPORT_GOOD) goto leave; usb_stor_dbg(us, "%d bytes\n", len); /* Store the data in the transfer buffer */ usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &sg_offset, TO_XFER_BUF); sector += thistime; totallen -= len; } while (totallen > 0); kfree(buffer); return USB_STOR_TRANSPORT_GOOD; leave: kfree(buffer); return USB_STOR_TRANSPORT_ERROR; } /* * Write data to device */ static int usbat_flash_write_data(struct us_data *us, struct usbat_info *info, u32 sector, u32 sectors) { unsigned char registers[7] = { USBAT_ATA_FEATURES, USBAT_ATA_SECCNT, USBAT_ATA_SECNUM, USBAT_ATA_LBA_ME, USBAT_ATA_LBA_HI, USBAT_ATA_DEVICE, USBAT_ATA_STATUS, }; unsigned char command[7]; unsigned char *buffer; unsigned char thistime; unsigned int totallen, alloclen; int len, result; unsigned int sg_offset = 0; struct scatterlist *sg = NULL; result = usbat_flash_check_media(us, info); if (result != USB_STOR_TRANSPORT_GOOD) return result; /* * We're working in LBA mode. According to the ATA spec, * we can support up to 28-bit addressing. I don't know if the device * supports beyond 24-bit addressing. It's kind of hard to test * since it requires > 8 GB media. */ if (sector > 0x0FFFFFFF) return USB_STOR_TRANSPORT_ERROR; totallen = sectors * info->ssize; /* * Since we don't write more than 64 KB at a time, we have to create * a bounce buffer and move the data a piece at a time between the * bounce buffer and the actual transfer buffer.
*/ alloclen = min(totallen, 65536u); buffer = kmalloc(alloclen, GFP_NOIO); if (buffer == NULL) return USB_STOR_TRANSPORT_ERROR; do { /* * loop, never allocate or transfer more than 64k at once * (min(128k, 255*info->ssize) is the real limit) */ len = min(totallen, alloclen); thistime = (len / info->ssize) & 0xff; /* Get the data from the transfer buffer */ usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &sg_offset, FROM_XFER_BUF); /* ATA command 0x30 (WRITE SECTORS) */ usbat_pack_ata_sector_cmd(command, thistime, sector, 0x30); /* Write/execute ATA write command */ result = usbat_multiple_write(us, registers, command, 7); if (result != USB_STOR_TRANSPORT_GOOD) goto leave; /* Write the data */ result = usbat_write_blocks(us, buffer, len, 0); if (result != USB_STOR_TRANSPORT_GOOD) goto leave; sector += thistime; totallen -= len; } while (totallen > 0); kfree(buffer); return result; leave: kfree(buffer); return USB_STOR_TRANSPORT_ERROR; } /* * Squeeze a potentially huge (> 65535 byte) read10 command into * a little ( <= 65535 byte) ATAPI pipe */ static int usbat_hp8200e_handle_read10(struct us_data *us, unsigned char *registers, unsigned char *data, struct scsi_cmnd *srb) { int result = USB_STOR_TRANSPORT_GOOD; unsigned char *buffer; unsigned int len; unsigned int sector; unsigned int sg_offset = 0; struct scatterlist *sg = NULL; usb_stor_dbg(us, "transfersize %d\n", srb->transfersize); if (scsi_bufflen(srb) < 0x10000) { result = usbat_hp8200e_rw_block_test(us, USBAT_ATA, registers, data, 19, USBAT_ATA_DATA, USBAT_ATA_STATUS, 0xFD, (USBAT_QUAL_FCQ | USBAT_QUAL_ALQ), DMA_FROM_DEVICE, scsi_sglist(srb), scsi_bufflen(srb), scsi_sg_count(srb), 1); return result; } /* * Since we're requesting more data than we can handle in * a single read command (max is 64k-1), we will perform * multiple reads, but each read must be in multiples of * a sector. Luckily the sector size is in srb->transfersize * (see linux/drivers/scsi/sr.c). */ if (data[7+0] == GPCMD_READ_CD) { len = short_pack(data[7+9], data[7+8]); len <<= 16; len |= data[7+7]; usb_stor_dbg(us, "GPCMD_READ_CD: len %d\n", len); srb->transfersize = scsi_bufflen(srb)/len; } if (!srb->transfersize) { srb->transfersize = 2048; /* A guess */ usb_stor_dbg(us, "transfersize 0, forcing %d\n", srb->transfersize); } /* * Since we only read in one block at a time, we have to create * a bounce buffer and move the data a piece at a time between the * bounce buffer and the actual transfer buffer. */ len = (65535/srb->transfersize) * srb->transfersize; usb_stor_dbg(us, "Max read is %d bytes\n", len); len = min(len, scsi_bufflen(srb)); buffer = kmalloc(len, GFP_NOIO); if (buffer == NULL) /* bloody hell! 
*/ return USB_STOR_TRANSPORT_FAILED; sector = short_pack(data[7+3], data[7+2]); sector <<= 16; sector |= short_pack(data[7+5], data[7+4]); transferred = 0; while (transferred != scsi_bufflen(srb)) { if (len > scsi_bufflen(srb) - transferred) len = scsi_bufflen(srb) - transferred; data[3] = len&0xFF; /* (cylL) = expected length (L) */ data[4] = (len>>8)&0xFF; /* (cylH) = expected length (H) */ /* Fix up the SCSI command sector and num sectors */ data[7+2] = MSB_of(sector>>16); /* SCSI command sector */ data[7+3] = LSB_of(sector>>16); data[7+4] = MSB_of(sector&0xFFFF); data[7+5] = LSB_of(sector&0xFFFF); if (data[7+0] == GPCMD_READ_CD) data[7+6] = 0; data[7+7] = MSB_of(len / srb->transfersize); /* SCSI command */ data[7+8] = LSB_of(len / srb->transfersize); /* num sectors */ result = usbat_hp8200e_rw_block_test(us, USBAT_ATA, registers, data, 19, USBAT_ATA_DATA, USBAT_ATA_STATUS, 0xFD, (USBAT_QUAL_FCQ | USBAT_QUAL_ALQ), DMA_FROM_DEVICE, buffer, len, 0, 1); if (result != USB_STOR_TRANSPORT_GOOD) break; /* Store the data in the transfer buffer */ usb_stor_access_xfer_buf(buffer, len, srb, &sg, &sg_offset, TO_XFER_BUF); /* Update the amount transferred and the sector number */ transferred += len; sector += len / srb->transfersize; } /* while transferred != scsi_bufflen(srb) */ kfree(buffer); return result; } static int usbat_select_and_test_registers(struct us_data *us) { int selector; unsigned char *status = us->iobuf; /* try device = master, then device = slave. */ for (selector = 0xA0; selector <= 0xB0; selector += 0x10) { if (usbat_write(us, USBAT_ATA, USBAT_ATA_DEVICE, selector) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (usbat_read(us, USBAT_ATA, USBAT_ATA_STATUS, status) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (usbat_read(us, USBAT_ATA, USBAT_ATA_DEVICE, status) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_ME, status) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_HI, status) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (usbat_write(us, USBAT_ATA, USBAT_ATA_LBA_ME, 0x55) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (usbat_write(us, USBAT_ATA, USBAT_ATA_LBA_HI, 0xAA) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_ME, status) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_ME, status) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; } return USB_STOR_TRANSPORT_GOOD; } /* * Initialize the USBAT processor and the storage device */ static int init_usbat(struct us_data *us, int devicetype) { int rc; struct usbat_info *info; unsigned char subcountH = USBAT_ATA_LBA_HI; unsigned char subcountL = USBAT_ATA_LBA_ME; unsigned char *status = us->iobuf; us->extra = kzalloc(sizeof(struct usbat_info), GFP_NOIO); if (!us->extra) return -ENOMEM; info = (struct usbat_info *) (us->extra); /* Enable peripheral control signals */ rc = usbat_write_user_io(us, USBAT_UIO_OE1 | USBAT_UIO_OE0, USBAT_UIO_EPAD | USBAT_UIO_1); if (rc != USB_STOR_XFER_GOOD) return -EIO; usb_stor_dbg(us, "INIT 1\n"); msleep(2000); rc = usbat_read_user_io(us, status); if (rc != USB_STOR_TRANSPORT_GOOD) return -EIO; usb_stor_dbg(us, "INIT 2\n"); rc = usbat_read_user_io(us, status); if (rc != USB_STOR_XFER_GOOD) return -EIO; rc = usbat_read_user_io(us, status); if (rc != USB_STOR_XFER_GOOD) return -EIO; usb_stor_dbg(us, "INIT 3\n"); rc = 
usbat_select_and_test_registers(us); if (rc != USB_STOR_TRANSPORT_GOOD) return -EIO; usb_stor_dbg(us, "INIT 4\n"); rc = usbat_read_user_io(us, status); if (rc != USB_STOR_XFER_GOOD) return -EIO; usb_stor_dbg(us, "INIT 5\n"); /* Enable peripheral control signals and card detect */ rc = usbat_device_enable_cdt(us); if (rc != USB_STOR_TRANSPORT_GOOD) return -EIO; usb_stor_dbg(us, "INIT 6\n"); rc = usbat_read_user_io(us, status); if (rc != USB_STOR_XFER_GOOD) return -EIO; usb_stor_dbg(us, "INIT 7\n"); msleep(1400); rc = usbat_read_user_io(us, status); if (rc != USB_STOR_XFER_GOOD) return -EIO; usb_stor_dbg(us, "INIT 8\n"); rc = usbat_select_and_test_registers(us); if (rc != USB_STOR_TRANSPORT_GOOD) return -EIO; usb_stor_dbg(us, "INIT 9\n"); /* At this point, we need to detect which device we are using */ if (usbat_set_transport(us, info, devicetype)) return -EIO; usb_stor_dbg(us, "INIT 10\n"); if (usbat_get_device_type(us) == USBAT_DEV_FLASH) { subcountH = 0x02; subcountL = 0x00; } rc = usbat_set_shuttle_features(us, (USBAT_FEAT_ETEN | USBAT_FEAT_ET2 | USBAT_FEAT_ET1), 0x00, 0x88, 0x08, subcountH, subcountL); if (rc != USB_STOR_XFER_GOOD) return -EIO; usb_stor_dbg(us, "INIT 11\n"); return 0; } /* * Transport for the HP 8200e */ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us) { int result; unsigned char *status = us->iobuf; unsigned char registers[32]; unsigned char data[32]; unsigned int len; int i; len = scsi_bufflen(srb); /* * Send A0 (ATA PACKET COMMAND). * Note: I guess we're never going to get any of the ATA * commands... just ATA Packet Commands. */ registers[0] = USBAT_ATA_FEATURES; registers[1] = USBAT_ATA_SECCNT; registers[2] = USBAT_ATA_SECNUM; registers[3] = USBAT_ATA_LBA_ME; registers[4] = USBAT_ATA_LBA_HI; registers[5] = USBAT_ATA_DEVICE; registers[6] = USBAT_ATA_CMD; data[0] = 0x00; data[1] = 0x00; data[2] = 0x00; data[3] = len&0xFF; /* (cylL) = expected length (L) */ data[4] = (len>>8)&0xFF; /* (cylH) = expected length (H) */ data[5] = 0xB0; /* (device sel) = slave */ data[6] = 0xA0; /* (command) = ATA PACKET COMMAND */ for (i=7; i<19; i++) { registers[i] = 0x10; data[i] = (i-7 >= srb->cmd_len) ? 0 : srb->cmnd[i-7]; } result = usbat_get_status(us, status); usb_stor_dbg(us, "Status = %02X\n", *status); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (srb->cmnd[0] == TEST_UNIT_READY) transferred = 0; if (srb->sc_data_direction == DMA_TO_DEVICE) { result = usbat_hp8200e_rw_block_test(us, USBAT_ATA, registers, data, 19, USBAT_ATA_DATA, USBAT_ATA_STATUS, 0xFD, (USBAT_QUAL_FCQ | USBAT_QUAL_ALQ), DMA_TO_DEVICE, scsi_sglist(srb), len, scsi_sg_count(srb), 10); if (result == USB_STOR_TRANSPORT_GOOD) { transferred += len; usb_stor_dbg(us, "Wrote %08X bytes\n", transferred); } return result; } else if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == GPCMD_READ_CD) { return usbat_hp8200e_handle_read10(us, registers, data, srb); } if (len > 0xFFFF) { usb_stor_dbg(us, "Error: len = %08X... what do I do now?\n", len); return USB_STOR_TRANSPORT_ERROR; } result = usbat_multiple_write(us, registers, data, 7); if (result != USB_STOR_TRANSPORT_GOOD) return result; /* * Write the 12-byte command header. * * If the command is BLANK then set the timer for 75 minutes. * Otherwise set it for 10 minutes. * * NOTE: THE 8200 DOCUMENTATION STATES THAT BLANKING A CDRW * AT SPEED 4 IS UNRELIABLE!!! */ result = usbat_write_block(us, USBAT_ATA, srb->cmnd, 12, srb->cmnd[0] == GPCMD_BLANK ? 
75 : 10, 0); if (result != USB_STOR_TRANSPORT_GOOD) return result; /* If there is response data to be read in then do it here. */ if (len != 0 && (srb->sc_data_direction == DMA_FROM_DEVICE)) { /* How many bytes to read in? Check cylL register */ if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_ME, status) != USB_STOR_XFER_GOOD) { return USB_STOR_TRANSPORT_ERROR; } if (len > 0xFF) { /* need to read cylH also */ len = *status; if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_HI, status) != USB_STOR_XFER_GOOD) { return USB_STOR_TRANSPORT_ERROR; } len += ((unsigned int) *status)<<8; } else len = *status; result = usbat_read_block(us, scsi_sglist(srb), len, scsi_sg_count(srb)); } return result; } /* * Transport for USBAT02-based CompactFlash and similar storage devices */ static int usbat_flash_transport(struct scsi_cmnd * srb, struct us_data *us) { int rc; struct usbat_info *info = (struct usbat_info *) (us->extra); unsigned long block, blocks; unsigned char *ptr = us->iobuf; static const unsigned char inquiry_response[36] = { 0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00 }; if (srb->cmnd[0] == INQUIRY) { usb_stor_dbg(us, "INQUIRY - Returning bogus response\n"); memcpy(ptr, inquiry_response, sizeof(inquiry_response)); fill_inquiry_response(us, ptr, 36); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == READ_CAPACITY) { rc = usbat_flash_check_media(us, info); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; rc = usbat_flash_get_sector_count(us, info); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; /* hard coded 512 byte sectors as per ATA spec */ info->ssize = 0x200; usb_stor_dbg(us, "READ_CAPACITY: %ld sectors, %ld bytes per sector\n", info->sectors, info->ssize); /* * build the reply * note: must return the sector number of the last sector, * *not* the total number of sectors */ ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); ((__be32 *) ptr)[1] = cpu_to_be32(info->ssize); usb_stor_set_xfer_buf(ptr, 8, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == MODE_SELECT_10) { usb_stor_dbg(us, "Gah! 
MODE_SELECT_10\n"); return USB_STOR_TRANSPORT_ERROR; } if (srb->cmnd[0] == READ_10) { block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); usb_stor_dbg(us, "READ_10: read block 0x%04lx count %ld\n", block, blocks); return usbat_flash_read_data(us, info, block, blocks); } if (srb->cmnd[0] == READ_12) { /* * I don't think we'll ever see a READ_12 but support it anyway */ block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | ((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9])); usb_stor_dbg(us, "READ_12: read block 0x%04lx count %ld\n", block, blocks); return usbat_flash_read_data(us, info, block, blocks); } if (srb->cmnd[0] == WRITE_10) { block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); usb_stor_dbg(us, "WRITE_10: write block 0x%04lx count %ld\n", block, blocks); return usbat_flash_write_data(us, info, block, blocks); } if (srb->cmnd[0] == WRITE_12) { /* * I don't think we'll ever see a WRITE_12 but support it anyway */ block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | ((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9])); usb_stor_dbg(us, "WRITE_12: write block 0x%04lx count %ld\n", block, blocks); return usbat_flash_write_data(us, info, block, blocks); } if (srb->cmnd[0] == TEST_UNIT_READY) { usb_stor_dbg(us, "TEST_UNIT_READY\n"); rc = usbat_flash_check_media(us, info); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; return usbat_check_status(us); } if (srb->cmnd[0] == REQUEST_SENSE) { usb_stor_dbg(us, "REQUEST_SENSE\n"); memset(ptr, 0, 18); ptr[0] = 0xF0; ptr[2] = info->sense_key; ptr[7] = 11; ptr[12] = info->sense_asc; ptr[13] = info->sense_ascq; usb_stor_set_xfer_buf(ptr, 18, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { /* * sure. whatever. not like we can stop the user from popping * the media out of the device (no locking doors, etc) */ return USB_STOR_TRANSPORT_GOOD; } usb_stor_dbg(us, "Gah! Unknown command: %d (0x%x)\n", srb->cmnd[0], srb->cmnd[0]); info->sense_key = 0x05; info->sense_asc = 0x20; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; } static int init_usbat_cd(struct us_data *us) { return init_usbat(us, USBAT_DEV_HP8200); } static int init_usbat_flash(struct us_data *us) { return init_usbat(us, USBAT_DEV_FLASH); } static struct scsi_host_template usbat_host_template; static int usbat_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - usbat_usb_ids) + usbat_unusual_dev_list, &usbat_host_template); if (result) return result; /* * The actual transport will be determined later by the * initialization routine; this is just a placeholder. 
*/ us->transport_name = "Shuttle USBAT"; us->transport = usbat_flash_transport; us->transport_reset = usb_stor_CB_reset; us->max_lun = 0; result = usb_stor_probe2(us); return result; } static struct usb_driver usbat_driver = { .name = DRV_NAME, .probe = usbat_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = usbat_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_stor_driver(usbat_driver, usbat_host_template, DRV_NAME);
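/*
 * Illustrative sketch (not part of shuttle_usbat.c above): the HP 8200e
 * transport recovers the length of the response data from the ATA
 * cylinder-low and cylinder-high registers, reading the high byte only
 * when the expected length can exceed 0xFF. A hypothetical standalone
 * helper showing that assembly; the name and parameters are invented
 * for illustration.
 */
static unsigned int example_usbat_response_len(unsigned char cyl_lo,
					       unsigned char cyl_hi,
					       int expect_wide)
{
	unsigned int len = cyl_lo;		/* cylL: low byte */

	if (expect_wide)
		len += (unsigned int)cyl_hi << 8;	/* cylH: high byte */
	return len;
}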
// SPDX-License-Identifier: GPL-2.0-or-later /* ZD1211 USB-WLAN driver for Linux * * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de> * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org> */ #include <linux/errno.h> #include <linux/string.h> #include "zd_def.h" #include "zd_rf.h" #include "zd_mac.h" #include "zd_chip.h" static const char * const rfs[] = { [0] = "unknown RF0", [1] = "unknown RF1", [UW2451_RF] = "UW2451_RF", [UCHIP_RF] = "UCHIP_RF", [AL2230_RF] = "AL2230_RF", [AL7230B_RF] = "AL7230B_RF", [THETA_RF] = "THETA_RF", [AL2210_RF] = "AL2210_RF", [MAXIM_NEW_RF] = "MAXIM_NEW_RF", [UW2453_RF] = "UW2453_RF", [AL2230S_RF] = "AL2230S_RF", [RALINK_RF] = "RALINK_RF", [INTERSIL_RF] = "INTERSIL_RF", [RF2959_RF] = "RF2959_RF", [MAXIM_NEW2_RF] = "MAXIM_NEW2_RF", [PHILIPS_RF] = "PHILIPS_RF", }; const char *zd_rf_name(u8 type) { if (type & 0xf0) type = 0; return rfs[type]; } void zd_rf_init(struct zd_rf *rf) { memset(rf, 0, sizeof(*rf)); /* default to update channel integration, as almost all RFs do want * this */ rf->update_channel_int = 1; } void zd_rf_clear(struct zd_rf *rf) { if (rf->clear) rf->clear(rf); ZD_MEMCLEAR(rf, sizeof(*rf)); } int zd_rf_init_hw(struct zd_rf *rf, u8 type) { int r = 0; int t; struct zd_chip *chip = zd_rf_to_chip(rf); ZD_ASSERT(mutex_is_locked(&chip->mutex)); switch (type) { case RF2959_RF: r = zd_rf_init_rf2959(rf); break; case AL2230_RF: case AL2230S_RF: r = zd_rf_init_al2230(rf); break; case AL7230B_RF: r = zd_rf_init_al7230b(rf); break; case MAXIM_NEW_RF: case UW2453_RF: r = zd_rf_init_uw2453(rf); break; default: dev_err(zd_chip_dev(chip), "RF %s %#x is not supported\n", zd_rf_name(type), type); rf->type = 0; return -ENODEV; } if (r) return r; rf->type = type; r = zd_chip_lock_phy_regs(chip); if (r) return r; t = rf->init_hw(rf); r = zd_chip_unlock_phy_regs(chip); if (t) r = t; return r; } int zd_rf_scnprint_id(struct zd_rf *rf, char *buffer, size_t size) { return scnprintf(buffer, size, "%s", zd_rf_name(rf->type)); } int zd_rf_set_channel(struct zd_rf *rf, u8 channel) { int r; ZD_ASSERT(mutex_is_locked(&zd_rf_to_chip(rf)->mutex)); if (channel < MIN_CHANNEL24) return -EINVAL; if (channel > MAX_CHANNEL24) return -EINVAL; dev_dbg_f(zd_chip_dev(zd_rf_to_chip(rf)), "channel: %d\n", channel); r = rf->set_channel(rf, channel); if (r >= 0) rf->channel = channel; return r; } int zd_switch_radio_on(struct zd_rf *rf) { int r, t; struct zd_chip *chip = zd_rf_to_chip(rf); ZD_ASSERT(mutex_is_locked(&chip->mutex)); r = zd_chip_lock_phy_regs(chip); if (r) return r; t = rf->switch_radio_on(rf); r = zd_chip_unlock_phy_regs(chip); if (t) r = t; return r; } int zd_switch_radio_off(struct zd_rf *rf) { int r, t; struct zd_chip *chip = zd_rf_to_chip(rf); /* TODO: move phy regs handling to zd_chip */ ZD_ASSERT(mutex_is_locked(&chip->mutex)); r = zd_chip_lock_phy_regs(chip); if (r) return r; t = rf->switch_radio_off(rf); r = zd_chip_unlock_phy_regs(chip); if (t) r = t; return r; } int
zd_rf_patch_6m_band_edge(struct zd_rf *rf, u8 channel) { if (!rf->patch_6m_band_edge) return 0; return rf->patch_6m_band_edge(rf, channel); } int zd_rf_generic_patch_6m(struct zd_rf *rf, u8 channel) { return zd_chip_generic_patch_6m_band(zd_rf_to_chip(rf), channel); }
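/*
 * Illustrative sketch (not part of zd_rf.c above): zd_rf_init_hw(),
 * zd_switch_radio_on() and zd_switch_radio_off() all share one pattern --
 * lock the PHY registers, run an RF callback, always unlock, and let a
 * callback failure take precedence over the unlock result. A hypothetical
 * condensed form of that pattern; the helper name is invented.
 */
static int example_with_locked_phy_regs(struct zd_rf *rf,
					int (*op)(struct zd_rf *rf))
{
	struct zd_chip *chip = zd_rf_to_chip(rf);
	int r, t;

	r = zd_chip_lock_phy_regs(chip);
	if (r)
		return r;
	t = op(rf);				/* RF-specific callback */
	r = zd_chip_unlock_phy_regs(chip);	/* always unlock */
	if (t)					/* callback error wins */
		r = t;
	return r;
}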
// SPDX-License-Identifier: GPL-2.0-or-later /* * Quickcam cameras initialization data * * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> */ #define MODULE_NAME "tv8532" #include "gspca.h" MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>"); MODULE_DESCRIPTION("TV8532 USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !!
must be the first item */ __u8 packet; }; static const struct v4l2_pix_format sif_mode[] = { {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, {352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; /* TV-8532A (ICM532A) registers (LE) */ #define R00_PART_CONTROL 0x00 #define LATENT_CHANGE 0x80 #define EXPO_CHANGE 0x04 #define R01_TIMING_CONTROL_LOW 0x01 #define CMD_EEprom_Open 0x30 #define CMD_EEprom_Close 0x29 #define R03_TABLE_ADDR 0x03 #define R04_WTRAM_DATA_L 0x04 #define R05_WTRAM_DATA_M 0x05 #define R06_WTRAM_DATA_H 0x06 #define R07_TABLE_LEN 0x07 #define R08_RAM_WRITE_ACTION 0x08 #define R0C_AD_WIDTHL 0x0c #define R0D_AD_WIDTHH 0x0d #define R0E_AD_HEIGHTL 0x0e #define R0F_AD_HEIGHTH 0x0f #define R10_AD_COL_BEGINL 0x10 #define R11_AD_COL_BEGINH 0x11 #define MIRROR 0x04 /* [10] */ #define R14_AD_ROW_BEGINL 0x14 #define R15_AD_ROWBEGINH 0x15 #define R1C_AD_EXPOSE_TIMEL 0x1c #define R20_GAIN_G1L 0x20 #define R21_GAIN_G1H 0x21 #define R22_GAIN_RL 0x22 #define R23_GAIN_RH 0x23 #define R24_GAIN_BL 0x24 #define R25_GAIN_BH 0x25 #define R26_GAIN_G2L 0x26 #define R27_GAIN_G2H 0x27 #define R28_QUANT 0x28 #define R29_LINE 0x29 #define R2C_POLARITY 0x2c #define R2D_POINT 0x2d #define R2E_POINTH 0x2e #define R2F_POINTB 0x2f #define R30_POINTBH 0x30 #define R31_UPD 0x31 #define R2A_HIGH_BUDGET 0x2a #define R2B_LOW_BUDGET 0x2b #define R34_VID 0x34 #define R35_VIDH 0x35 #define R36_PID 0x36 #define R37_PIDH 0x37 #define R39_Test1 0x39 /* GPIO */ #define R3B_Test3 0x3b /* GPIO */ #define R83_AD_IDH 0x83 #define R91_AD_SLOPEREG 0x91 #define R94_AD_BITCONTROL 0x94 static const u8 eeprom_data[][3] = { /* dataH dataM dataL */ {0x01, 0x00, 0x01}, {0x01, 0x80, 0x11}, {0x05, 0x00, 0x14}, {0x05, 0x00, 0x1c}, {0x0d, 0x00, 0x1e}, {0x05, 0x00, 0x1f}, {0x05, 0x05, 0x19}, {0x05, 0x01, 0x1b}, {0x05, 0x09, 0x1e}, {0x0d, 0x89, 0x2e}, {0x05, 0x89, 0x2f}, {0x05, 0x0d, 0xd9}, {0x05, 0x09, 0xf1}, }; /* write 1 byte */ static void reg_w1(struct gspca_dev *gspca_dev, __u16 index, __u8 value) { gspca_dev->usb_buf[0] = value; usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x02, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, 1, 500); } /* write 2 bytes */ static void reg_w2(struct gspca_dev *gspca_dev, u16 index, u16 value) { gspca_dev->usb_buf[0] = value; gspca_dev->usb_buf[1] = value >> 8; usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x02, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, 2, 500); } static void tv_8532WriteEEprom(struct gspca_dev *gspca_dev) { int i; reg_w1(gspca_dev, R01_TIMING_CONTROL_LOW, CMD_EEprom_Open); for (i = 0; i < ARRAY_SIZE(eeprom_data); i++) { reg_w1(gspca_dev, R03_TABLE_ADDR, i); reg_w1(gspca_dev, R04_WTRAM_DATA_L, eeprom_data[i][2]); reg_w1(gspca_dev, R05_WTRAM_DATA_M, eeprom_data[i][1]); reg_w1(gspca_dev, R06_WTRAM_DATA_H, eeprom_data[i][0]); reg_w1(gspca_dev, R08_RAM_WRITE_ACTION, 0); } reg_w1(gspca_dev, R07_TABLE_LEN, i); reg_w1(gspca_dev, R01_TIMING_CONTROL_LOW, CMD_EEprom_Close); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam; cam = &gspca_dev->cam; cam->cam_mode = sif_mode; cam->nmodes = ARRAY_SIZE(sif_mode); return 0; } static void tv_8532_setReg(struct gspca_dev *gspca_dev) { 
reg_w1(gspca_dev, R3B_Test3, 0x0a); /* Test0Sel = 10 */ /******************************************************/ reg_w1(gspca_dev, R0E_AD_HEIGHTL, 0x90); reg_w1(gspca_dev, R0F_AD_HEIGHTH, 0x01); reg_w2(gspca_dev, R1C_AD_EXPOSE_TIMEL, 0x018f); reg_w1(gspca_dev, R10_AD_COL_BEGINL, 0x44); /* begin active line */ reg_w1(gspca_dev, R11_AD_COL_BEGINH, 0x00); /* mirror and digital gain */ reg_w1(gspca_dev, R14_AD_ROW_BEGINL, 0x0a); reg_w1(gspca_dev, R94_AD_BITCONTROL, 0x02); reg_w1(gspca_dev, R91_AD_SLOPEREG, 0x00); reg_w1(gspca_dev, R00_PART_CONTROL, LATENT_CHANGE | EXPO_CHANGE); /* = 0x84 */ } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { tv_8532WriteEEprom(gspca_dev); return 0; } static void setexposure(struct gspca_dev *gspca_dev, s32 val) { reg_w2(gspca_dev, R1C_AD_EXPOSE_TIMEL, val); reg_w1(gspca_dev, R00_PART_CONTROL, LATENT_CHANGE | EXPO_CHANGE); /* 0x84 */ } static void setgain(struct gspca_dev *gspca_dev, s32 val) { reg_w2(gspca_dev, R20_GAIN_G1L, val); reg_w2(gspca_dev, R22_GAIN_RL, val); reg_w2(gspca_dev, R24_GAIN_BL, val); reg_w2(gspca_dev, R26_GAIN_G2L, val); } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w1(gspca_dev, R0C_AD_WIDTHL, 0xe8); /* 0x20; 0x0c */ reg_w1(gspca_dev, R0D_AD_WIDTHH, 0x03); /************************************************/ reg_w1(gspca_dev, R28_QUANT, 0x90); /* 0x72 compressed mode 0x28 */ if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) { /* 176x144 */ reg_w1(gspca_dev, R29_LINE, 0x41); /* CIF - 2 lines/packet */ } else { /* 352x288 */ reg_w1(gspca_dev, R29_LINE, 0x81); /* CIF - 2 lines/packet */ } /************************************************/ reg_w1(gspca_dev, R2C_POLARITY, 0x10); /* slow clock */ reg_w1(gspca_dev, R2D_POINT, 0x14); reg_w1(gspca_dev, R2E_POINTH, 0x01); reg_w1(gspca_dev, R2F_POINTB, 0x12); reg_w1(gspca_dev, R30_POINTBH, 0x01); tv_8532_setReg(gspca_dev); /************************************************/ reg_w1(gspca_dev, R31_UPD, 0x01); /* update registers */ msleep(200); reg_w1(gspca_dev, R31_UPD, 0x00); /* end update */ gspca_dev->empty_packet = 0; /* check the empty packets */ sd->packet = 0; /* ignore the first packets */ return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { reg_w1(gspca_dev, R3B_Test3, 0x0b); /* Test0Sel = 11 = GPIO */ } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; int packet_type0, packet_type1; packet_type0 = packet_type1 = INTER_PACKET; if (gspca_dev->empty_packet) { gspca_dev->empty_packet = 0; sd->packet = gspca_dev->pixfmt.height / 2; packet_type0 = FIRST_PACKET; } else if (sd->packet == 0) return; /* 2 more lines in 352x288 ! 
*/ sd->packet--; if (sd->packet == 0) packet_type1 = LAST_PACKET; /* each packet contains: * - header 2 bytes * - RGRG line * - 4 bytes * - GBGB line * - 4 bytes */ gspca_frame_add(gspca_dev, packet_type0, data + 2, gspca_dev->pixfmt.width); gspca_frame_add(gspca_dev, packet_type1, data + gspca_dev->pixfmt.width + 5, gspca_dev->pixfmt.width); } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE: setexposure(gspca_dev, ctrl->val); break; case V4L2_CID_GAIN: setgain(gspca_dev, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 2); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 0, 0x18f, 1, 0x18f); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 0x7ff, 1, 0x100); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x046d, 0x0920)}, {USB_DEVICE(0x046d, 0x0921)}, {USB_DEVICE(0x0545, 0x808b)}, {USB_DEVICE(0x0545, 0x8333)}, {USB_DEVICE(0x0923, 0x010f)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
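/*
 * Illustrative sketch (not part of tv8532.c above): per the layout
 * comment in sd_pkt_scan(), each isochronous packet carries two Bayer
 * lines. A hypothetical helper computing the in-packet offsets that
 * sd_pkt_scan() passes to gspca_frame_add(); the name and the 'width'
 * parameter (frame width in pixels, one byte per pixel in SBGGR8) are
 * invented for illustration.
 */
static void example_tv8532_line_offsets(int width, int *rg_off, int *gb_off)
{
	*rg_off = 2;		/* first line: skip the 2-byte header */
	*gb_off = width + 5;	/* second line: header + RGRG line + gap,
				 * matching the offset used above */
}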
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for Datafab USB Compact Flash reader * * datafab driver v0.1: * * First release * * Current development and maintenance by: * (c) 2000 Jimmie Mayfield (mayfield+datafab@sackheads.org) * * Many thanks to Robert Baruch for the SanDisk SmartMedia reader driver * which I used as a template for this driver. * * Some bugfixes and scatter-gather code by Gregory P.
Smith * (greg-usb@electricrain.com) * * Fix for media change by Joerg Schneider (js@joergschneider.com) * * Other contributors: * (c) 2002 Alan Stern <stern@rowland.org> */ /* * This driver attempts to support USB CompactFlash reader/writer devices * based on Datafab USB-to-ATA chips. It was specifically developed for the * Datafab MDCFE-B USB CompactFlash reader but has since been found to work * with a variety of Datafab-based devices from a number of manufacturers. * I've received a report of this driver working with a Datafab-based * SmartMedia device though please be aware that I'm personally unable to * test SmartMedia support. * * This driver supports reading and writing. If you're truly paranoid, * however, you can force the driver into a write-protected state by setting * the WP enable bits in datafab_handle_mode_sense(). See the comments * in that routine. */ #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" #include "scsiglue.h" #define DRV_NAME "ums-datafab" MODULE_DESCRIPTION("Driver for Datafab USB Compact Flash reader"); MODULE_AUTHOR("Jimmie Mayfield <mayfield+datafab@sackheads.org>"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS("USB_STORAGE"); struct datafab_info { unsigned long sectors; /* total sector count */ unsigned long ssize; /* sector size in bytes */ signed char lun; /* used for dual-slot readers */ /* the following aren't used yet */ unsigned char sense_key; unsigned long sense_asc; /* additional sense code */ unsigned long sense_ascq; /* additional sense code qualifier */ }; static int datafab_determine_lun(struct us_data *us, struct datafab_info *info); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } static const struct usb_device_id datafab_usb_ids[] = { # include "unusual_datafab.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, datafab_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static const struct us_unusual_dev datafab_unusual_dev_list[] = { # include "unusual_datafab.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV static inline int datafab_bulk_read(struct us_data *us, unsigned char *data, unsigned int len) { if (len == 0) return USB_STOR_XFER_GOOD; usb_stor_dbg(us, "len = %d\n", len); return usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, data, len, NULL); } static inline int datafab_bulk_write(struct us_data *us, unsigned char *data, unsigned int len) { if (len == 0) return USB_STOR_XFER_GOOD; usb_stor_dbg(us, "len = %d\n", len); return usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, data, len, NULL); } static int datafab_read_data(struct us_data *us, struct datafab_info *info, u32 sector, u32 sectors) { unsigned char *command = us->iobuf; unsigned char *buffer; unsigned char thistime; unsigned int totallen, alloclen; int len, result; unsigned int sg_offset = 0; struct scatterlist *sg = NULL; // we're working in LBA mode. 
according to the ATA spec, // we can support up to 28-bit addressing. I don't know if Datafab // supports beyond 24-bit addressing. It's kind of hard to test // since it requires > 8GB CF card. // if (sectors > 0x0FFFFFFF) return USB_STOR_TRANSPORT_ERROR; if (info->lun == -1) { result = datafab_determine_lun(us, info); if (result != USB_STOR_TRANSPORT_GOOD) return result; } totallen = sectors * info->ssize; // Since we don't read more than 64 KB at a time, we have to create // a bounce buffer and move the data a piece at a time between the // bounce buffer and the actual transfer buffer. alloclen = min(totallen, 65536u); buffer = kmalloc(alloclen, GFP_NOIO); if (buffer == NULL) return USB_STOR_TRANSPORT_ERROR; do { // loop, never allocate or transfer more than 64k at once // (min(128k, 255*info->ssize) is the real limit) len = min(totallen, alloclen); thistime = (len / info->ssize) & 0xff; command[0] = 0; command[1] = thistime; command[2] = sector & 0xFF; command[3] = (sector >> 8) & 0xFF; command[4] = (sector >> 16) & 0xFF; command[5] = 0xE0 + (info->lun << 4); command[5] |= (sector >> 24) & 0x0F; command[6] = 0x20; command[7] = 0x01; // send the read command result = datafab_bulk_write(us, command, 8); if (result != USB_STOR_XFER_GOOD) goto leave; // read the result result = datafab_bulk_read(us, buffer, len); if (result != USB_STOR_XFER_GOOD) goto leave; // Store the data in the transfer buffer usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &sg_offset, TO_XFER_BUF); sector += thistime; totallen -= len; } while (totallen > 0); kfree(buffer); return USB_STOR_TRANSPORT_GOOD; leave: kfree(buffer); return USB_STOR_TRANSPORT_ERROR; } static int datafab_write_data(struct us_data *us, struct datafab_info *info, u32 sector, u32 sectors) { unsigned char *command = us->iobuf; unsigned char *reply = us->iobuf; unsigned char *buffer; unsigned char thistime; unsigned int totallen, alloclen; int len, result; unsigned int sg_offset = 0; struct scatterlist *sg = NULL; // we're working in LBA mode. according to the ATA spec, // we can support up to 28-bit addressing. I don't know if Datafab // supports beyond 24-bit addressing. It's kind of hard to test // since it requires > 8GB CF card. // if (sectors > 0x0FFFFFFF) return USB_STOR_TRANSPORT_ERROR; if (info->lun == -1) { result = datafab_determine_lun(us, info); if (result != USB_STOR_TRANSPORT_GOOD) return result; } totallen = sectors * info->ssize; // Since we don't write more than 64 KB at a time, we have to create // a bounce buffer and move the data a piece at a time between the // bounce buffer and the actual transfer buffer. 
alloclen = min(totallen, 65536u); buffer = kmalloc(alloclen, GFP_NOIO); if (buffer == NULL) return USB_STOR_TRANSPORT_ERROR; do { // loop, never allocate or transfer more than 64k at once // (min(128k, 255*info->ssize) is the real limit) len = min(totallen, alloclen); thistime = (len / info->ssize) & 0xff; // Get the data from the transfer buffer usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &sg_offset, FROM_XFER_BUF); command[0] = 0; command[1] = thistime; command[2] = sector & 0xFF; command[3] = (sector >> 8) & 0xFF; command[4] = (sector >> 16) & 0xFF; command[5] = 0xE0 + (info->lun << 4); command[5] |= (sector >> 24) & 0x0F; command[6] = 0x30; command[7] = 0x02; // send the command result = datafab_bulk_write(us, command, 8); if (result != USB_STOR_XFER_GOOD) goto leave; // send the data result = datafab_bulk_write(us, buffer, len); if (result != USB_STOR_XFER_GOOD) goto leave; // read the result result = datafab_bulk_read(us, reply, 2); if (result != USB_STOR_XFER_GOOD) goto leave; if (reply[0] != 0x50 && reply[1] != 0) { usb_stor_dbg(us, "Gah! write return code: %02x %02x\n", reply[0], reply[1]); goto leave; } sector += thistime; totallen -= len; } while (totallen > 0); kfree(buffer); return USB_STOR_TRANSPORT_GOOD; leave: kfree(buffer); return USB_STOR_TRANSPORT_ERROR; } static int datafab_determine_lun(struct us_data *us, struct datafab_info *info) { // Dual-slot readers can be thought of as dual-LUN devices. // We need to determine which card slot is being used. // We'll send an IDENTIFY DEVICE command and see which LUN responds... // // There might be a better way of doing this? static const unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 }; unsigned char *command = us->iobuf; unsigned char *buf; int count = 0, rc; if (!info) return USB_STOR_TRANSPORT_ERROR; memcpy(command, scommand, 8); buf = kmalloc(512, GFP_NOIO); if (!buf) return USB_STOR_TRANSPORT_ERROR; usb_stor_dbg(us, "locating...\n"); // we'll try 3 times before giving up... // while (count++ < 3) { command[5] = 0xa0; rc = datafab_bulk_write(us, command, 8); if (rc != USB_STOR_XFER_GOOD) { rc = USB_STOR_TRANSPORT_ERROR; goto leave; } rc = datafab_bulk_read(us, buf, 512); if (rc == USB_STOR_XFER_GOOD) { info->lun = 0; rc = USB_STOR_TRANSPORT_GOOD; goto leave; } command[5] = 0xb0; rc = datafab_bulk_write(us, command, 8); if (rc != USB_STOR_XFER_GOOD) { rc = USB_STOR_TRANSPORT_ERROR; goto leave; } rc = datafab_bulk_read(us, buf, 512); if (rc == USB_STOR_XFER_GOOD) { info->lun = 1; rc = USB_STOR_TRANSPORT_GOOD; goto leave; } msleep(20); } rc = USB_STOR_TRANSPORT_ERROR; leave: kfree(buf); return rc; } static int datafab_id_device(struct us_data *us, struct datafab_info *info) { // this is a variation of the ATA "IDENTIFY DEVICE" command...according // to the ATA spec, 'Sector Count' isn't used but the Windows driver // sets this bit so we do too... // static const unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 }; unsigned char *command = us->iobuf; unsigned char *reply; int rc; if (!info) return USB_STOR_TRANSPORT_ERROR; if (info->lun == -1) { rc = datafab_determine_lun(us, info); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; } memcpy(command, scommand, 8); reply = kmalloc(512, GFP_NOIO); if (!reply) return USB_STOR_TRANSPORT_ERROR; command[5] += (info->lun << 4); rc = datafab_bulk_write(us, command, 8); if (rc != USB_STOR_XFER_GOOD) { rc = USB_STOR_TRANSPORT_ERROR; goto leave; } // we'll go ahead and extract the media capacity while we're here... 
// rc = datafab_bulk_read(us, reply, 512); if (rc == USB_STOR_XFER_GOOD) { // capacity is at word offset 57-58 // info->sectors = ((u32)(reply[117]) << 24) | ((u32)(reply[116]) << 16) | ((u32)(reply[115]) << 8) | ((u32)(reply[114]) ); rc = USB_STOR_TRANSPORT_GOOD; goto leave; } rc = USB_STOR_TRANSPORT_ERROR; leave: kfree(reply); return rc; } static int datafab_handle_mode_sense(struct us_data *us, struct scsi_cmnd * srb, int sense_6) { static const unsigned char rw_err_page[12] = { 0x1, 0xA, 0x21, 1, 0, 0, 0, 0, 1, 0, 0, 0 }; static const unsigned char cache_page[12] = { 0x8, 0xA, 0x1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static const unsigned char rbac_page[12] = { 0x1B, 0xA, 0, 0x81, 0, 0, 0, 0, 0, 0, 0, 0 }; static const unsigned char timer_page[8] = { 0x1C, 0x6, 0, 0, 0, 0 }; unsigned char pc, page_code; unsigned int i = 0; struct datafab_info *info = (struct datafab_info *) (us->extra); unsigned char *ptr = us->iobuf; // most of this stuff is just a hack to get things working. the // datafab reader doesn't present a SCSI interface so we // fudge the SCSI commands... // pc = srb->cmnd[2] >> 6; page_code = srb->cmnd[2] & 0x3F; switch (pc) { case 0x0: usb_stor_dbg(us, "Current values\n"); break; case 0x1: usb_stor_dbg(us, "Changeable values\n"); break; case 0x2: usb_stor_dbg(us, "Default values\n"); break; case 0x3: usb_stor_dbg(us, "Saved values\n"); break; } memset(ptr, 0, 8); if (sense_6) { ptr[2] = 0x00; // WP enable: 0x80 i = 4; } else { ptr[3] = 0x00; // WP enable: 0x80 i = 8; } switch (page_code) { default: // vendor-specific mode info->sense_key = 0x05; info->sense_asc = 0x24; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; case 0x1: memcpy(ptr + i, rw_err_page, sizeof(rw_err_page)); i += sizeof(rw_err_page); break; case 0x8: memcpy(ptr + i, cache_page, sizeof(cache_page)); i += sizeof(cache_page); break; case 0x1B: memcpy(ptr + i, rbac_page, sizeof(rbac_page)); i += sizeof(rbac_page); break; case 0x1C: memcpy(ptr + i, timer_page, sizeof(timer_page)); i += sizeof(timer_page); break; case 0x3F: // retrieve all pages memcpy(ptr + i, timer_page, sizeof(timer_page)); i += sizeof(timer_page); memcpy(ptr + i, rbac_page, sizeof(rbac_page)); i += sizeof(rbac_page); memcpy(ptr + i, cache_page, sizeof(cache_page)); i += sizeof(cache_page); memcpy(ptr + i, rw_err_page, sizeof(rw_err_page)); i += sizeof(rw_err_page); break; } if (sense_6) ptr[0] = i - 1; else ((__be16 *) ptr)[0] = cpu_to_be16(i - 2); usb_stor_set_xfer_buf(ptr, i, srb); return USB_STOR_TRANSPORT_GOOD; } static void datafab_info_destructor(void *extra) { // this routine is a placeholder...
// currently, we don't allocate any extra memory so we're okay } // Transport for the Datafab MDCFE-B // static int datafab_transport(struct scsi_cmnd *srb, struct us_data *us) { struct datafab_info *info; int rc; unsigned long block, blocks; unsigned char *ptr = us->iobuf; static const unsigned char inquiry_reply[8] = { 0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00 }; if (!us->extra) { us->extra = kzalloc(sizeof(struct datafab_info), GFP_NOIO); if (!us->extra) return USB_STOR_TRANSPORT_ERROR; us->extra_destructor = datafab_info_destructor; ((struct datafab_info *)us->extra)->lun = -1; } info = (struct datafab_info *) (us->extra); if (srb->cmnd[0] == INQUIRY) { usb_stor_dbg(us, "INQUIRY - Returning bogus response\n"); memcpy(ptr, inquiry_reply, sizeof(inquiry_reply)); fill_inquiry_response(us, ptr, 36); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == READ_CAPACITY) { info->ssize = 0x200; // hard coded 512 byte sectors as per ATA spec rc = datafab_id_device(us, info); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; usb_stor_dbg(us, "READ_CAPACITY: %ld sectors, %ld bytes per sector\n", info->sectors, info->ssize); // build the reply // we need the last sector, not the number of sectors ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); ((__be32 *) ptr)[1] = cpu_to_be32(info->ssize); usb_stor_set_xfer_buf(ptr, 8, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == MODE_SELECT_10) { usb_stor_dbg(us, "Gah! MODE_SELECT_10\n"); return USB_STOR_TRANSPORT_ERROR; } // don't bother implementing READ_6 or WRITE_6. // if (srb->cmnd[0] == READ_10) { block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); usb_stor_dbg(us, "READ_10: read block 0x%04lx count %ld\n", block, blocks); return datafab_read_data(us, info, block, blocks); } if (srb->cmnd[0] == READ_12) { // we'll probably never see a READ_12 but we'll do it anyway... // block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | ((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9])); usb_stor_dbg(us, "READ_12: read block 0x%04lx count %ld\n", block, blocks); return datafab_read_data(us, info, block, blocks); } if (srb->cmnd[0] == WRITE_10) { block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); usb_stor_dbg(us, "WRITE_10: write block 0x%04lx count %ld\n", block, blocks); return datafab_write_data(us, info, block, blocks); } if (srb->cmnd[0] == WRITE_12) { // we'll probably never see a WRITE_12 but we'll do it anyway... // block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) | ((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5])); blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | ((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9])); usb_stor_dbg(us, "WRITE_12: write block 0x%04lx count %ld\n", block, blocks); return datafab_write_data(us, info, block, blocks); } if (srb->cmnd[0] == TEST_UNIT_READY) { usb_stor_dbg(us, "TEST_UNIT_READY\n"); return datafab_id_device(us, info); } if (srb->cmnd[0] == REQUEST_SENSE) { usb_stor_dbg(us, "REQUEST_SENSE - Returning faked response\n"); // this response is pretty bogus right now. eventually if necessary // we can set the correct sense data. 
so far though it hasn't been // necessary // memset(ptr, 0, 18); ptr[0] = 0xF0; ptr[2] = info->sense_key; ptr[7] = 11; ptr[12] = info->sense_asc; ptr[13] = info->sense_ascq; usb_stor_set_xfer_buf(ptr, 18, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == MODE_SENSE) { usb_stor_dbg(us, "MODE_SENSE_6 detected\n"); return datafab_handle_mode_sense(us, srb, 1); } if (srb->cmnd[0] == MODE_SENSE_10) { usb_stor_dbg(us, "MODE_SENSE_10 detected\n"); return datafab_handle_mode_sense(us, srb, 0); } if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { /* * sure. whatever. not like we can stop the user from * popping the media out of the device (no locking doors, etc) */ return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == START_STOP) { /* * this is used by sd.c'check_scsidisk_media_change to detect * media change */ usb_stor_dbg(us, "START_STOP\n"); /* * the first datafab_id_device after a media change returns * an error (determined experimentally) */ rc = datafab_id_device(us, info); if (rc == USB_STOR_TRANSPORT_GOOD) { info->sense_key = NO_SENSE; srb->result = SUCCESS; } else { info->sense_key = UNIT_ATTENTION; srb->result = SAM_STAT_CHECK_CONDITION; } return rc; } usb_stor_dbg(us, "Gah! Unknown command: %d (0x%x)\n", srb->cmnd[0], srb->cmnd[0]); info->sense_key = 0x05; info->sense_asc = 0x20; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; } static struct scsi_host_template datafab_host_template; static int datafab_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - datafab_usb_ids) + datafab_unusual_dev_list, &datafab_host_template); if (result) return result; us->transport_name = "Datafab Bulk-Only"; us->transport = datafab_transport; us->transport_reset = usb_stor_Bulk_reset; us->max_lun = 1; result = usb_stor_probe2(us); return result; } static struct usb_driver datafab_driver = { .name = DRV_NAME, .probe = datafab_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = datafab_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_stor_driver(datafab_driver, datafab_host_template, DRV_NAME); |
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/ext4/xattr.c * * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de> * * Fix by Harrison Xing <harrison@mountainviewdata.com>. * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>. * Extended attributes for symlinks and special files added per * suggestion of Luka Renko <luka.renko@hermes.si>. * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>, * Red Hat Inc. * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz * and Andreas Gruenbacher <agruen@suse.de>. */ /* * Extended attributes are stored directly in inodes (on file systems with * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl * field contains the block number if an inode uses an additional block. All * attributes must fit in the inode and one additional block. Blocks that * contain the identical set of attributes may be shared among several inodes. * Identical blocks are detected by keeping a cache of blocks that have * recently been accessed. * * The attributes in inodes and on blocks have a different header; the entries * are stored in the same format: * * +------------------+ * | header | * | entry 1 | | * | entry 2 | | growing downwards * | entry 3 | v * | four null bytes | * | . . . | * | value 1 | ^ * | value 3 | | growing upwards * | value 2 | | * +------------------+ * * The header is followed by multiple entry descriptors. In disk blocks, the * entry descriptors are kept sorted. In inodes, they are unsorted. The * attribute values are aligned to the end of the block in no specific order. * * Locking strategy * ---------------- * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem. * EA blocks are only changed if they are exclusive to an inode, so * holding xattr_sem also means that nothing but the EA block's reference * count can change. Multiple writers to the same block are synchronized * by the buffer lock. */ #include <linux/init.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/mbcache.h> #include <linux/quotaops.h> #include <linux/iversion.h> #include "ext4_jbd2.h" #include "ext4.h" #include "xattr.h" #include "acl.h" #ifdef EXT4_XATTR_DEBUG # define ea_idebug(inode, fmt, ...) \ printk(KERN_DEBUG "inode %s:%lu: " fmt "\n", \ inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__) # define ea_bdebug(bh, fmt, ...) \ printk(KERN_DEBUG "block %pg:%lu: " fmt "\n", \ bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__) #else # define ea_idebug(inode, fmt, ...) no_printk(fmt, ##__VA_ARGS__) # define ea_bdebug(bh, fmt, ...)
no_printk(fmt, ##__VA_ARGS__) #endif static void ext4_xattr_block_cache_insert(struct mb_cache *, struct buffer_head *); static struct buffer_head * ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *, struct mb_cache_entry **); static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value, size_t value_count); static __le32 ext4_xattr_hash_entry_signed(char *name, size_t name_len, __le32 *value, size_t value_count); static void ext4_xattr_rehash(struct ext4_xattr_header *); static const struct xattr_handler * const ext4_xattr_handler_map[] = { [EXT4_XATTR_INDEX_USER] = &ext4_xattr_user_handler, #ifdef CONFIG_EXT4_FS_POSIX_ACL [EXT4_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access, [EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &nop_posix_acl_default, #endif [EXT4_XATTR_INDEX_TRUSTED] = &ext4_xattr_trusted_handler, #ifdef CONFIG_EXT4_FS_SECURITY [EXT4_XATTR_INDEX_SECURITY] = &ext4_xattr_security_handler, #endif [EXT4_XATTR_INDEX_HURD] = &ext4_xattr_hurd_handler, }; const struct xattr_handler * const ext4_xattr_handlers[] = { &ext4_xattr_user_handler, &ext4_xattr_trusted_handler, #ifdef CONFIG_EXT4_FS_SECURITY &ext4_xattr_security_handler, #endif &ext4_xattr_hurd_handler, NULL }; #define EA_BLOCK_CACHE(inode) (((struct ext4_sb_info *) \ inode->i_sb->s_fs_info)->s_ea_block_cache) #define EA_INODE_CACHE(inode) (((struct ext4_sb_info *) \ inode->i_sb->s_fs_info)->s_ea_inode_cache) static int ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array, struct inode *inode); #ifdef CONFIG_LOCKDEP void ext4_xattr_inode_set_class(struct inode *ea_inode) { struct ext4_inode_info *ei = EXT4_I(ea_inode); lockdep_set_subclass(&ea_inode->i_rwsem, 1); (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */ lockdep_set_subclass(&ei->i_data_sem, I_DATA_SEM_EA); } #endif static __le32 ext4_xattr_block_csum(struct inode *inode, sector_t block_nr, struct ext4_xattr_header *hdr) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); __u32 csum; __le64 dsk_block_nr = cpu_to_le64(block_nr); __u32 dummy_csum = 0; int offset = offsetof(struct ext4_xattr_header, h_checksum); csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)&dsk_block_nr, sizeof(dsk_block_nr)); csum = ext4_chksum(csum, (__u8 *)hdr, offset); csum = ext4_chksum(csum, (__u8 *)&dummy_csum, sizeof(dummy_csum)); offset += sizeof(dummy_csum); csum = ext4_chksum(csum, (__u8 *)hdr + offset, EXT4_BLOCK_SIZE(inode->i_sb) - offset); return cpu_to_le32(csum); } static int ext4_xattr_block_csum_verify(struct inode *inode, struct buffer_head *bh) { struct ext4_xattr_header *hdr = BHDR(bh); int ret = 1; if (ext4_has_feature_metadata_csum(inode->i_sb)) { lock_buffer(bh); ret = (hdr->h_checksum == ext4_xattr_block_csum(inode, bh->b_blocknr, hdr)); unlock_buffer(bh); } return ret; } static void ext4_xattr_block_csum_set(struct inode *inode, struct buffer_head *bh) { if (ext4_has_feature_metadata_csum(inode->i_sb)) BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode, bh->b_blocknr, BHDR(bh)); } static inline const char *ext4_xattr_prefix(int name_index, struct dentry *dentry) { const struct xattr_handler *handler = NULL; if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map)) handler = ext4_xattr_handler_map[name_index]; if (!xattr_handler_can_list(handler, dentry)) return NULL; return xattr_prefix(handler); } static int check_xattrs(struct inode *inode, struct buffer_head *bh, struct ext4_xattr_entry *entry, void *end, void *value_start, const char *function, unsigned int line) { struct ext4_xattr_entry *e = entry; 
int err = -EFSCORRUPTED; char *err_str; if (bh) { if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || BHDR(bh)->h_blocks != cpu_to_le32(1)) { err_str = "invalid header"; goto errout; } if (buffer_verified(bh)) return 0; if (!ext4_xattr_block_csum_verify(inode, bh)) { err = -EFSBADCRC; err_str = "invalid checksum"; goto errout; } } else { struct ext4_xattr_ibody_header *header = value_start; header -= 1; if (end - (void *)header < sizeof(*header) + sizeof(u32)) { err_str = "in-inode xattr block too small"; goto errout; } if (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { err_str = "bad magic number in in-inode xattr"; goto errout; } } /* Find the end of the names list */ while (!IS_LAST_ENTRY(e)) { struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e); if ((void *)next >= end) { err_str = "e_name out of bounds"; goto errout; } if (strnlen(e->e_name, e->e_name_len) != e->e_name_len) { err_str = "bad e_name length"; goto errout; } e = next; } /* Check the values */ while (!IS_LAST_ENTRY(entry)) { u32 size = le32_to_cpu(entry->e_value_size); unsigned long ea_ino = le32_to_cpu(entry->e_value_inum); if (!ext4_has_feature_ea_inode(inode->i_sb) && ea_ino) { err_str = "ea_inode specified without ea_inode feature enabled"; goto errout; } if (ea_ino && ((ea_ino == EXT4_ROOT_INO) || !ext4_valid_inum(inode->i_sb, ea_ino))) { err_str = "invalid ea_ino"; goto errout; } if (size > EXT4_XATTR_SIZE_MAX) { err_str = "e_value size too large"; goto errout; } if (size != 0 && entry->e_value_inum == 0) { u16 offs = le16_to_cpu(entry->e_value_offs); void *value; /* * The value cannot overlap the names, and the value * with padding cannot extend beyond 'end'. Check both * the padded and unpadded sizes, since the size may * overflow to 0 when adding padding. */ if (offs > end - value_start) { err_str = "e_value out of bounds"; goto errout; } value = value_start + offs; if (value < (void *)e + sizeof(u32) || size > end - value || EXT4_XATTR_SIZE(size) > end - value) { err_str = "overlapping e_value "; goto errout; } } entry = EXT4_XATTR_NEXT(entry); } if (bh) set_buffer_verified(bh); return 0; errout: if (bh) __ext4_error_inode(inode, function, line, 0, -err, "corrupted xattr block %llu: %s", (unsigned long long) bh->b_blocknr, err_str); else __ext4_error_inode(inode, function, line, 0, -err, "corrupted in-inode xattr: %s", err_str); return err; } static inline int __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh, const char *function, unsigned int line) { return check_xattrs(inode, bh, BFIRST(bh), bh->b_data + bh->b_size, bh->b_data, function, line); } #define ext4_xattr_check_block(inode, bh) \ __ext4_xattr_check_block((inode), (bh), __func__, __LINE__) int __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header, void *end, const char *function, unsigned int line) { return check_xattrs(inode, NULL, IFIRST(header), end, IFIRST(header), function, line); } static int xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry, void *end, int name_index, const char *name, int sorted) { struct ext4_xattr_entry *entry, *next; size_t name_len; int cmp = 1; if (name == NULL) return -EINVAL; name_len = strlen(name); for (entry = *pentry; !IS_LAST_ENTRY(entry); entry = next) { next = EXT4_XATTR_NEXT(entry); if ((void *) next >= end) { EXT4_ERROR_INODE(inode, "corrupted xattr entries"); return -EFSCORRUPTED; } cmp = name_index - entry->e_name_index; if (!cmp) cmp = name_len - entry->e_name_len; if (!cmp) cmp = memcmp(name, entry->e_name, name_len); if (cmp <= 0 
&& (sorted || cmp == 0)) break; } *pentry = entry; return cmp ? -ENODATA : 0; } static u32 ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size) { return ext4_chksum(sbi->s_csum_seed, buffer, size); } static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode) { return ((u64) inode_get_ctime_sec(ea_inode) << 32) | (u32) inode_peek_iversion_raw(ea_inode); } static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count) { inode_set_ctime(ea_inode, (u32)(ref_count >> 32), 0); inode_set_iversion_raw(ea_inode, ref_count & 0xffffffff); } static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode) { return (u32) inode_get_atime_sec(ea_inode); } static void ext4_xattr_inode_set_hash(struct inode *ea_inode, u32 hash) { inode_set_atime(ea_inode, hash, 0); } /* * Read the EA value from an inode. */ static int ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t size) { int blocksize = 1 << ea_inode->i_blkbits; int bh_count = (size + blocksize - 1) >> ea_inode->i_blkbits; int tail_size = (size % blocksize) ?: blocksize; struct buffer_head *bhs_inline[8]; struct buffer_head **bhs = bhs_inline; int i, ret; if (bh_count > ARRAY_SIZE(bhs_inline)) { bhs = kmalloc_array(bh_count, sizeof(*bhs), GFP_NOFS); if (!bhs) return -ENOMEM; } ret = ext4_bread_batch(ea_inode, 0 /* block */, bh_count, true /* wait */, bhs); if (ret) goto free_bhs; for (i = 0; i < bh_count; i++) { /* There shouldn't be any holes in ea_inode. */ if (!bhs[i]) { ret = -EFSCORRUPTED; goto put_bhs; } memcpy((char *)buf + blocksize * i, bhs[i]->b_data, i < bh_count - 1 ? blocksize : tail_size); } ret = 0; put_bhs: for (i = 0; i < bh_count; i++) brelse(bhs[i]); free_bhs: if (bhs != bhs_inline) kfree(bhs); return ret; } #define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode_get_mtime_sec(inode))) static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino, u32 ea_inode_hash, struct inode **ea_inode) { struct inode *inode; int err; /* * We have to check for this corruption early as otherwise * iget_locked() could wait indefinitely for the state of our * parent inode. */ if (parent->i_ino == ea_ino) { ext4_error(parent->i_sb, "Parent and EA inode have the same ino %lu", ea_ino); return -EFSCORRUPTED; } inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_EA_INODE); if (IS_ERR(inode)) { err = PTR_ERR(inode); ext4_error(parent->i_sb, "error while reading EA inode %lu err=%d", ea_ino, err); return err; } ext4_xattr_inode_set_class(inode); /* * Check whether this is an old Lustre-style xattr inode. Lustre * implementation does not have hash validation, rather it has a * backpointer from ea_inode to the parent inode. 
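 *
 * The backpointer is stored in the EA inode's i_mtime field (see
 * EXT4_XATTR_INODE_GET_PARENT() above), together with a copy of the
 * parent's i_generation. If the stored hash does not match but the
 * backpointer does, treat this as a Lustre-style EA inode with an
 * implicit reference count of 1.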
*/ if (ea_inode_hash != ext4_xattr_inode_get_hash(inode) && EXT4_XATTR_INODE_GET_PARENT(inode) == parent->i_ino && inode->i_generation == parent->i_generation) { ext4_set_inode_state(inode, EXT4_STATE_LUSTRE_EA_INODE); ext4_xattr_inode_set_ref(inode, 1); } else { inode_lock_nested(inode, I_MUTEX_XATTR); inode->i_flags |= S_NOQUOTA; inode_unlock(inode); } *ea_inode = inode; return 0; } /* Remove entry from mbcache when EA inode is getting evicted */ void ext4_evict_ea_inode(struct inode *inode) { struct mb_cache_entry *oe; if (!EA_INODE_CACHE(inode)) return; /* Wait for entry to get unused so that we can remove it */ while ((oe = mb_cache_entry_delete_or_get(EA_INODE_CACHE(inode), ext4_xattr_inode_get_hash(inode), inode->i_ino))) { mb_cache_entry_wait_unused(oe); mb_cache_entry_put(EA_INODE_CACHE(inode), oe); } } static int ext4_xattr_inode_verify_hashes(struct inode *ea_inode, struct ext4_xattr_entry *entry, void *buffer, size_t size) { u32 hash; /* Verify stored hash matches calculated hash. */ hash = ext4_xattr_inode_hash(EXT4_SB(ea_inode->i_sb), buffer, size); if (hash != ext4_xattr_inode_get_hash(ea_inode)) return -EFSCORRUPTED; if (entry) { __le32 e_hash, tmp_data; /* Verify entry hash. */ tmp_data = cpu_to_le32(hash); e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len, &tmp_data, 1); /* All good? */ if (e_hash == entry->e_hash) return 0; /* * Not good. Maybe the entry hash was calculated * using the buggy signed char version? */ e_hash = ext4_xattr_hash_entry_signed(entry->e_name, entry->e_name_len, &tmp_data, 1); /* Still no match - bad */ if (e_hash != entry->e_hash) return -EFSCORRUPTED; /* Let people know about old hash */ pr_warn_once("ext4: filesystem with signed xattr name hash"); } return 0; } /* * Read xattr value from the EA inode. 
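 *
 * This checks that the EA inode's size matches the entry's recorded
 * value size, reads the value with ext4_xattr_inode_read(), and (for
 * non-Lustre EA inodes) verifies both the value hash and the entry
 * hash before optionally inserting the inode into the EA inode cache.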
*/ static int ext4_xattr_inode_get(struct inode *inode, struct ext4_xattr_entry *entry, void *buffer, size_t size) { struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode); struct inode *ea_inode; int err; err = ext4_xattr_inode_iget(inode, le32_to_cpu(entry->e_value_inum), le32_to_cpu(entry->e_hash), &ea_inode); if (err) { ea_inode = NULL; goto out; } if (i_size_read(ea_inode) != size) { ext4_warning_inode(ea_inode, "ea_inode file size=%llu entry size=%zu", i_size_read(ea_inode), size); err = -EFSCORRUPTED; goto out; } err = ext4_xattr_inode_read(ea_inode, buffer, size); if (err) goto out; if (!ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE)) { err = ext4_xattr_inode_verify_hashes(ea_inode, entry, buffer, size); if (err) { ext4_warning_inode(ea_inode, "EA inode hash validation failed"); goto out; } if (ea_inode_cache) mb_cache_entry_create(ea_inode_cache, GFP_NOFS, ext4_xattr_inode_get_hash(ea_inode), ea_inode->i_ino, true /* reusable */); } out: iput(ea_inode); return err; } static int ext4_xattr_block_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size) { struct buffer_head *bh = NULL; struct ext4_xattr_entry *entry; size_t size; void *end; int error; struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld", name_index, name, buffer, (long)buffer_size); if (!EXT4_I(inode)->i_file_acl) return -ENODATA; ea_idebug(inode, "reading block %llu", (unsigned long long)EXT4_I(inode)->i_file_acl); bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); if (IS_ERR(bh)) return PTR_ERR(bh); ea_bdebug(bh, "b_count=%d, refcount=%d", atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); error = ext4_xattr_check_block(inode, bh); if (error) goto cleanup; ext4_xattr_block_cache_insert(ea_block_cache, bh); entry = BFIRST(bh); end = bh->b_data + bh->b_size; error = xattr_find_entry(inode, &entry, end, name_index, name, 1); if (error) goto cleanup; size = le32_to_cpu(entry->e_value_size); error = -ERANGE; if (unlikely(size > EXT4_XATTR_SIZE_MAX)) goto cleanup; if (buffer) { if (size > buffer_size) goto cleanup; if (entry->e_value_inum) { error = ext4_xattr_inode_get(inode, entry, buffer, size); if (error) goto cleanup; } else { u16 offset = le16_to_cpu(entry->e_value_offs); void *p = bh->b_data + offset; if (unlikely(p + size > end)) goto cleanup; memcpy(buffer, p, size); } } error = size; cleanup: brelse(bh); return error; } int ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size) { struct ext4_xattr_ibody_header *header; struct ext4_xattr_entry *entry; struct ext4_inode *raw_inode; struct ext4_iloc iloc; size_t size; void *end; int error; if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR)) return -ENODATA; error = ext4_get_inode_loc(inode, &iloc); if (error) return error; raw_inode = ext4_raw_inode(&iloc); header = IHDR(inode, raw_inode); end = ITAIL(inode, raw_inode); entry = IFIRST(header); error = xattr_find_entry(inode, &entry, end, name_index, name, 0); if (error) goto cleanup; size = le32_to_cpu(entry->e_value_size); error = -ERANGE; if (unlikely(size > EXT4_XATTR_SIZE_MAX)) goto cleanup; if (buffer) { if (size > buffer_size) goto cleanup; if (entry->e_value_inum) { error = ext4_xattr_inode_get(inode, entry, buffer, size); if (error) goto cleanup; } else { u16 offset = le16_to_cpu(entry->e_value_offs); void *p = (void *)IFIRST(header) + offset; if (unlikely(p + size > end)) goto cleanup; memcpy(buffer, p, 
size); } } error = size; cleanup: brelse(iloc.bh); return error; } /* * ext4_xattr_get() * * Copy an extended attribute into the buffer * provided, or compute the buffer size required. * Buffer is NULL to compute the size of the buffer required. * * Returns a negative error number on failure, or the number of bytes * used / required on success. */ int ext4_xattr_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size) { int error; if (unlikely(ext4_forced_shutdown(inode->i_sb))) return -EIO; if (strlen(name) > 255) return -ERANGE; down_read(&EXT4_I(inode)->xattr_sem); error = ext4_xattr_ibody_get(inode, name_index, name, buffer, buffer_size); if (error == -ENODATA) error = ext4_xattr_block_get(inode, name_index, name, buffer, buffer_size); up_read(&EXT4_I(inode)->xattr_sem); return error; } static int ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry, char *buffer, size_t buffer_size) { size_t rest = buffer_size; for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { const char *prefix; prefix = ext4_xattr_prefix(entry->e_name_index, dentry); if (prefix) { size_t prefix_len = strlen(prefix); size_t size = prefix_len + entry->e_name_len + 1; if (buffer) { if (size > rest) return -ERANGE; memcpy(buffer, prefix, prefix_len); buffer += prefix_len; memcpy(buffer, entry->e_name, entry->e_name_len); buffer += entry->e_name_len; *buffer++ = 0; } rest -= size; } } return buffer_size - rest; /* total size */ } static int ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size) { struct inode *inode = d_inode(dentry); struct buffer_head *bh = NULL; int error; ea_idebug(inode, "buffer=%p, buffer_size=%ld", buffer, (long)buffer_size); if (!EXT4_I(inode)->i_file_acl) return 0; ea_idebug(inode, "reading block %llu", (unsigned long long)EXT4_I(inode)->i_file_acl); bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); if (IS_ERR(bh)) return PTR_ERR(bh); ea_bdebug(bh, "b_count=%d, refcount=%d", atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); error = ext4_xattr_check_block(inode, bh); if (error) goto cleanup; ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh); error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size); cleanup: brelse(bh); return error; } static int ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) { struct inode *inode = d_inode(dentry); struct ext4_xattr_ibody_header *header; struct ext4_inode *raw_inode; struct ext4_iloc iloc; int error; if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR)) return 0; error = ext4_get_inode_loc(inode, &iloc); if (error) return error; raw_inode = ext4_raw_inode(&iloc); header = IHDR(inode, raw_inode); error = ext4_xattr_list_entries(dentry, IFIRST(header), buffer, buffer_size); brelse(iloc.bh); return error; } /* * Inode operation listxattr() * * d_inode(dentry)->i_rwsem: don't care * * Copy a list of attribute names into the buffer * provided, or compute the buffer size required. * Buffer is NULL to compute the size of the buffer required. * * Returns a negative error number on failure, or the number of bytes * used / required on success. 
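 *
 * A minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	ssize_t len = ext4_listxattr(dentry, NULL, 0);
 *
 *	if (len > 0) {
 *		char *names = kmalloc(len, GFP_KERNEL);
 *
 *		if (names)
 *			len = ext4_listxattr(dentry, names, len);
 *	}
 *
 * On success the buffer holds a concatenation of NUL-terminated
 * names, e.g. "user.foo\0security.selinux\0".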
*/ ssize_t ext4_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) { int ret, ret2; down_read(&EXT4_I(d_inode(dentry))->xattr_sem); ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size); if (ret < 0) goto errout; if (buffer) { buffer += ret; buffer_size -= ret; } ret = ext4_xattr_block_list(dentry, buffer, buffer_size); if (ret < 0) goto errout; ret += ret2; errout: up_read(&EXT4_I(d_inode(dentry))->xattr_sem); return ret; } /* * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is * not set, set it. */ static void ext4_xattr_update_super_block(handle_t *handle, struct super_block *sb) { if (ext4_has_feature_xattr(sb)) return; BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); if (ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh, EXT4_JTR_NONE) == 0) { lock_buffer(EXT4_SB(sb)->s_sbh); ext4_set_feature_xattr(sb); ext4_superblock_csum_set(sb); unlock_buffer(EXT4_SB(sb)->s_sbh); ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); } } int ext4_get_inode_usage(struct inode *inode, qsize_t *usage) { struct ext4_iloc iloc = { .bh = NULL }; struct buffer_head *bh = NULL; struct ext4_inode *raw_inode; struct ext4_xattr_ibody_header *header; struct ext4_xattr_entry *entry; qsize_t ea_inode_refs = 0; int ret; lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem); if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { ret = ext4_get_inode_loc(inode, &iloc); if (ret) goto out; raw_inode = ext4_raw_inode(&iloc); header = IHDR(inode, raw_inode); for (entry = IFIRST(header); !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) if (entry->e_value_inum) ea_inode_refs++; } if (EXT4_I(inode)->i_file_acl) { bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); if (IS_ERR(bh)) { ret = PTR_ERR(bh); bh = NULL; goto out; } ret = ext4_xattr_check_block(inode, bh); if (ret) goto out; for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) if (entry->e_value_inum) ea_inode_refs++; } *usage = ea_inode_refs + 1; ret = 0; out: brelse(iloc.bh); brelse(bh); return ret; } static inline size_t round_up_cluster(struct inode *inode, size_t length) { struct super_block *sb = inode->i_sb; size_t cluster_size = 1 << (EXT4_SB(sb)->s_cluster_bits + inode->i_blkbits); size_t mask = ~(cluster_size - 1); return (length + cluster_size - 1) & mask; } static int ext4_xattr_inode_alloc_quota(struct inode *inode, size_t len) { int err; err = dquot_alloc_inode(inode); if (err) return err; err = dquot_alloc_space_nodirty(inode, round_up_cluster(inode, len)); if (err) dquot_free_inode(inode); return err; } static void ext4_xattr_inode_free_quota(struct inode *parent, struct inode *ea_inode, size_t len) { if (ea_inode && ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE)) return; dquot_free_space_nodirty(parent, round_up_cluster(parent, len)); dquot_free_inode(parent); } int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode, struct buffer_head *block_bh, size_t value_len, bool is_create) { int credits; int blocks; /* * 1) Owner inode update * 2) Ref count update on old xattr block * 3) new xattr block * 4) block bitmap update for new xattr block * 5) group descriptor for new xattr block * 6) block bitmap update for old xattr block * 7) group descriptor for old block * * 6 & 7 can happen if we have two racing threads T_a and T_b * which are each trying to set an xattr on inodes I_a and I_b * which were both initially sharing an xattr block. */ credits = 7; /* Quota updates. 
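 * EXT4_MAXQUOTAS_TRANS_BLOCKS() reserves enough credits to update the
 * quota files for every enabled quota type in one transaction.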
*/ credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(sb); /* * In case of inline data, we may push out the data to a block, * so we need to reserve credits for this eventuality */ if (inode && ext4_has_inline_data(inode)) credits += ext4_writepage_trans_blocks(inode) + 1; /* We are done if ea_inode feature is not enabled. */ if (!ext4_has_feature_ea_inode(sb)) return credits; /* New ea_inode, inode map, block bitmap, group descriptor. */ credits += 4; /* Data blocks. */ blocks = (value_len + sb->s_blocksize - 1) >> sb->s_blocksize_bits; /* Indirection block or one level of extent tree. */ blocks += 1; /* Block bitmap and group descriptor updates for each block. */ credits += blocks * 2; /* Blocks themselves. */ credits += blocks; if (!is_create) { /* Dereference ea_inode holding old xattr value. * Old ea_inode, inode map, block bitmap, group descriptor. */ credits += 4; /* Data blocks for old ea_inode. */ blocks = XATTR_SIZE_MAX >> sb->s_blocksize_bits; /* Indirection block or one level of extent tree for old * ea_inode. */ blocks += 1; /* Block bitmap and group descriptor updates for each block. */ credits += blocks * 2; } /* We may need to clone the existing xattr block in which case we need * to increment ref counts for existing ea_inodes referenced by it. */ if (block_bh) { struct ext4_xattr_entry *entry = BFIRST(block_bh); for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) if (entry->e_value_inum) /* Ref count update on ea_inode. */ credits += 1; } return credits; } static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode, int ref_change) { struct ext4_iloc iloc; s64 ref_count; int ret; inode_lock_nested(ea_inode, I_MUTEX_XATTR); ret = ext4_reserve_inode_write(handle, ea_inode, &iloc); if (ret) goto out; ref_count = ext4_xattr_inode_get_ref(ea_inode); ref_count += ref_change; ext4_xattr_inode_set_ref(ea_inode, ref_count); if (ref_change > 0) { WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld", ea_inode->i_ino, ref_count); if (ref_count == 1) { WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u", ea_inode->i_ino, ea_inode->i_nlink); set_nlink(ea_inode, 1); ext4_orphan_del(handle, ea_inode); } } else { WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld", ea_inode->i_ino, ref_count); if (ref_count == 0) { WARN_ONCE(ea_inode->i_nlink != 1, "EA inode %lu i_nlink=%u", ea_inode->i_ino, ea_inode->i_nlink); clear_nlink(ea_inode); ext4_orphan_add(handle, ea_inode); } } ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc); if (ret) ext4_warning_inode(ea_inode, "ext4_mark_iloc_dirty() failed ret=%d", ret); out: inode_unlock(ea_inode); return ret; } static int ext4_xattr_inode_inc_ref(handle_t *handle, struct inode *ea_inode) { return ext4_xattr_inode_update_ref(handle, ea_inode, 1); } static int ext4_xattr_inode_dec_ref(handle_t *handle, struct inode *ea_inode) { return ext4_xattr_inode_update_ref(handle, ea_inode, -1); } static int ext4_xattr_inode_inc_ref_all(handle_t *handle, struct inode *parent, struct ext4_xattr_entry *first) { struct inode *ea_inode; struct ext4_xattr_entry *entry; struct ext4_xattr_entry *failed_entry; unsigned int ea_ino; int err, saved_err; for (entry = first; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { if (!entry->e_value_inum) continue; ea_ino = le32_to_cpu(entry->e_value_inum); err = ext4_xattr_inode_iget(parent, ea_ino, le32_to_cpu(entry->e_hash), &ea_inode); if (err) goto cleanup; err = ext4_xattr_inode_inc_ref(handle, ea_inode); if (err) { ext4_warning_inode(ea_inode, "inc ref error %d", err); iput(ea_inode); goto 
cleanup; } iput(ea_inode); } return 0; cleanup: saved_err = err; failed_entry = entry; for (entry = first; entry != failed_entry; entry = EXT4_XATTR_NEXT(entry)) { if (!entry->e_value_inum) continue; ea_ino = le32_to_cpu(entry->e_value_inum); err = ext4_xattr_inode_iget(parent, ea_ino, le32_to_cpu(entry->e_hash), &ea_inode); if (err) { ext4_warning(parent->i_sb, "cleanup ea_ino %u iget error %d", ea_ino, err); continue; } err = ext4_xattr_inode_dec_ref(handle, ea_inode); if (err) ext4_warning_inode(ea_inode, "cleanup dec ref error %d", err); iput(ea_inode); } return saved_err; } static int ext4_xattr_restart_fn(handle_t *handle, struct inode *inode, struct buffer_head *bh, bool block_csum, bool dirty) { int error; if (bh && dirty) { if (block_csum) ext4_xattr_block_csum_set(inode, bh); error = ext4_handle_dirty_metadata(handle, NULL, bh); if (error) { ext4_warning(inode->i_sb, "Handle metadata (error %d)", error); return error; } } return 0; } static void ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent, struct buffer_head *bh, struct ext4_xattr_entry *first, bool block_csum, struct ext4_xattr_inode_array **ea_inode_array, int extra_credits, bool skip_quota) { struct inode *ea_inode; struct ext4_xattr_entry *entry; struct ext4_iloc iloc; bool dirty = false; unsigned int ea_ino; int err; int credits; void *end; if (block_csum) end = (void *)bh->b_data + bh->b_size; else { ext4_get_inode_loc(parent, &iloc); end = (void *)ext4_raw_inode(&iloc) + EXT4_SB(parent->i_sb)->s_inode_size; } /* One credit for dec ref on ea_inode, one for orphan list addition, */ credits = 2 + extra_credits; for (entry = first; (void *)entry < end && !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { if (!entry->e_value_inum) continue; ea_ino = le32_to_cpu(entry->e_value_inum); err = ext4_xattr_inode_iget(parent, ea_ino, le32_to_cpu(entry->e_hash), &ea_inode); if (err) continue; err = ext4_expand_inode_array(ea_inode_array, ea_inode); if (err) { ext4_warning_inode(ea_inode, "Expand inode array err=%d", err); iput(ea_inode); continue; } err = ext4_journal_ensure_credits_fn(handle, credits, credits, ext4_free_metadata_revoke_credits(parent->i_sb, 1), ext4_xattr_restart_fn(handle, parent, bh, block_csum, dirty)); if (err < 0) { ext4_warning_inode(ea_inode, "Ensure credits err=%d", err); continue; } if (err > 0) { err = ext4_journal_get_write_access(handle, parent->i_sb, bh, EXT4_JTR_NONE); if (err) { ext4_warning_inode(ea_inode, "Re-get write access err=%d", err); continue; } } err = ext4_xattr_inode_dec_ref(handle, ea_inode); if (err) { ext4_warning_inode(ea_inode, "ea_inode dec ref err=%d", err); continue; } if (!skip_quota) ext4_xattr_inode_free_quota(parent, ea_inode, le32_to_cpu(entry->e_value_size)); /* * Forget about ea_inode within the same transaction that * decrements the ref count. This avoids duplicate decrements in * case the rest of the work spills over to subsequent * transactions. */ entry->e_value_inum = 0; entry->e_value_size = 0; dirty = true; } if (dirty) { /* * Note that we are deliberately skipping csum calculation for * the final update because we do not expect any journal * restarts until xattr block is freed. */ err = ext4_handle_dirty_metadata(handle, NULL, bh); if (err) ext4_warning_inode(parent, "handle dirty metadata err=%d", err); } } /* * Release the xattr block BH: If the reference count is > 1, decrement it; * otherwise free the block. 
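 *
 * Freeing must be coordinated with the block cache: the mbcache entry
 * is deleted first (waiting for concurrent users to drop it) so that
 * ext4_xattr_block_set() cannot start reusing a block that is about
 * to be freed.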
*/ static void ext4_xattr_release_block(handle_t *handle, struct inode *inode, struct buffer_head *bh, struct ext4_xattr_inode_array **ea_inode_array, int extra_credits) { struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); u32 hash, ref; int error = 0; BUFFER_TRACE(bh, "get_write_access"); error = ext4_journal_get_write_access(handle, inode->i_sb, bh, EXT4_JTR_NONE); if (error) goto out; retry_ref: lock_buffer(bh); hash = le32_to_cpu(BHDR(bh)->h_hash); ref = le32_to_cpu(BHDR(bh)->h_refcount); if (ref == 1) { ea_bdebug(bh, "refcount now=0; freeing"); /* * This must happen under buffer lock for * ext4_xattr_block_set() to reliably detect freed block */ if (ea_block_cache) { struct mb_cache_entry *oe; oe = mb_cache_entry_delete_or_get(ea_block_cache, hash, bh->b_blocknr); if (oe) { unlock_buffer(bh); mb_cache_entry_wait_unused(oe); mb_cache_entry_put(ea_block_cache, oe); goto retry_ref; } } get_bh(bh); unlock_buffer(bh); if (ext4_has_feature_ea_inode(inode->i_sb)) ext4_xattr_inode_dec_ref_all(handle, inode, bh, BFIRST(bh), true /* block_csum */, ea_inode_array, extra_credits, true /* skip_quota */); ext4_free_blocks(handle, inode, bh, 0, 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); } else { ref--; BHDR(bh)->h_refcount = cpu_to_le32(ref); if (ref == EXT4_XATTR_REFCOUNT_MAX - 1) { struct mb_cache_entry *ce; if (ea_block_cache) { ce = mb_cache_entry_get(ea_block_cache, hash, bh->b_blocknr); if (ce) { set_bit(MBE_REUSABLE_B, &ce->e_flags); mb_cache_entry_put(ea_block_cache, ce); } } } ext4_xattr_block_csum_set(inode, bh); /* * Beware of this ugliness: Releasing of xattr block references * from different inodes can race and so we have to protect * from a race where someone else frees the block (and releases * its journal_head) before we are done dirtying the buffer. In * nojournal mode this race is harmless and we actually cannot * call ext4_handle_dirty_metadata() with locked buffer as * that function can call sync_dirty_buffer() so for that case * we handle the dirtying after unlocking the buffer. */ if (ext4_handle_valid(handle)) error = ext4_handle_dirty_metadata(handle, inode, bh); unlock_buffer(bh); if (!ext4_handle_valid(handle)) error = ext4_handle_dirty_metadata(handle, inode, bh); if (IS_SYNC(inode)) ext4_handle_sync(handle); dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1)); ea_bdebug(bh, "refcount now=%d; releasing", le32_to_cpu(BHDR(bh)->h_refcount)); } out: ext4_std_error(inode->i_sb, error); return; } /* * Find the available free space for EAs. This also returns the total number of * bytes used by EA entries. */ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last, size_t *min_offs, void *base, int *total) { for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { if (!last->e_value_inum && last->e_value_size) { size_t offs = le16_to_cpu(last->e_value_offs); if (offs < *min_offs) *min_offs = offs; } if (total) *total += EXT4_XATTR_LEN(last->e_name_len); } return (*min_offs - ((void *)last - base) - sizeof(__u32)); } /* * Write the value of the EA in an inode. 
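 *
 * This runs in two passes: first all blocks are allocated (retrying
 * on ENOSPC), then the value is copied in block-sized chunks with the
 * tail of the last block zeroed so no uninitialized memory hits disk.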
*/ static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode, const void *buf, int bufsize) { struct buffer_head *bh = NULL; unsigned long block = 0; int blocksize = ea_inode->i_sb->s_blocksize; int max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits; int csize, wsize = 0; int ret = 0, ret2 = 0; int retries = 0; retry: while (ret >= 0 && ret < max_blocks) { struct ext4_map_blocks map; map.m_lblk = block += ret; map.m_len = max_blocks -= ret; ret = ext4_map_blocks(handle, ea_inode, &map, EXT4_GET_BLOCKS_CREATE); if (ret <= 0) { ext4_mark_inode_dirty(handle, ea_inode); if (ret == -ENOSPC && ext4_should_retry_alloc(ea_inode->i_sb, &retries)) { ret = 0; goto retry; } break; } } if (ret < 0) return ret; block = 0; while (wsize < bufsize) { brelse(bh); csize = (bufsize - wsize) > blocksize ? blocksize : bufsize - wsize; bh = ext4_getblk(handle, ea_inode, block, 0); if (IS_ERR(bh)) return PTR_ERR(bh); if (!bh) { WARN_ON_ONCE(1); EXT4_ERROR_INODE(ea_inode, "ext4_getblk() return bh = NULL"); return -EFSCORRUPTED; } ret = ext4_journal_get_write_access(handle, ea_inode->i_sb, bh, EXT4_JTR_NONE); if (ret) goto out; memcpy(bh->b_data, buf, csize); /* * Zero out block tail to avoid writing uninitialized memory * to disk. */ if (csize < blocksize) memset(bh->b_data + csize, 0, blocksize - csize); set_buffer_uptodate(bh); ext4_handle_dirty_metadata(handle, ea_inode, bh); buf += csize; wsize += csize; block += 1; } inode_lock(ea_inode); i_size_write(ea_inode, wsize); ext4_update_i_disksize(ea_inode, wsize); inode_unlock(ea_inode); ret2 = ext4_mark_inode_dirty(handle, ea_inode); if (unlikely(ret2 && !ret)) ret = ret2; out: brelse(bh); return ret; } /* * Create an inode to store the value of a large EA. */ static struct inode *ext4_xattr_inode_create(handle_t *handle, struct inode *inode, u32 hash) { struct inode *ea_inode = NULL; uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) }; int err; if (inode->i_sb->s_root == NULL) { ext4_warning(inode->i_sb, "refuse to create EA inode when umounting"); WARN_ON(1); return ERR_PTR(-EINVAL); } /* * Let the next inode be the goal, so we try and allocate the EA inode * in the same group, or nearby one. */ ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode, S_IFREG | 0600, NULL, inode->i_ino + 1, owner, EXT4_EA_INODE_FL); if (!IS_ERR(ea_inode)) { ea_inode->i_op = &ext4_file_inode_operations; ea_inode->i_fop = &ext4_file_operations; ext4_set_aops(ea_inode); ext4_xattr_inode_set_class(ea_inode); unlock_new_inode(ea_inode); ext4_xattr_inode_set_ref(ea_inode, 1); ext4_xattr_inode_set_hash(ea_inode, hash); err = ext4_mark_inode_dirty(handle, ea_inode); if (!err) err = ext4_inode_attach_jinode(ea_inode); if (err) { if (ext4_xattr_inode_dec_ref(handle, ea_inode)) ext4_warning_inode(ea_inode, "cleanup dec ref error %d", err); iput(ea_inode); return ERR_PTR(err); } /* * Xattr inodes are shared therefore quota charging is performed * at a higher level. 
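 * The EA inode itself is therefore marked S_NOQUOTA and its own quota
 * charge is dropped here; the parent inode pays for the space via
 * ext4_xattr_inode_alloc_quota() instead.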
*/ dquot_free_inode(ea_inode); dquot_drop(ea_inode); inode_lock(ea_inode); ea_inode->i_flags |= S_NOQUOTA; inode_unlock(ea_inode); } return ea_inode; } static struct inode * ext4_xattr_inode_cache_find(struct inode *inode, const void *value, size_t value_len, u32 hash) { struct inode *ea_inode; struct mb_cache_entry *ce; struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode); void *ea_data; if (!ea_inode_cache) return NULL; ce = mb_cache_entry_find_first(ea_inode_cache, hash); if (!ce) return NULL; WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) && !(current->flags & PF_MEMALLOC_NOFS)); ea_data = kvmalloc(value_len, GFP_KERNEL); if (!ea_data) { mb_cache_entry_put(ea_inode_cache, ce); return NULL; } while (ce) { ea_inode = ext4_iget(inode->i_sb, ce->e_value, EXT4_IGET_EA_INODE); if (IS_ERR(ea_inode)) goto next_entry; ext4_xattr_inode_set_class(ea_inode); if (i_size_read(ea_inode) == value_len && !ext4_xattr_inode_read(ea_inode, ea_data, value_len) && !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data, value_len) && !memcmp(value, ea_data, value_len)) { mb_cache_entry_touch(ea_inode_cache, ce); mb_cache_entry_put(ea_inode_cache, ce); kvfree(ea_data); return ea_inode; } iput(ea_inode); next_entry: ce = mb_cache_entry_find_next(ea_inode_cache, ce); } kvfree(ea_data); return NULL; } /* * Add value of the EA in an inode. */ static struct inode *ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode, const void *value, size_t value_len) { struct inode *ea_inode; u32 hash; int err; /* Account inode & space to quota even if sharing... */ err = ext4_xattr_inode_alloc_quota(inode, value_len); if (err) return ERR_PTR(err); hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len); ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash); if (ea_inode) { err = ext4_xattr_inode_inc_ref(handle, ea_inode); if (err) goto out_err; return ea_inode; } /* Create an inode for the EA value */ ea_inode = ext4_xattr_inode_create(handle, inode, hash); if (IS_ERR(ea_inode)) { ext4_xattr_inode_free_quota(inode, NULL, value_len); return ea_inode; } err = ext4_xattr_inode_write(handle, ea_inode, value, value_len); if (err) { if (ext4_xattr_inode_dec_ref(handle, ea_inode)) ext4_warning_inode(ea_inode, "cleanup dec ref error %d", err); goto out_err; } if (EA_INODE_CACHE(inode)) mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash, ea_inode->i_ino, true /* reusable */); return ea_inode; out_err: iput(ea_inode); ext4_xattr_inode_free_quota(inode, NULL, value_len); return ERR_PTR(err); } /* * Reserve min(block_size/8, 1024) bytes for xattr entries/names if ea_inode * feature is enabled. */ #define EXT4_XATTR_BLOCK_RESERVE(inode) min(i_blocksize(inode)/8, 1024U) static int ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s, handle_t *handle, struct inode *inode, struct inode *new_ea_inode, bool is_block) { struct ext4_xattr_entry *last, *next; struct ext4_xattr_entry *here = s->here; size_t min_offs = s->end - s->base, name_len = strlen(i->name); int in_inode = i->in_inode; struct inode *old_ea_inode = NULL; size_t old_size, new_size; int ret; /* Space used by old and new values. */ old_size = (!s->not_found && !here->e_value_inum) ? EXT4_XATTR_SIZE(le32_to_cpu(here->e_value_size)) : 0; new_size = (i->value && !in_inode) ? EXT4_XATTR_SIZE(i->value_len) : 0; /* * Optimization for the simple case when old and new values have the * same padded sizes. Not applicable if external inodes are involved. 
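 *
 * Values are padded to a multiple of EXT4_XATTR_PAD (4) bytes, so,
 * for example, replacing a 5-byte value with a 7-byte one reuses the
 * same 8-byte slot in place and only e_value_size changes.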
*/ if (new_size && new_size == old_size) { size_t offs = le16_to_cpu(here->e_value_offs); void *val = s->base + offs; here->e_value_size = cpu_to_le32(i->value_len); if (i->value == EXT4_ZERO_XATTR_VALUE) { memset(val, 0, new_size); } else { memcpy(val, i->value, i->value_len); /* Clear padding bytes. */ memset(val + i->value_len, 0, new_size - i->value_len); } goto update_hash; } /* Compute min_offs and last. */ last = s->first; for (; !IS_LAST_ENTRY(last); last = next) { next = EXT4_XATTR_NEXT(last); if ((void *)next >= s->end) { EXT4_ERROR_INODE(inode, "corrupted xattr entries"); ret = -EFSCORRUPTED; goto out; } if (!last->e_value_inum && last->e_value_size) { size_t offs = le16_to_cpu(last->e_value_offs); if (offs < min_offs) min_offs = offs; } } /* Check whether we have enough space. */ if (i->value) { size_t free; free = min_offs - ((void *)last - s->base) - sizeof(__u32); if (!s->not_found) free += EXT4_XATTR_LEN(name_len) + old_size; if (free < EXT4_XATTR_LEN(name_len) + new_size) { ret = -ENOSPC; goto out; } /* * If storing the value in an external inode is an option, * reserve space for xattr entries/names in the external * attribute block so that a long value does not occupy the * whole space and prevent further entries being added. */ if (ext4_has_feature_ea_inode(inode->i_sb) && new_size && is_block && (min_offs + old_size - new_size) < EXT4_XATTR_BLOCK_RESERVE(inode)) { ret = -ENOSPC; goto out; } } /* * Getting access to old and new ea inodes is subject to failures. * Finish that work before doing any modifications to the xattr data. */ if (!s->not_found && here->e_value_inum) { ret = ext4_xattr_inode_iget(inode, le32_to_cpu(here->e_value_inum), le32_to_cpu(here->e_hash), &old_ea_inode); if (ret) { old_ea_inode = NULL; goto out; } /* We are ready to release ref count on the old_ea_inode. */ ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode); if (ret) goto out; ext4_xattr_inode_free_quota(inode, old_ea_inode, le32_to_cpu(here->e_value_size)); } /* No failures allowed past this point. */ if (!s->not_found && here->e_value_size && !here->e_value_inum) { /* Remove the old value. */ void *first_val = s->base + min_offs; size_t offs = le16_to_cpu(here->e_value_offs); void *val = s->base + offs; memmove(first_val + old_size, first_val, val - first_val); memset(first_val, 0, old_size); min_offs += old_size; /* Adjust all value offsets. */ last = s->first; while (!IS_LAST_ENTRY(last)) { size_t o = le16_to_cpu(last->e_value_offs); if (!last->e_value_inum && last->e_value_size && o < offs) last->e_value_offs = cpu_to_le16(o + old_size); last = EXT4_XATTR_NEXT(last); } } if (!i->value) { /* Remove old name. */ size_t size = EXT4_XATTR_LEN(name_len); last = ENTRY((void *)last - size); memmove(here, (void *)here + size, (void *)last - (void *)here + sizeof(__u32)); memset(last, 0, size); /* * Update i_inline_off - moved ibody region might contain * system.data attribute. Handling a failure here won't * cause other complications for setting an xattr. */ if (!is_block && ext4_has_inline_data(inode)) { ret = ext4_find_inline_data_nolock(inode); if (ret) { ext4_warning_inode(inode, "unable to update i_inline_off"); goto out; } } } else if (s->not_found) { /* Insert new name. */ size_t size = EXT4_XATTR_LEN(name_len); size_t rest = (void *)last - (void *)here + sizeof(__u32); memmove((void *)here + size, here, rest); memset(here, 0, size); here->e_name_index = i->name_index; here->e_name_len = name_len; memcpy(here->e_name, i->name, name_len); } else { /* This is an update, reset value info. 
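 * The old value (if it was stored inline) has already been removed
 * and the remaining values compacted above, so clearing the value
 * fields leaves a well-formed entry ready for the new value below.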
*/ here->e_value_inum = 0; here->e_value_offs = 0; here->e_value_size = 0; } if (i->value) { /* Insert new value. */ if (in_inode) { here->e_value_inum = cpu_to_le32(new_ea_inode->i_ino); } else if (i->value_len) { void *val = s->base + min_offs - new_size; here->e_value_offs = cpu_to_le16(min_offs - new_size); if (i->value == EXT4_ZERO_XATTR_VALUE) { memset(val, 0, new_size); } else { memcpy(val, i->value, i->value_len); /* Clear padding bytes. */ memset(val + i->value_len, 0, new_size - i->value_len); } } here->e_value_size = cpu_to_le32(i->value_len); } update_hash: if (i->value) { __le32 hash = 0; /* Entry hash calculation. */ if (in_inode) { __le32 crc32c_hash; /* * Feed crc32c hash instead of the raw value for entry * hash calculation. This is to avoid walking * potentially long value buffer again. */ crc32c_hash = cpu_to_le32( ext4_xattr_inode_get_hash(new_ea_inode)); hash = ext4_xattr_hash_entry(here->e_name, here->e_name_len, &crc32c_hash, 1); } else if (is_block) { __le32 *value = s->base + le16_to_cpu( here->e_value_offs); hash = ext4_xattr_hash_entry(here->e_name, here->e_name_len, value, new_size >> 2); } here->e_hash = hash; } if (is_block) ext4_xattr_rehash((struct ext4_xattr_header *)s->base); ret = 0; out: iput(old_ea_inode); return ret; } struct ext4_xattr_block_find { struct ext4_xattr_search s; struct buffer_head *bh; }; static int ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_block_find *bs) { struct super_block *sb = inode->i_sb; int error; ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld", i->name_index, i->name, i->value, (long)i->value_len); if (EXT4_I(inode)->i_file_acl) { /* The inode already has an extended attribute block. */ bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); if (IS_ERR(bs->bh)) { error = PTR_ERR(bs->bh); bs->bh = NULL; return error; } ea_bdebug(bs->bh, "b_count=%d, refcount=%d", atomic_read(&(bs->bh->b_count)), le32_to_cpu(BHDR(bs->bh)->h_refcount)); error = ext4_xattr_check_block(inode, bs->bh); if (error) return error; /* Find the named attribute. 
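 * Entries in the xattr block are kept sorted, so the lookup below
 * passes sorted == 1 and xattr_find_entry() can stop early once it
 * walks past the insertion point.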
*/ bs->s.base = BHDR(bs->bh); bs->s.first = BFIRST(bs->bh); bs->s.end = bs->bh->b_data + bs->bh->b_size; bs->s.here = bs->s.first; error = xattr_find_entry(inode, &bs->s.here, bs->s.end, i->name_index, i->name, 1); if (error && error != -ENODATA) return error; bs->s.not_found = error; } return 0; } static int ext4_xattr_block_set(handle_t *handle, struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_block_find *bs) { struct super_block *sb = inode->i_sb; struct buffer_head *new_bh = NULL; struct ext4_xattr_search s_copy = bs->s; struct ext4_xattr_search *s = &s_copy; struct mb_cache_entry *ce = NULL; int error = 0; struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); struct inode *ea_inode = NULL, *tmp_inode; size_t old_ea_inode_quota = 0; unsigned int ea_ino; #define header(x) ((struct ext4_xattr_header *)(x)) /* If we need EA inode, prepare it before locking the buffer */ if (i->value && i->in_inode) { WARN_ON_ONCE(!i->value_len); ea_inode = ext4_xattr_inode_lookup_create(handle, inode, i->value, i->value_len); if (IS_ERR(ea_inode)) { error = PTR_ERR(ea_inode); ea_inode = NULL; goto cleanup; } } if (s->base) { int offset = (char *)s->here - bs->bh->b_data; BUFFER_TRACE(bs->bh, "get_write_access"); error = ext4_journal_get_write_access(handle, sb, bs->bh, EXT4_JTR_NONE); if (error) goto cleanup; lock_buffer(bs->bh); if (header(s->base)->h_refcount == cpu_to_le32(1)) { __u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash); /* * This must happen under buffer lock for * ext4_xattr_block_set() to reliably detect modified * block */ if (ea_block_cache) { struct mb_cache_entry *oe; oe = mb_cache_entry_delete_or_get(ea_block_cache, hash, bs->bh->b_blocknr); if (oe) { /* * Xattr block is getting reused. Leave * it alone. */ mb_cache_entry_put(ea_block_cache, oe); goto clone_block; } } ea_bdebug(bs->bh, "modifying in-place"); error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode, true /* is_block */); ext4_xattr_block_csum_set(inode, bs->bh); unlock_buffer(bs->bh); if (error == -EFSCORRUPTED) goto bad_block; if (!error) error = ext4_handle_dirty_metadata(handle, inode, bs->bh); if (error) goto cleanup; goto inserted; } clone_block: unlock_buffer(bs->bh); ea_bdebug(bs->bh, "cloning"); s->base = kmemdup(BHDR(bs->bh), bs->bh->b_size, GFP_NOFS); error = -ENOMEM; if (s->base == NULL) goto cleanup; s->first = ENTRY(header(s->base)+1); header(s->base)->h_refcount = cpu_to_le32(1); s->here = ENTRY(s->base + offset); s->end = s->base + bs->bh->b_size; /* * If existing entry points to an xattr inode, we need * to prevent ext4_xattr_set_entry() from decrementing * ref count on it because the reference belongs to the * original block. In this case, make the entry look * like it has an empty value. */ if (!s->not_found && s->here->e_value_inum) { ea_ino = le32_to_cpu(s->here->e_value_inum); error = ext4_xattr_inode_iget(inode, ea_ino, le32_to_cpu(s->here->e_hash), &tmp_inode); if (error) goto cleanup; if (!ext4_test_inode_state(tmp_inode, EXT4_STATE_LUSTRE_EA_INODE)) { /* * Defer quota free call for previous * inode until success is guaranteed. */ old_ea_inode_quota = le32_to_cpu( s->here->e_value_size); } iput(tmp_inode); s->here->e_value_inum = 0; s->here->e_value_size = 0; } } else { /* Allocate a buffer where we construct the new block. 
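 * The fresh block is zero-filled and gets its magic number,
 * h_blocks == 1 and h_refcount == 1 set below before any entry is
 * added.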
*/ s->base = kzalloc(sb->s_blocksize, GFP_NOFS); error = -ENOMEM; if (s->base == NULL) goto cleanup; header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); header(s->base)->h_blocks = cpu_to_le32(1); header(s->base)->h_refcount = cpu_to_le32(1); s->first = ENTRY(header(s->base)+1); s->here = ENTRY(header(s->base)+1); s->end = s->base + sb->s_blocksize; } error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode, true /* is_block */); if (error == -EFSCORRUPTED) goto bad_block; if (error) goto cleanup; inserted: if (!IS_LAST_ENTRY(s->first)) { new_bh = ext4_xattr_block_cache_find(inode, header(s->base), &ce); if (IS_ERR(new_bh)) { error = PTR_ERR(new_bh); new_bh = NULL; goto cleanup; } if (new_bh) { /* We found an identical block in the cache. */ if (new_bh == bs->bh) ea_bdebug(new_bh, "keeping"); else { u32 ref; #ifdef EXT4_XATTR_DEBUG WARN_ON_ONCE(dquot_initialize_needed(inode)); #endif /* The old block is released after updating the inode. */ error = dquot_alloc_block(inode, EXT4_C2B(EXT4_SB(sb), 1)); if (error) goto cleanup; BUFFER_TRACE(new_bh, "get_write_access"); error = ext4_journal_get_write_access( handle, sb, new_bh, EXT4_JTR_NONE); if (error) goto cleanup_dquot; lock_buffer(new_bh); /* * We have to be careful about races with * adding references to xattr block. Once we * hold buffer lock xattr block's state is * stable so we can check the additional * reference fits. */ ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1; if (ref > EXT4_XATTR_REFCOUNT_MAX) { /* * Undo everything and check mbcache * again. */ unlock_buffer(new_bh); dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1)); brelse(new_bh); mb_cache_entry_put(ea_block_cache, ce); ce = NULL; new_bh = NULL; goto inserted; } BHDR(new_bh)->h_refcount = cpu_to_le32(ref); if (ref == EXT4_XATTR_REFCOUNT_MAX) clear_bit(MBE_REUSABLE_B, &ce->e_flags); ea_bdebug(new_bh, "reusing; refcount now=%d", ref); ext4_xattr_block_csum_set(inode, new_bh); unlock_buffer(new_bh); error = ext4_handle_dirty_metadata(handle, inode, new_bh); if (error) goto cleanup_dquot; } mb_cache_entry_touch(ea_block_cache, ce); mb_cache_entry_put(ea_block_cache, ce); ce = NULL; } else if (bs->bh && s->base == bs->bh->b_data) { /* We were modifying this block in-place. */ ea_bdebug(bs->bh, "keeping this block"); ext4_xattr_block_cache_insert(ea_block_cache, bs->bh); new_bh = bs->bh; get_bh(new_bh); } else { /* We need to allocate a new block */ ext4_fsblk_t goal, block; #ifdef EXT4_XATTR_DEBUG WARN_ON_ONCE(dquot_initialize_needed(inode)); #endif goal = ext4_group_first_block_no(sb, EXT4_I(inode)->i_block_group); block = ext4_new_meta_blocks(handle, inode, goal, 0, NULL, &error); if (error) goto cleanup; ea_idebug(inode, "creating block %llu", (unsigned long long)block); new_bh = sb_getblk(sb, block); if (unlikely(!new_bh)) { error = -ENOMEM; getblk_failed: ext4_free_blocks(handle, inode, NULL, block, 1, EXT4_FREE_BLOCKS_METADATA); goto cleanup; } error = ext4_xattr_inode_inc_ref_all(handle, inode, ENTRY(header(s->base)+1)); if (error) goto getblk_failed; if (ea_inode) { /* Drop the extra ref on ea_inode. 
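 * ext4_xattr_inode_inc_ref_all() above has just taken a reference on
 * behalf of the new block, so the reference obtained at lookup/create
 * time is no longer needed.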
*/ error = ext4_xattr_inode_dec_ref(handle, ea_inode); if (error) ext4_warning_inode(ea_inode, "dec ref error=%d", error); iput(ea_inode); ea_inode = NULL; } lock_buffer(new_bh); error = ext4_journal_get_create_access(handle, sb, new_bh, EXT4_JTR_NONE); if (error) { unlock_buffer(new_bh); error = -EIO; goto getblk_failed; } memcpy(new_bh->b_data, s->base, new_bh->b_size); ext4_xattr_block_csum_set(inode, new_bh); set_buffer_uptodate(new_bh); unlock_buffer(new_bh); ext4_xattr_block_cache_insert(ea_block_cache, new_bh); error = ext4_handle_dirty_metadata(handle, inode, new_bh); if (error) goto cleanup; } } if (old_ea_inode_quota) ext4_xattr_inode_free_quota(inode, NULL, old_ea_inode_quota); /* Update the inode. */ EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0; /* Drop the previous xattr block. */ if (bs->bh && bs->bh != new_bh) { struct ext4_xattr_inode_array *ea_inode_array = NULL; ext4_xattr_release_block(handle, inode, bs->bh, &ea_inode_array, 0 /* extra_credits */); ext4_xattr_inode_array_free(ea_inode_array); } error = 0; cleanup: if (ea_inode) { if (error) { int error2; error2 = ext4_xattr_inode_dec_ref(handle, ea_inode); if (error2) ext4_warning_inode(ea_inode, "dec ref error=%d", error2); ext4_xattr_inode_free_quota(inode, ea_inode, i_size_read(ea_inode)); } iput(ea_inode); } if (ce) mb_cache_entry_put(ea_block_cache, ce); brelse(new_bh); if (!(bs->bh && s->base == bs->bh->b_data)) kfree(s->base); return error; cleanup_dquot: dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1)); goto cleanup; bad_block: EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); goto cleanup; #undef header } int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_ibody_find *is) { struct ext4_xattr_ibody_header *header; struct ext4_inode *raw_inode; int error; if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) return 0; raw_inode = ext4_raw_inode(&is->iloc); header = IHDR(inode, raw_inode); is->s.base = is->s.first = IFIRST(header); is->s.here = is->s.first; is->s.end = ITAIL(inode, raw_inode); if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { /* Find the named attribute. 
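 * In-inode entries are not sorted, so unlike the block case the
 * lookup below passes sorted == 0 and scans the full entry list.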
*/ error = xattr_find_entry(inode, &is->s.here, is->s.end, i->name_index, i->name, 0); if (error && error != -ENODATA) return error; is->s.not_found = error; } return 0; } int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_ibody_find *is) { struct ext4_xattr_ibody_header *header; struct ext4_xattr_search *s = &is->s; struct inode *ea_inode = NULL; int error; if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) return -ENOSPC; /* If we need EA inode, prepare it before locking the buffer */ if (i->value && i->in_inode) { WARN_ON_ONCE(!i->value_len); ea_inode = ext4_xattr_inode_lookup_create(handle, inode, i->value, i->value_len); if (IS_ERR(ea_inode)) return PTR_ERR(ea_inode); } error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode, false /* is_block */); if (error) { if (ea_inode) { int error2; error2 = ext4_xattr_inode_dec_ref(handle, ea_inode); if (error2) ext4_warning_inode(ea_inode, "dec ref error=%d", error2); ext4_xattr_inode_free_quota(inode, ea_inode, i_size_read(ea_inode)); iput(ea_inode); } return error; } header = IHDR(inode, ext4_raw_inode(&is->iloc)); if (!IS_LAST_ENTRY(s->first)) { header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); ext4_set_inode_state(inode, EXT4_STATE_XATTR); } else { header->h_magic = cpu_to_le32(0); ext4_clear_inode_state(inode, EXT4_STATE_XATTR); } iput(ea_inode); return 0; } static int ext4_xattr_value_same(struct ext4_xattr_search *s, struct ext4_xattr_info *i) { void *value; /* When e_value_inum is set the value is stored externally. */ if (s->here->e_value_inum) return 0; if (le32_to_cpu(s->here->e_value_size) != i->value_len) return 0; value = ((void *)s->base) + le16_to_cpu(s->here->e_value_offs); return !memcmp(value, i->value, i->value_len); } static struct buffer_head *ext4_xattr_get_block(struct inode *inode) { struct buffer_head *bh; int error; if (!EXT4_I(inode)->i_file_acl) return NULL; bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); if (IS_ERR(bh)) return bh; error = ext4_xattr_check_block(inode, bh); if (error) { brelse(bh); return ERR_PTR(error); } return bh; } /* * ext4_xattr_set_handle() * * Create, replace or remove an extended attribute for this inode. Value * is NULL to remove an existing extended attribute, and non-NULL to * either replace an existing extended attribute, or create a new extended * attribute. The flags XATTR_REPLACE and XATTR_CREATE * specify that an extended attribute must exist and must not exist * previous to the call, respectively. * * Returns 0, or a negative error number on failure. */ int ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, const char *name, const void *value, size_t value_len, int flags) { struct ext4_xattr_info i = { .name_index = name_index, .name = name, .value = value, .value_len = value_len, .in_inode = 0, }; struct ext4_xattr_ibody_find is = { .s = { .not_found = -ENODATA, }, }; struct ext4_xattr_block_find bs = { .s = { .not_found = -ENODATA, }, }; int no_expand; int error; if (!name) return -EINVAL; if (strlen(name) > 255) return -ERANGE; ext4_write_lock_xattr(inode, &no_expand); /* Check journal credits under write lock. 
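 * The caller supplied a running handle, so all we can do here is
 * verify that it still has enough buffer credits for the worst case;
 * the handle is not extended.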
*/ if (ext4_handle_valid(handle)) { struct buffer_head *bh; int credits; bh = ext4_xattr_get_block(inode); if (IS_ERR(bh)) { error = PTR_ERR(bh); goto cleanup; } credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh, value_len, flags & XATTR_CREATE); brelse(bh); if (jbd2_handle_buffer_credits(handle) < credits) { error = -ENOSPC; goto cleanup; } WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS)); } error = ext4_reserve_inode_write(handle, inode, &is.iloc); if (error) goto cleanup; if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) { struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc); memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); ext4_clear_inode_state(inode, EXT4_STATE_NEW); } error = ext4_xattr_ibody_find(inode, &i, &is); if (error) goto cleanup; if (is.s.not_found) error = ext4_xattr_block_find(inode, &i, &bs); if (error) goto cleanup; if (is.s.not_found && bs.s.not_found) { error = -ENODATA; if (flags & XATTR_REPLACE) goto cleanup; error = 0; if (!value) goto cleanup; } else { error = -EEXIST; if (flags & XATTR_CREATE) goto cleanup; } if (!value) { if (!is.s.not_found) error = ext4_xattr_ibody_set(handle, inode, &i, &is); else if (!bs.s.not_found) error = ext4_xattr_block_set(handle, inode, &i, &bs); } else { error = 0; /* Xattr value did not change? Save us some work and bail out */ if (!is.s.not_found && ext4_xattr_value_same(&is.s, &i)) goto cleanup; if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i)) goto cleanup; if (ext4_has_feature_ea_inode(inode->i_sb) && (EXT4_XATTR_SIZE(i.value_len) > EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize))) i.in_inode = 1; retry_inode: error = ext4_xattr_ibody_set(handle, inode, &i, &is); if (!error && !bs.s.not_found) { i.value = NULL; error = ext4_xattr_block_set(handle, inode, &i, &bs); } else if (error == -ENOSPC) { if (EXT4_I(inode)->i_file_acl && !bs.s.base) { brelse(bs.bh); bs.bh = NULL; error = ext4_xattr_block_find(inode, &i, &bs); if (error) goto cleanup; } error = ext4_xattr_block_set(handle, inode, &i, &bs); if (!error && !is.s.not_found) { i.value = NULL; error = ext4_xattr_ibody_set(handle, inode, &i, &is); } else if (error == -ENOSPC) { /* * Xattr does not fit in the block, store at * external inode if possible. */ if (ext4_has_feature_ea_inode(inode->i_sb) && i.value_len && !i.in_inode) { i.in_inode = 1; goto retry_inode; } } } } if (!error) { ext4_xattr_update_super_block(handle, inode->i_sb); inode_set_ctime_current(inode); inode_inc_iversion(inode); if (!value) no_expand = 0; error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); /* * The bh is consumed by ext4_mark_iloc_dirty, even with * error != 0. */ is.iloc.bh = NULL; if (IS_SYNC(inode)) ext4_handle_sync(handle); } ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle); cleanup: brelse(is.iloc.bh); brelse(bs.bh); ext4_write_unlock_xattr(inode, &no_expand); return error; } int ext4_xattr_set_credits(struct inode *inode, size_t value_len, bool is_create, int *credits) { struct buffer_head *bh; int err; *credits = 0; if (!EXT4_SB(inode->i_sb)->s_journal) return 0; down_read(&EXT4_I(inode)->xattr_sem); bh = ext4_xattr_get_block(inode); if (IS_ERR(bh)) { err = PTR_ERR(bh); } else { *credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh, value_len, is_create); brelse(bh); err = 0; } up_read(&EXT4_I(inode)->xattr_sem); return err; } /* * ext4_xattr_set() * * Like ext4_xattr_set_handle, but start from an inode. This extended * attribute modification is a filesystem transaction by itself. 
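 *
 * A minimal usage sketch (hypothetical values, for illustration only):
 *
 *	err = ext4_xattr_set(inode, EXT4_XATTR_INDEX_USER, "foo",
 *			     "bar", 3, 0);
 *
 * creates or replaces the attribute user.foo with the value "bar",
 * starting and committing its own journal handle and retrying the
 * allocation on ENOSPC.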
* * Returns 0, or a negative error number on failure. */ int ext4_xattr_set(struct inode *inode, int name_index, const char *name, const void *value, size_t value_len, int flags) { handle_t *handle; struct super_block *sb = inode->i_sb; int error, retries = 0; int credits; error = dquot_initialize(inode); if (error) return error; retry: error = ext4_xattr_set_credits(inode, value_len, flags & XATTR_CREATE, &credits); if (error) return error; handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits); if (IS_ERR(handle)) { error = PTR_ERR(handle); } else { int error2; error = ext4_xattr_set_handle(handle, inode, name_index, name, value, value_len, flags); ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle); error2 = ext4_journal_stop(handle); if (error == -ENOSPC && ext4_should_retry_alloc(sb, &retries)) goto retry; if (error == 0) error = error2; } return error; } /* * Shift the EA entries in the inode to create space for the increased * i_extra_isize. */ static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry, int value_offs_shift, void *to, void *from, size_t n) { struct ext4_xattr_entry *last = entry; int new_offs; /* We always shift xattr headers further thus offsets get lower */ BUG_ON(value_offs_shift > 0); /* Adjust the value offsets of the entries */ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { if (!last->e_value_inum && last->e_value_size) { new_offs = le16_to_cpu(last->e_value_offs) + value_offs_shift; last->e_value_offs = cpu_to_le16(new_offs); } } /* Shift the entries by n bytes */ memmove(to, from, n); } /* * Move xattr pointed to by 'entry' from inode into external xattr block */ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, struct ext4_inode *raw_inode, struct ext4_xattr_entry *entry) { struct ext4_xattr_ibody_find *is = NULL; struct ext4_xattr_block_find *bs = NULL; char *buffer = NULL, *b_entry_name = NULL; size_t value_size = le32_to_cpu(entry->e_value_size); struct ext4_xattr_info i = { .value = NULL, .value_len = 0, .name_index = entry->e_name_index, .in_inode = !!entry->e_value_inum, }; struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode); int needs_kvfree = 0; int error; is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS); bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS); b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS); if (!is || !bs || !b_entry_name) { error = -ENOMEM; goto out; } is->s.not_found = -ENODATA; bs->s.not_found = -ENODATA; is->iloc.bh = NULL; bs->bh = NULL; /* Save the entry name and the entry value */ if (entry->e_value_inum) { buffer = kvmalloc(value_size, GFP_NOFS); if (!buffer) { error = -ENOMEM; goto out; } needs_kvfree = 1; error = ext4_xattr_inode_get(inode, entry, buffer, value_size); if (error) goto out; } else { size_t value_offs = le16_to_cpu(entry->e_value_offs); buffer = (void *)IFIRST(header) + value_offs; } memcpy(b_entry_name, entry->e_name, entry->e_name_len); b_entry_name[entry->e_name_len] = '\0'; i.name = b_entry_name; error = ext4_get_inode_loc(inode, &is->iloc); if (error) goto out; error = ext4_xattr_ibody_find(inode, &i, is); if (error) goto out; i.value = buffer; i.value_len = value_size; error = ext4_xattr_block_find(inode, &i, bs); if (error) goto out; /* Move ea entry from the inode into the block */ error = ext4_xattr_block_set(handle, inode, &i, bs); if (error) goto out; /* Remove the chosen entry from the inode */ i.value = NULL; i.value_len = 0; error = ext4_xattr_ibody_set(handle, inode, &i, is); out: 
kfree(b_entry_name); if (needs_kvfree && buffer) kvfree(buffer); if (is) brelse(is->iloc.bh); if (bs) brelse(bs->bh); kfree(is); kfree(bs); return error; } static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode, struct ext4_inode *raw_inode, int isize_diff, size_t ifree, size_t bfree, int *total_ino) { struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode); struct ext4_xattr_entry *small_entry; struct ext4_xattr_entry *entry; struct ext4_xattr_entry *last; unsigned int entry_size; /* EA entry size */ unsigned int total_size; /* EA entry size + value size */ unsigned int min_total_size; int error; while (isize_diff > ifree) { entry = NULL; small_entry = NULL; min_total_size = ~0U; last = IFIRST(header); /* Find the entry best suited to be pushed into EA block */ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { /* never move system.data out of the inode */ if ((last->e_name_len == 4) && (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) && !memcmp(last->e_name, "data", 4)) continue; total_size = EXT4_XATTR_LEN(last->e_name_len); if (!last->e_value_inum) total_size += EXT4_XATTR_SIZE( le32_to_cpu(last->e_value_size)); if (total_size <= bfree && total_size < min_total_size) { if (total_size + ifree < isize_diff) { small_entry = last; } else { entry = last; min_total_size = total_size; } } } if (entry == NULL) { if (small_entry == NULL) return -ENOSPC; entry = small_entry; } entry_size = EXT4_XATTR_LEN(entry->e_name_len); total_size = entry_size; if (!entry->e_value_inum) total_size += EXT4_XATTR_SIZE( le32_to_cpu(entry->e_value_size)); error = ext4_xattr_move_to_block(handle, inode, raw_inode, entry); if (error) return error; *total_ino -= entry_size; ifree += total_size; bfree -= total_size; } return 0; } /* * Expand an inode by new_extra_isize bytes when EAs are present. * Returns 0 on success or negative error number on failure. */ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, struct ext4_inode *raw_inode, handle_t *handle) { struct ext4_xattr_ibody_header *header; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); static unsigned int mnt_count; size_t min_offs; size_t ifree, bfree; int total_ino; void *base, *end; int error = 0, tried_min_extra_isize = 0; int s_min_extra_isize = le16_to_cpu(sbi->s_es->s_min_extra_isize); int isize_diff; /* How much do we need to grow i_extra_isize */ retry: isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize; if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) return 0; header = IHDR(inode, raw_inode); /* * Check if enough free space is available in the inode to shift the * entries ahead by new_extra_isize. */ base = IFIRST(header); end = ITAIL(inode, raw_inode); min_offs = end - base; total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32); ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino); if (ifree >= isize_diff) goto shift; /* * Enough free space isn't available in the inode, check if * EA block can hold new_extra_isize bytes. 
*/ if (EXT4_I(inode)->i_file_acl) { struct buffer_head *bh; bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); if (IS_ERR(bh)) { error = PTR_ERR(bh); goto cleanup; } error = ext4_xattr_check_block(inode, bh); if (error) { brelse(bh); goto cleanup; } base = BHDR(bh); end = bh->b_data + bh->b_size; min_offs = end - base; bfree = ext4_xattr_free_space(BFIRST(bh), &min_offs, base, NULL); brelse(bh); if (bfree + ifree < isize_diff) { if (!tried_min_extra_isize && s_min_extra_isize) { tried_min_extra_isize++; new_extra_isize = s_min_extra_isize; goto retry; } error = -ENOSPC; goto cleanup; } } else { bfree = inode->i_sb->s_blocksize; } error = ext4_xattr_make_inode_space(handle, inode, raw_inode, isize_diff, ifree, bfree, &total_ino); if (error) { if (error == -ENOSPC && !tried_min_extra_isize && s_min_extra_isize) { tried_min_extra_isize++; new_extra_isize = s_min_extra_isize; goto retry; } goto cleanup; } shift: /* Adjust the offsets and shift the remaining entries ahead */ ext4_xattr_shift_entries(IFIRST(header), EXT4_I(inode)->i_extra_isize - new_extra_isize, (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize, (void *)header, total_ino); EXT4_I(inode)->i_extra_isize = new_extra_isize; if (ext4_has_inline_data(inode)) error = ext4_find_inline_data_nolock(inode); cleanup: if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) { ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.", inode->i_ino); mnt_count = le16_to_cpu(sbi->s_es->s_mnt_count); } return error; } #define EIA_INCR 16 /* must be 2^n */ #define EIA_MASK (EIA_INCR - 1) /* Add the large xattr @inode into @ea_inode_array for deferred iput(). * If @ea_inode_array is new or full it will be grown and the old * contents copied over. */ static int ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array, struct inode *inode) { if (*ea_inode_array == NULL) { /* * Start with 15 inodes, so it fits into a power-of-two size. */ (*ea_inode_array) = kmalloc( struct_size(*ea_inode_array, inodes, EIA_MASK), GFP_NOFS); if (*ea_inode_array == NULL) return -ENOMEM; (*ea_inode_array)->count = 0; } else if (((*ea_inode_array)->count & EIA_MASK) == EIA_MASK) { /* expand the array once all 15 + n * 16 slots are full */ struct ext4_xattr_inode_array *new_array = NULL; new_array = kmalloc( struct_size(*ea_inode_array, inodes, (*ea_inode_array)->count + EIA_INCR), GFP_NOFS); if (new_array == NULL) return -ENOMEM; memcpy(new_array, *ea_inode_array, struct_size(*ea_inode_array, inodes, (*ea_inode_array)->count)); kfree(*ea_inode_array); *ea_inode_array = new_array; } (*ea_inode_array)->count++; (*ea_inode_array)->inodes[(*ea_inode_array)->count - 1] = inode; return 0; } /* * ext4_xattr_delete_inode() * * Free extended attribute resources associated with this inode. Traverse * all entries and decrement reference on any xattr inodes associated with this * inode. This is called immediately before an inode is freed. We have exclusive * access to the inode. If an orphan inode is deleted it will also release its * references on xattr block and xattr inodes. 
*/ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode, struct ext4_xattr_inode_array **ea_inode_array, int extra_credits) { struct buffer_head *bh = NULL; struct ext4_xattr_ibody_header *header; struct ext4_iloc iloc = { .bh = NULL }; struct ext4_xattr_entry *entry; struct inode *ea_inode; int error; error = ext4_journal_ensure_credits(handle, extra_credits, ext4_free_metadata_revoke_credits(inode->i_sb, 1)); if (error < 0) { EXT4_ERROR_INODE(inode, "ensure credits (error %d)", error); goto cleanup; } if (ext4_has_feature_ea_inode(inode->i_sb) && ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { error = ext4_get_inode_loc(inode, &iloc); if (error) { EXT4_ERROR_INODE(inode, "inode loc (error %d)", error); goto cleanup; } error = ext4_journal_get_write_access(handle, inode->i_sb, iloc.bh, EXT4_JTR_NONE); if (error) { EXT4_ERROR_INODE(inode, "write access (error %d)", error); goto cleanup; } header = IHDR(inode, ext4_raw_inode(&iloc)); if (header->h_magic == cpu_to_le32(EXT4_XATTR_MAGIC)) ext4_xattr_inode_dec_ref_all(handle, inode, iloc.bh, IFIRST(header), false /* block_csum */, ea_inode_array, extra_credits, false /* skip_quota */); } if (EXT4_I(inode)->i_file_acl) { bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); if (IS_ERR(bh)) { error = PTR_ERR(bh); if (error == -EIO) { EXT4_ERROR_INODE_ERR(inode, EIO, "block %llu read error", EXT4_I(inode)->i_file_acl); } bh = NULL; goto cleanup; } error = ext4_xattr_check_block(inode, bh); if (error) goto cleanup; if (ext4_has_feature_ea_inode(inode->i_sb)) { for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { if (!entry->e_value_inum) continue; error = ext4_xattr_inode_iget(inode, le32_to_cpu(entry->e_value_inum), le32_to_cpu(entry->e_hash), &ea_inode); if (error) continue; ext4_xattr_inode_free_quota(inode, ea_inode, le32_to_cpu(entry->e_value_size)); iput(ea_inode); } } ext4_xattr_release_block(handle, inode, bh, ea_inode_array, extra_credits); /* * Update i_file_acl value in the same transaction that releases * block. */ EXT4_I(inode)->i_file_acl = 0; error = ext4_mark_inode_dirty(handle, inode); if (error) { EXT4_ERROR_INODE(inode, "mark inode dirty (error %d)", error); goto cleanup; } ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle); } error = 0; cleanup: brelse(iloc.bh); brelse(bh); return error; } void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *ea_inode_array) { int idx; if (ea_inode_array == NULL) return; for (idx = 0; idx < ea_inode_array->count; ++idx) iput(ea_inode_array->inodes[idx]); kfree(ea_inode_array); } /* * ext4_xattr_block_cache_insert() * * Create a new entry in the extended attribute block cache, and insert * it unless such an entry is already in the cache. */ static void ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache, struct buffer_head *bh) { struct ext4_xattr_header *header = BHDR(bh); __u32 hash = le32_to_cpu(header->h_hash); int reusable = le32_to_cpu(header->h_refcount) < EXT4_XATTR_REFCOUNT_MAX; int error; if (!ea_block_cache) return; error = mb_cache_entry_create(ea_block_cache, GFP_NOFS, hash, bh->b_blocknr, reusable); if (error) { if (error == -EBUSY) ea_bdebug(bh, "already in cache"); } else ea_bdebug(bh, "inserting [%x]", (int)hash); } /* * ext4_xattr_cmp() * * Compare two extended attribute blocks for equality. * * Returns 0 if the blocks are equal, 1 if they differ. 
*/ static int ext4_xattr_cmp(struct ext4_xattr_header *header1, struct ext4_xattr_header *header2) { struct ext4_xattr_entry *entry1, *entry2; entry1 = ENTRY(header1+1); entry2 = ENTRY(header2+1); while (!IS_LAST_ENTRY(entry1)) { if (IS_LAST_ENTRY(entry2)) return 1; if (entry1->e_hash != entry2->e_hash || entry1->e_name_index != entry2->e_name_index || entry1->e_name_len != entry2->e_name_len || entry1->e_value_size != entry2->e_value_size || entry1->e_value_inum != entry2->e_value_inum || memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len)) return 1; if (!entry1->e_value_inum && memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs), (char *)header2 + le16_to_cpu(entry2->e_value_offs), le32_to_cpu(entry1->e_value_size))) return 1; entry1 = EXT4_XATTR_NEXT(entry1); entry2 = EXT4_XATTR_NEXT(entry2); } if (!IS_LAST_ENTRY(entry2)) return 1; return 0; } /* * ext4_xattr_block_cache_find() * * Find an identical extended attribute block. * * Returns a pointer to the block found, or NULL if such a block was not * found, or an error pointer if an error occurred while reading ea block. */ static struct buffer_head * ext4_xattr_block_cache_find(struct inode *inode, struct ext4_xattr_header *header, struct mb_cache_entry **pce) { __u32 hash = le32_to_cpu(header->h_hash); struct mb_cache_entry *ce; struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode); if (!ea_block_cache) return NULL; if (!header->h_hash) return NULL; /* never share */ ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); ce = mb_cache_entry_find_first(ea_block_cache, hash); while (ce) { struct buffer_head *bh; bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO); if (IS_ERR(bh)) { if (PTR_ERR(bh) != -ENOMEM) EXT4_ERROR_INODE(inode, "block %lu read error", (unsigned long)ce->e_value); mb_cache_entry_put(ea_block_cache, ce); return bh; } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) { *pce = ce; return bh; } brelse(bh); ce = mb_cache_entry_find_next(ea_block_cache, ce); } return NULL; } #define NAME_HASH_SHIFT 5 #define VALUE_HASH_SHIFT 16 /* * ext4_xattr_hash_entry() * * Compute the hash of an extended attribute. */ static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value, size_t value_count) { __u32 hash = 0; while (name_len--) { hash = (hash << NAME_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^ (unsigned char)*name++; } while (value_count--) { hash = (hash << VALUE_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^ le32_to_cpu(*value++); } return cpu_to_le32(hash); } /* * ext4_xattr_hash_entry_signed() * * Compute the hash of an extended attribute incorrectly. */ static __le32 ext4_xattr_hash_entry_signed(char *name, size_t name_len, __le32 *value, size_t value_count) { __u32 hash = 0; while (name_len--) { hash = (hash << NAME_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^ (signed char)*name++; } while (value_count--) { hash = (hash << VALUE_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^ le32_to_cpu(*value++); } return cpu_to_le32(hash); } #undef NAME_HASH_SHIFT #undef VALUE_HASH_SHIFT #define BLOCK_HASH_SHIFT 16 /* * ext4_xattr_rehash() * * Re-compute the extended attribute hash value after an entry has changed. 
*/ static void ext4_xattr_rehash(struct ext4_xattr_header *header) { struct ext4_xattr_entry *here; __u32 hash = 0; here = ENTRY(header+1); while (!IS_LAST_ENTRY(here)) { if (!here->e_hash) { /* Block is not shared if an entry's hash value == 0 */ hash = 0; break; } hash = (hash << BLOCK_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^ le32_to_cpu(here->e_hash); here = EXT4_XATTR_NEXT(here); } header->h_hash = cpu_to_le32(hash); } #undef BLOCK_HASH_SHIFT #define HASH_BUCKET_BITS 10 struct mb_cache * ext4_xattr_create_cache(void) { return mb_cache_create(HASH_BUCKET_BITS); } void ext4_xattr_destroy_cache(struct mb_cache *cache) { if (cache) mb_cache_destroy(cache); }
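/*
 * Editor's sketch (not part of fs/ext4/xattr.c): a minimal example of the
 * ext4_xattr_set() API documented above. The helper name and the attribute
 * name/value are hypothetical; only the create/replace/remove semantics
 * described in the ext4_xattr_set_handle() comment are relied upon.
 */
static int __maybe_unused example_xattr_roundtrip(struct inode *inode)
{
	static const char value[] = "demo";
	int error;

	/* XATTR_CREATE: fails with -EEXIST if the attribute already exists. */
	error = ext4_xattr_set(inode, EXT4_XATTR_INDEX_USER, "example",
			       value, sizeof(value) - 1, XATTR_CREATE);
	if (error)
		return error;

	/* A NULL value removes the attribute; XATTR_REPLACE requires it to exist. */
	return ext4_xattr_set(inode, EXT4_XATTR_INDEX_USER, "example",
			      NULL, 0, XATTR_REPLACE);
}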
// SPDX-License-Identifier: GPL-2.0+ /* * HID driver for Cougar 500k Gaming Keyboard * * Copyright (c) 2018 Daniel M. Lambea <dmlambea@gmail.com> */ #include <linux/hid.h> #include <linux/module.h> #include <linux/printk.h> #include "hid-ids.h" MODULE_AUTHOR("Daniel M. Lambea <dmlambea@gmail.com>"); MODULE_DESCRIPTION("Cougar 500k Gaming Keyboard"); MODULE_LICENSE("GPL"); MODULE_INFO(key_mappings, "G1-G6 are mapped to F13-F18"); static bool g6_is_space = true; MODULE_PARM_DESC(g6_is_space, "If true, G6 programmable key sends SPACE instead of F18 (default=true)"); #define COUGAR_VENDOR_USAGE 0xff00ff00 #define COUGAR_FIELD_CODE 1 #define COUGAR_FIELD_ACTION 2 #define COUGAR_KEY_G1 0x83 #define COUGAR_KEY_G2 0x84 #define COUGAR_KEY_G3 0x85 #define COUGAR_KEY_G4 0x86 #define COUGAR_KEY_G5 0x87 #define COUGAR_KEY_G6 0x78 #define COUGAR_KEY_FN 0x0d #define COUGAR_KEY_MR 0x6f #define COUGAR_KEY_M1 0x80 #define COUGAR_KEY_M2 0x81 #define COUGAR_KEY_M3 0x82 #define COUGAR_KEY_LEDS 0x67 #define COUGAR_KEY_LOCK 0x6e /* Default key mappings. The special key COUGAR_KEY_G6 is defined first * because the spacebar is used more frequently than any of the other * special keys. Depending on the value of the parameter 'g6_is_space', * the mapping will be updated in the probe function.
*/ static unsigned char cougar_mapping[][2] = { { COUGAR_KEY_G6, KEY_SPACE }, { COUGAR_KEY_G1, KEY_F13 }, { COUGAR_KEY_G2, KEY_F14 }, { COUGAR_KEY_G3, KEY_F15 }, { COUGAR_KEY_G4, KEY_F16 }, { COUGAR_KEY_G5, KEY_F17 }, { COUGAR_KEY_LOCK, KEY_SCREENLOCK }, /* The following keys are handled by the hardware itself, so no special * treatment is required: { COUGAR_KEY_FN, KEY_RESERVED }, { COUGAR_KEY_MR, KEY_RESERVED }, { COUGAR_KEY_M1, KEY_RESERVED }, { COUGAR_KEY_M2, KEY_RESERVED }, { COUGAR_KEY_M3, KEY_RESERVED }, { COUGAR_KEY_LEDS, KEY_RESERVED }, */ { 0, 0 }, }; struct cougar_shared { struct list_head list; struct kref kref; bool enabled; struct hid_device *dev; struct input_dev *input; }; struct cougar { bool special_intf; struct cougar_shared *shared; }; static LIST_HEAD(cougar_udev_list); static DEFINE_MUTEX(cougar_udev_list_lock); /** * cougar_fix_g6_mapping - configure the mapping for key G6/Spacebar */ static void cougar_fix_g6_mapping(void) { int i; for (i = 0; cougar_mapping[i][0]; i++) { if (cougar_mapping[i][0] == COUGAR_KEY_G6) { cougar_mapping[i][1] = g6_is_space ? KEY_SPACE : KEY_F18; pr_info("cougar: G6 mapped to %s\n", g6_is_space ? "space" : "F18"); return; } } pr_warn("cougar: no mappings defined for G6/spacebar"); } /* * Constant-friendly rdesc fixup for mouse interface */ static const __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 && (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) { hid_info(hdev, "usage count exceeds max: fixing up report descriptor\n"); rdesc[115] = ((HID_MAX_USAGES-1) & 0xff); rdesc[116] = ((HID_MAX_USAGES-1) >> 8); } return rdesc; } static struct cougar_shared *cougar_get_shared_data(struct hid_device *hdev) { struct cougar_shared *shared; /* Try to find an already-probed interface from the same device */ list_for_each_entry(shared, &cougar_udev_list, list) { if (hid_compare_device_paths(hdev, shared->dev, '/')) { kref_get(&shared->kref); return shared; } } return NULL; } static void cougar_release_shared_data(struct kref *kref) { struct cougar_shared *shared = container_of(kref, struct cougar_shared, kref); mutex_lock(&cougar_udev_list_lock); list_del(&shared->list); mutex_unlock(&cougar_udev_list_lock); kfree(shared); } static void cougar_remove_shared_data(void *resource) { struct cougar *cougar = resource; if (cougar->shared) { kref_put(&cougar->shared->kref, cougar_release_shared_data); cougar->shared = NULL; } } /* * Bind the device group's shared data to this cougar struct. * If no shared data exists for this group, create and initialize it. 
*/ static int cougar_bind_shared_data(struct hid_device *hdev, struct cougar *cougar) { struct cougar_shared *shared; int error = 0; mutex_lock(&cougar_udev_list_lock); shared = cougar_get_shared_data(hdev); if (!shared) { shared = kzalloc(sizeof(*shared), GFP_KERNEL); if (!shared) { error = -ENOMEM; goto out; } kref_init(&shared->kref); shared->dev = hdev; list_add_tail(&shared->list, &cougar_udev_list); } cougar->shared = shared; error = devm_add_action_or_reset(&hdev->dev, cougar_remove_shared_data, cougar); if (error) { mutex_unlock(&cougar_udev_list_lock); return error; } out: mutex_unlock(&cougar_udev_list_lock); return error; } static int cougar_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct cougar *cougar; struct hid_input *next, *hidinput = NULL; unsigned int connect_mask; int error; cougar = devm_kzalloc(&hdev->dev, sizeof(*cougar), GFP_KERNEL); if (!cougar) return -ENOMEM; hid_set_drvdata(hdev, cougar); error = hid_parse(hdev); if (error) { hid_err(hdev, "parse failed\n"); return error; } if (hdev->collection->usage == COUGAR_VENDOR_USAGE) { cougar->special_intf = true; connect_mask = HID_CONNECT_HIDRAW; } else connect_mask = HID_CONNECT_DEFAULT; error = hid_hw_start(hdev, connect_mask); if (error) { hid_err(hdev, "hw start failed\n"); return error; } error = cougar_bind_shared_data(hdev, cougar); if (error) goto fail_stop_and_cleanup; /* The custom vendor interface will use the hid_input registered * for the keyboard interface, in order to send translated key codes * to it. */ if (hdev->collection->usage == HID_GD_KEYBOARD) { list_for_each_entry_safe(hidinput, next, &hdev->inputs, list) { if (hidinput->registered && hidinput->input != NULL) { cougar->shared->input = hidinput->input; cougar->shared->enabled = true; break; } } } else if (hdev->collection->usage == COUGAR_VENDOR_USAGE) { /* Preinit the mapping table */ cougar_fix_g6_mapping(); error = hid_hw_open(hdev); if (error) goto fail_stop_and_cleanup; } return 0; fail_stop_and_cleanup: hid_hw_stop(hdev); return error; } /* * Convert events from vendor intf to input key events */ static int cougar_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct cougar *cougar; struct cougar_shared *shared; unsigned char code, action; int i; cougar = hid_get_drvdata(hdev); shared = cougar->shared; if (!cougar->special_intf || !shared) return 0; if (!shared->enabled || !shared->input) return -EPERM; code = data[COUGAR_FIELD_CODE]; action = data[COUGAR_FIELD_ACTION]; for (i = 0; cougar_mapping[i][0]; i++) { if (code == cougar_mapping[i][0]) { input_event(shared->input, EV_KEY, cougar_mapping[i][1], action); input_sync(shared->input); return -EPERM; } } /* Avoid warnings on the same unmapped key twice */ if (action != 0) hid_warn(hdev, "unmapped special key code %0x: ignoring\n", code); return -EPERM; } static void cougar_remove(struct hid_device *hdev) { struct cougar *cougar = hid_get_drvdata(hdev); if (cougar) { /* Stop the vendor intf to process more events */ if (cougar->shared) cougar->shared->enabled = false; if (cougar->special_intf) hid_hw_close(hdev); } hid_hw_stop(hdev); } static int cougar_param_set_g6_is_space(const char *val, const struct kernel_param *kp) { int ret; ret = param_set_bool(val, kp); if (ret) return ret; cougar_fix_g6_mapping(); return 0; } static const struct kernel_param_ops cougar_g6_is_space_ops = { .set = cougar_param_set_g6_is_space, .get = param_get_bool, }; module_param_cb(g6_is_space, &cougar_g6_is_space_ops, &g6_is_space, 0644); static const 
struct hid_device_id cougar_id_table[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR, USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR, USB_DEVICE_ID_COUGAR_700K_GAMING_KEYBOARD) }, {} }; MODULE_DEVICE_TABLE(hid, cougar_id_table); static struct hid_driver cougar_driver = { .name = "cougar", .id_table = cougar_id_table, .report_fixup = cougar_report_fixup, .probe = cougar_probe, .remove = cougar_remove, .raw_event = cougar_raw_event, }; module_hid_driver(cougar_driver);
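/*
 * Editor's sketch (not part of hid-cougar.c): the table-driven translation
 * done in cougar_raw_event(), factored into a standalone helper for
 * illustration. The helper name is hypothetical; cougar_mapping[] rows are
 * {vendor code, input keycode} pairs terminated by a zero code, exactly as
 * defined above.
 */
static unsigned short __maybe_unused cougar_translate_code(unsigned char code)
{
	int i;

	/* A linear scan is fine here: the table has fewer than ten entries. */
	for (i = 0; cougar_mapping[i][0]; i++)
		if (code == cougar_mapping[i][0])
			return cougar_mapping[i][1];

	return KEY_RESERVED;	/* not a special key we know about */
}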
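/*
 * Editor's sketch (hypothetical, not part of either driver): the
 * module_param_cb()/kernel_param_ops pattern that hid-cougar uses for
 * g6_is_space, reduced to its skeleton. param_set_bool() validates and
 * stores the new value; the custom setter then reacts to the change, just
 * as cougar_param_set_g6_is_space() calls cougar_fix_g6_mapping().
 */
static bool example_flag;

static int example_flag_set(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_bool(val, kp);	/* parses "Y"/"N", "1"/"0", ... */

	if (ret)
		return ret;
	pr_info("example_flag is now %s\n", example_flag ? "on" : "off");
	return 0;
}

static const struct kernel_param_ops example_flag_ops = {
	.set = example_flag_set,
	.get = param_get_bool,
};

module_param_cb(example_flag, &example_flag_ops, &example_flag, 0644);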
// SPDX-License-Identifier: GPL-2.0+ /* * comedi/comedi_fops.c * comedi kernel module * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 1997-2007 David A. Schleef <ds@schleef.org> * compat ioctls: * Author: Ian Abbott, MEV Ltd. <abbotti@mev.co.uk> * Copyright (C) 2007 MEV Ltd.
<http://www.mev.co.uk/> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/comedi/comedidev.h> #include <linux/cdev.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/compat.h> #include "comedi_internal.h" /* * comedi_subdevice "runflags" * COMEDI_SRF_RT: DEPRECATED: command is running real-time * COMEDI_SRF_ERROR: indicates an COMEDI_CB_ERROR event has occurred * since the last command was started * COMEDI_SRF_RUNNING: command is running * COMEDI_SRF_FREE_SPRIV: free s->private on detach * * COMEDI_SRF_BUSY_MASK: runflags that indicate the subdevice is "busy" */ #define COMEDI_SRF_RT BIT(1) #define COMEDI_SRF_ERROR BIT(2) #define COMEDI_SRF_RUNNING BIT(27) #define COMEDI_SRF_FREE_SPRIV BIT(31) #define COMEDI_SRF_BUSY_MASK (COMEDI_SRF_ERROR | COMEDI_SRF_RUNNING) /** * struct comedi_file - Per-file private data for COMEDI device * @dev: COMEDI device. * @read_subdev: Current "read" subdevice. * @write_subdev: Current "write" subdevice. * @last_detach_count: Last known detach count. * @last_attached: Last known attached/detached state. */ struct comedi_file { struct comedi_device *dev; struct comedi_subdevice *read_subdev; struct comedi_subdevice *write_subdev; unsigned int last_detach_count; unsigned int last_attached:1; }; #define COMEDI_NUM_MINORS 0x100 #define COMEDI_NUM_SUBDEVICE_MINORS \ (COMEDI_NUM_MINORS - COMEDI_NUM_BOARD_MINORS) static unsigned short comedi_num_legacy_minors; module_param(comedi_num_legacy_minors, ushort, 0444); MODULE_PARM_DESC(comedi_num_legacy_minors, "number of comedi minor devices to reserve for non-auto-configured devices (default 0)" ); unsigned int comedi_default_buf_size_kb = CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB; module_param(comedi_default_buf_size_kb, uint, 0644); MODULE_PARM_DESC(comedi_default_buf_size_kb, "default asynchronous buffer size in KiB (default " __MODULE_STRING(CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB) ")"); unsigned int comedi_default_buf_maxsize_kb = CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB; module_param(comedi_default_buf_maxsize_kb, uint, 0644); MODULE_PARM_DESC(comedi_default_buf_maxsize_kb, "default maximum size of asynchronous buffer in KiB (default " __MODULE_STRING(CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB) ")"); static DEFINE_MUTEX(comedi_board_minor_table_lock); static struct comedi_device *comedi_board_minor_table[COMEDI_NUM_BOARD_MINORS]; static DEFINE_MUTEX(comedi_subdevice_minor_table_lock); /* Note: indexed by minor - COMEDI_NUM_BOARD_MINORS. */ static struct comedi_subdevice *comedi_subdevice_minor_table[COMEDI_NUM_SUBDEVICE_MINORS]; static struct cdev comedi_cdev; static void comedi_device_init(struct comedi_device *dev) { kref_init(&dev->refcount); spin_lock_init(&dev->spinlock); mutex_init(&dev->mutex); init_rwsem(&dev->attach_lock); dev->minor = -1; } static void comedi_dev_kref_release(struct kref *kref) { struct comedi_device *dev = container_of(kref, struct comedi_device, refcount); mutex_destroy(&dev->mutex); put_device(dev->class_dev); kfree(dev); } /** * comedi_dev_put() - Release a use of a COMEDI device * @dev: COMEDI device. * * Must be called when a user of a COMEDI device is finished with it. * When the last user of the COMEDI device calls this function, the * COMEDI device is destroyed. 
* * Return: 1 if the COMEDI device is destroyed by this call or @dev is * NULL, otherwise return 0. Callers must not assume the COMEDI * device is still valid if this function returns 0. */ int comedi_dev_put(struct comedi_device *dev) { if (dev) return kref_put(&dev->refcount, comedi_dev_kref_release); return 1; } EXPORT_SYMBOL_GPL(comedi_dev_put); static struct comedi_device *comedi_dev_get(struct comedi_device *dev) { if (dev) kref_get(&dev->refcount); return dev; } static void comedi_device_cleanup(struct comedi_device *dev) { struct module *driver_module = NULL; if (!dev) return; mutex_lock(&dev->mutex); if (dev->attached) driver_module = dev->driver->module; comedi_device_detach(dev); if (driver_module && dev->use_count) module_put(driver_module); mutex_unlock(&dev->mutex); } static bool comedi_clear_board_dev(struct comedi_device *dev) { unsigned int i = dev->minor; bool cleared = false; lockdep_assert_held(&dev->mutex); mutex_lock(&comedi_board_minor_table_lock); if (dev == comedi_board_minor_table[i]) { comedi_board_minor_table[i] = NULL; cleared = true; } mutex_unlock(&comedi_board_minor_table_lock); return cleared; } static struct comedi_device *comedi_clear_board_minor(unsigned int minor) { struct comedi_device *dev; mutex_lock(&comedi_board_minor_table_lock); dev = comedi_board_minor_table[minor]; comedi_board_minor_table[minor] = NULL; mutex_unlock(&comedi_board_minor_table_lock); return dev; } static struct comedi_subdevice * comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned int minor) { struct comedi_subdevice *s; unsigned int i = minor - COMEDI_NUM_BOARD_MINORS; mutex_lock(&comedi_subdevice_minor_table_lock); s = comedi_subdevice_minor_table[i]; if (s && s->device != dev) s = NULL; mutex_unlock(&comedi_subdevice_minor_table_lock); return s; } static struct comedi_device *comedi_dev_get_from_board_minor(unsigned int minor) { struct comedi_device *dev; mutex_lock(&comedi_board_minor_table_lock); dev = comedi_dev_get(comedi_board_minor_table[minor]); mutex_unlock(&comedi_board_minor_table_lock); return dev; } static struct comedi_device * comedi_dev_get_from_subdevice_minor(unsigned int minor) { struct comedi_device *dev; struct comedi_subdevice *s; unsigned int i = minor - COMEDI_NUM_BOARD_MINORS; mutex_lock(&comedi_subdevice_minor_table_lock); s = comedi_subdevice_minor_table[i]; dev = comedi_dev_get(s ? s->device : NULL); mutex_unlock(&comedi_subdevice_minor_table_lock); return dev; } /** * comedi_dev_get_from_minor() - Get COMEDI device by minor device number * @minor: Minor device number. * * Finds the COMEDI device associated with the minor device number, if any, * and increments its reference count. The COMEDI device is prevented from * being freed until a matching call is made to comedi_dev_put(). * * Return: A pointer to the COMEDI device if it exists, with its usage * reference incremented. Return NULL if no COMEDI device exists with the * specified minor device number. 
*/ struct comedi_device *comedi_dev_get_from_minor(unsigned int minor) { if (minor < COMEDI_NUM_BOARD_MINORS) return comedi_dev_get_from_board_minor(minor); return comedi_dev_get_from_subdevice_minor(minor); } EXPORT_SYMBOL_GPL(comedi_dev_get_from_minor); static struct comedi_subdevice * comedi_read_subdevice(const struct comedi_device *dev, unsigned int minor) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (minor >= COMEDI_NUM_BOARD_MINORS) { s = comedi_subdevice_from_minor(dev, minor); if (!s || (s->subdev_flags & SDF_CMD_READ)) return s; } return dev->read_subdev; } static struct comedi_subdevice * comedi_write_subdevice(const struct comedi_device *dev, unsigned int minor) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (minor >= COMEDI_NUM_BOARD_MINORS) { s = comedi_subdevice_from_minor(dev, minor); if (!s || (s->subdev_flags & SDF_CMD_WRITE)) return s; } return dev->write_subdev; } static void comedi_file_reset(struct file *file) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi_subdevice *s, *read_s, *write_s; unsigned int minor = iminor(file_inode(file)); read_s = dev->read_subdev; write_s = dev->write_subdev; if (minor >= COMEDI_NUM_BOARD_MINORS) { s = comedi_subdevice_from_minor(dev, minor); if (!s || s->subdev_flags & SDF_CMD_READ) read_s = s; if (!s || s->subdev_flags & SDF_CMD_WRITE) write_s = s; } cfp->last_attached = dev->attached; cfp->last_detach_count = dev->detach_count; WRITE_ONCE(cfp->read_subdev, read_s); WRITE_ONCE(cfp->write_subdev, write_s); } static void comedi_file_check(struct file *file) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; if (cfp->last_attached != dev->attached || cfp->last_detach_count != dev->detach_count) comedi_file_reset(file); } static struct comedi_subdevice *comedi_file_read_subdevice(struct file *file) { struct comedi_file *cfp = file->private_data; comedi_file_check(file); return READ_ONCE(cfp->read_subdev); } static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file) { struct comedi_file *cfp = file->private_data; comedi_file_check(file); return READ_ONCE(cfp->write_subdev); } static int resize_async_buffer(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int new_size) { struct comedi_async *async = s->async; int retval; lockdep_assert_held(&dev->mutex); if (new_size > async->max_bufsize) return -EPERM; if (s->busy) { dev_dbg(dev->class_dev, "subdevice is busy, cannot resize buffer\n"); return -EBUSY; } if (comedi_buf_is_mmapped(s)) { dev_dbg(dev->class_dev, "subdevice is mmapped, cannot resize buffer\n"); return -EBUSY; } /* make sure buffer is an integral number of pages (we round up) */ new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK; retval = comedi_buf_alloc(dev, s, new_size); if (retval < 0) return retval; if (s->buf_change) { retval = s->buf_change(dev, s); if (retval < 0) return retval; } dev_dbg(dev->class_dev, "subd %d buffer resized to %i bytes\n", s->index, async->prealloc_bufsz); return 0; } /* sysfs attribute files */ static ssize_t max_read_buffer_kb_show(struct device *csdev, struct device_attribute *attr, char *buf) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size = 0; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_read_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_READ) && s->async) size = s->async->max_bufsize / 1024; 
mutex_unlock(&dev->mutex); comedi_dev_put(dev); return sysfs_emit(buf, "%u\n", size); } static ssize_t max_read_buffer_kb_store(struct device *csdev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size; int err; err = kstrtouint(buf, 10, &size); if (err) return err; if (size > (UINT_MAX / 1024)) return -EINVAL; size *= 1024; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_read_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_READ) && s->async) s->async->max_bufsize = size; else err = -EINVAL; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return err ? err : count; } static DEVICE_ATTR_RW(max_read_buffer_kb); static ssize_t read_buffer_kb_show(struct device *csdev, struct device_attribute *attr, char *buf) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size = 0; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_read_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_READ) && s->async) size = s->async->prealloc_bufsz / 1024; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return sysfs_emit(buf, "%u\n", size); } static ssize_t read_buffer_kb_store(struct device *csdev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size; int err; err = kstrtouint(buf, 10, &size); if (err) return err; if (size > (UINT_MAX / 1024)) return -EINVAL; size *= 1024; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_read_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_READ) && s->async) err = resize_async_buffer(dev, s, size); else err = -EINVAL; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return err ? err : count; } static DEVICE_ATTR_RW(read_buffer_kb); static ssize_t max_write_buffer_kb_show(struct device *csdev, struct device_attribute *attr, char *buf) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size = 0; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_write_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async) size = s->async->max_bufsize / 1024; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return sysfs_emit(buf, "%u\n", size); } static ssize_t max_write_buffer_kb_store(struct device *csdev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size; int err; err = kstrtouint(buf, 10, &size); if (err) return err; if (size > (UINT_MAX / 1024)) return -EINVAL; size *= 1024; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_write_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async) s->async->max_bufsize = size; else err = -EINVAL; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return err ? 
err : count; } static DEVICE_ATTR_RW(max_write_buffer_kb); static ssize_t write_buffer_kb_show(struct device *csdev, struct device_attribute *attr, char *buf) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size = 0; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_write_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async) size = s->async->prealloc_bufsz / 1024; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return sysfs_emit(buf, "%u\n", size); } static ssize_t write_buffer_kb_store(struct device *csdev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int minor = MINOR(csdev->devt); struct comedi_device *dev; struct comedi_subdevice *s; unsigned int size; int err; err = kstrtouint(buf, 10, &size); if (err) return err; if (size > (UINT_MAX / 1024)) return -EINVAL; size *= 1024; dev = comedi_dev_get_from_minor(minor); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); s = comedi_write_subdevice(dev, minor); if (s && (s->subdev_flags & SDF_CMD_WRITE) && s->async) err = resize_async_buffer(dev, s, size); else err = -EINVAL; mutex_unlock(&dev->mutex); comedi_dev_put(dev); return err ? err : count; } static DEVICE_ATTR_RW(write_buffer_kb); static struct attribute *comedi_dev_attrs[] = { &dev_attr_max_read_buffer_kb.attr, &dev_attr_read_buffer_kb.attr, &dev_attr_max_write_buffer_kb.attr, &dev_attr_write_buffer_kb.attr, NULL, }; ATTRIBUTE_GROUPS(comedi_dev); static const struct class comedi_class = { .name = "comedi", .dev_groups = comedi_dev_groups, }; static void comedi_free_board_dev(struct comedi_device *dev) { if (dev) { comedi_device_cleanup(dev); if (dev->class_dev) { device_destroy(&comedi_class, MKDEV(COMEDI_MAJOR, dev->minor)); } comedi_dev_put(dev); } } static void __comedi_clear_subdevice_runflags(struct comedi_subdevice *s, unsigned int bits) { s->runflags &= ~bits; } static void __comedi_set_subdevice_runflags(struct comedi_subdevice *s, unsigned int bits) { s->runflags |= bits; } static void comedi_update_subdevice_runflags(struct comedi_subdevice *s, unsigned int mask, unsigned int bits) { unsigned long flags; spin_lock_irqsave(&s->spin_lock, flags); __comedi_clear_subdevice_runflags(s, mask); __comedi_set_subdevice_runflags(s, bits & mask); spin_unlock_irqrestore(&s->spin_lock, flags); } static unsigned int __comedi_get_subdevice_runflags(struct comedi_subdevice *s) { return s->runflags; } static unsigned int comedi_get_subdevice_runflags(struct comedi_subdevice *s) { unsigned long flags; unsigned int runflags; spin_lock_irqsave(&s->spin_lock, flags); runflags = __comedi_get_subdevice_runflags(s); spin_unlock_irqrestore(&s->spin_lock, flags); return runflags; } static bool comedi_is_runflags_running(unsigned int runflags) { return runflags & COMEDI_SRF_RUNNING; } static bool comedi_is_runflags_in_error(unsigned int runflags) { return runflags & COMEDI_SRF_ERROR; } /** * comedi_is_subdevice_running() - Check if async command running on subdevice * @s: COMEDI subdevice. * * Return: %true if an asynchronous COMEDI command is active on the * subdevice, else %false. 
*/ bool comedi_is_subdevice_running(struct comedi_subdevice *s) { unsigned int runflags = comedi_get_subdevice_runflags(s); return comedi_is_runflags_running(runflags); } EXPORT_SYMBOL_GPL(comedi_is_subdevice_running); static bool __comedi_is_subdevice_running(struct comedi_subdevice *s) { unsigned int runflags = __comedi_get_subdevice_runflags(s); return comedi_is_runflags_running(runflags); } bool comedi_can_auto_free_spriv(struct comedi_subdevice *s) { unsigned int runflags = __comedi_get_subdevice_runflags(s); return runflags & COMEDI_SRF_FREE_SPRIV; } /** * comedi_set_spriv_auto_free() - Mark subdevice private data as freeable * @s: COMEDI subdevice. * * Mark the subdevice as having a pointer to private data that can be * automatically freed when the COMEDI device is detached from the low-level * driver. */ void comedi_set_spriv_auto_free(struct comedi_subdevice *s) { __comedi_set_subdevice_runflags(s, COMEDI_SRF_FREE_SPRIV); } EXPORT_SYMBOL_GPL(comedi_set_spriv_auto_free); /** * comedi_alloc_spriv - Allocate memory for the subdevice private data * @s: COMEDI subdevice. * @size: Size of the memory to allocate. * * Allocate memory for the subdevice private data and point @s->private * to it. The memory will be freed automatically when the COMEDI device * is detached from the low-level driver. * * Return: A pointer to the allocated memory @s->private on success. * Return NULL on failure. */ void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size) { s->private = kzalloc(size, GFP_KERNEL); if (s->private) comedi_set_spriv_auto_free(s); return s->private; } EXPORT_SYMBOL_GPL(comedi_alloc_spriv); /* * This function restores a subdevice to an idle state. */ static void do_become_nonbusy(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; lockdep_assert_held(&dev->mutex); comedi_update_subdevice_runflags(s, COMEDI_SRF_RUNNING, 0); if (async) { comedi_buf_reset(s); async->inttrig = NULL; kfree(async->cmd.chanlist); async->cmd.chanlist = NULL; s->busy = NULL; wake_up_interruptible_all(&async->wait_head); } else { dev_err(dev->class_dev, "BUG: (?) 
%s called with async=NULL\n", __func__); s->busy = NULL; } } static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { int ret = 0; lockdep_assert_held(&dev->mutex); if (comedi_is_subdevice_running(s) && s->cancel) ret = s->cancel(dev, s); do_become_nonbusy(dev, s); return ret; } void comedi_device_cancel_all(struct comedi_device *dev) { struct comedi_subdevice *s; int i; lockdep_assert_held(&dev->mutex); if (!dev->attached) return; for (i = 0; i < dev->n_subdevices; i++) { s = &dev->subdevices[i]; if (s->async) do_cancel(dev, s); } } static int is_device_busy(struct comedi_device *dev) { struct comedi_subdevice *s; int i; lockdep_assert_held(&dev->mutex); if (!dev->attached) return 0; for (i = 0; i < dev->n_subdevices; i++) { s = &dev->subdevices[i]; if (s->busy) return 1; if (s->async && comedi_buf_is_mmapped(s)) return 1; } return 0; } /* * COMEDI_DEVCONFIG ioctl * attaches (and configures) or detaches a legacy device * * arg: * pointer to comedi_devconfig structure (NULL if detaching) * * reads: * comedi_devconfig structure (if attaching) * * writes: * nothing */ static int do_devconfig_ioctl(struct comedi_device *dev, struct comedi_devconfig __user *arg) { struct comedi_devconfig it; lockdep_assert_held(&dev->mutex); if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!arg) { if (is_device_busy(dev)) return -EBUSY; if (dev->attached) { struct module *driver_module = dev->driver->module; comedi_device_detach(dev); module_put(driver_module); } return 0; } if (copy_from_user(&it, arg, sizeof(it))) return -EFAULT; it.board_name[COMEDI_NAMELEN - 1] = 0; if (it.options[COMEDI_DEVCONF_AUX_DATA_LENGTH]) { dev_warn(dev->class_dev, "comedi_config --init_data is deprecated\n"); return -EINVAL; } if (dev->minor >= comedi_num_legacy_minors) /* don't re-use dynamically allocated comedi devices */ return -EBUSY; /* This increments the driver module count on success. 
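 * The matching module_put() is done in the detach path earlier in this
 * function.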
*/ return comedi_device_attach(dev, &it); } /* * COMEDI_BUFCONFIG ioctl * buffer configuration * * arg: * pointer to comedi_bufconfig structure * * reads: * comedi_bufconfig structure * * writes: * modified comedi_bufconfig structure */ static int do_bufconfig_ioctl(struct comedi_device *dev, struct comedi_bufconfig __user *arg) { struct comedi_bufconfig bc; struct comedi_async *async; struct comedi_subdevice *s; int retval = 0; lockdep_assert_held(&dev->mutex); if (copy_from_user(&bc, arg, sizeof(bc))) return -EFAULT; if (bc.subdevice >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[bc.subdevice]; async = s->async; if (!async) { dev_dbg(dev->class_dev, "subdevice does not have async capability\n"); bc.size = 0; bc.maximum_size = 0; goto copyback; } if (bc.maximum_size) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; async->max_bufsize = bc.maximum_size; } if (bc.size) { retval = resize_async_buffer(dev, s, bc.size); if (retval < 0) return retval; } bc.size = async->prealloc_bufsz; bc.maximum_size = async->max_bufsize; copyback: if (copy_to_user(arg, &bc, sizeof(bc))) return -EFAULT; return 0; } /* * COMEDI_DEVINFO ioctl * device info * * arg: * pointer to comedi_devinfo structure * * reads: * nothing * * writes: * comedi_devinfo structure */ static int do_devinfo_ioctl(struct comedi_device *dev, struct comedi_devinfo __user *arg, struct file *file) { struct comedi_subdevice *s; struct comedi_devinfo devinfo; lockdep_assert_held(&dev->mutex); memset(&devinfo, 0, sizeof(devinfo)); /* fill devinfo structure */ devinfo.version_code = COMEDI_VERSION_CODE; devinfo.n_subdevs = dev->n_subdevices; strscpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN); strscpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN); s = comedi_file_read_subdevice(file); if (s) devinfo.read_subdevice = s->index; else devinfo.read_subdevice = -1; s = comedi_file_write_subdevice(file); if (s) devinfo.write_subdevice = s->index; else devinfo.write_subdevice = -1; if (copy_to_user(arg, &devinfo, sizeof(devinfo))) return -EFAULT; return 0; } /* * COMEDI_SUBDINFO ioctl * subdevices info * * arg: * pointer to array of comedi_subdinfo structures * * reads: * nothing * * writes: * array of comedi_subdinfo structures */ static int do_subdinfo_ioctl(struct comedi_device *dev, struct comedi_subdinfo __user *arg, void *file) { int ret, i; struct comedi_subdinfo *tmp, *us; struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); tmp = kcalloc(dev->n_subdevices, sizeof(*tmp), GFP_KERNEL); if (!tmp) return -ENOMEM; /* fill subdinfo structs */ for (i = 0; i < dev->n_subdevices; i++) { s = &dev->subdevices[i]; us = tmp + i; us->type = s->type; us->n_chan = s->n_chan; us->subd_flags = s->subdev_flags; if (comedi_is_subdevice_running(s)) us->subd_flags |= SDF_RUNNING; #define TIMER_nanosec 5 /* backwards compatibility */ us->timer_type = TIMER_nanosec; us->len_chanlist = s->len_chanlist; us->maxdata = s->maxdata; if (s->range_table) { us->range_type = (i << 24) | (0 << 16) | (s->range_table->length); } else { us->range_type = 0; /* XXX */ } if (s->busy) us->subd_flags |= SDF_BUSY; if (s->busy == file) us->subd_flags |= SDF_BUSY_OWNER; if (s->lock) us->subd_flags |= SDF_LOCKED; if (s->lock == file) us->subd_flags |= SDF_LOCK_OWNER; if (!s->maxdata && s->maxdata_list) us->subd_flags |= SDF_MAXDATA; if (s->range_table_list) us->subd_flags |= SDF_RANGETYPE; if (s->do_cmd) us->subd_flags |= SDF_CMD; if (s->insn_bits != &insn_inval) us->insn_bits_support = COMEDI_SUPPORTED; else us->insn_bits_support = 
COMEDI_UNSUPPORTED; } ret = copy_to_user(arg, tmp, dev->n_subdevices * sizeof(*tmp)); kfree(tmp); return ret ? -EFAULT : 0; } /* * COMEDI_CHANINFO ioctl * subdevice channel info * * arg: * pointer to comedi_chaninfo structure * * reads: * comedi_chaninfo structure * * writes: * array of maxdata values to chaninfo->maxdata_list if requested * array of range table lengths to chaninfo->range_table_list if requested */ static int do_chaninfo_ioctl(struct comedi_device *dev, struct comedi_chaninfo *it) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (it->subdev >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[it->subdev]; if (it->maxdata_list) { if (s->maxdata || !s->maxdata_list) return -EINVAL; if (copy_to_user(it->maxdata_list, s->maxdata_list, s->n_chan * sizeof(unsigned int))) return -EFAULT; } if (it->flaglist) return -EINVAL; /* flaglist not supported */ if (it->rangelist) { int i; if (!s->range_table_list) return -EINVAL; for (i = 0; i < s->n_chan; i++) { int x; x = (dev->minor << 28) | (it->subdev << 24) | (i << 16) | (s->range_table_list[i]->length); if (put_user(x, it->rangelist + i)) return -EFAULT; } } return 0; } /* * COMEDI_BUFINFO ioctl * buffer information * * arg: * pointer to comedi_bufinfo structure * * reads: * comedi_bufinfo structure * * writes: * modified comedi_bufinfo structure */ static int do_bufinfo_ioctl(struct comedi_device *dev, struct comedi_bufinfo __user *arg, void *file) { struct comedi_bufinfo bi; struct comedi_subdevice *s; struct comedi_async *async; unsigned int runflags; int retval = 0; bool become_nonbusy = false; lockdep_assert_held(&dev->mutex); if (copy_from_user(&bi, arg, sizeof(bi))) return -EFAULT; if (bi.subdevice >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[bi.subdevice]; async = s->async; if (!async || s->busy != file) return -EINVAL; runflags = comedi_get_subdevice_runflags(s); if (!(async->cmd.flags & CMDF_WRITE)) { /* command was set up in "read" direction */ if (bi.bytes_read) { comedi_buf_read_alloc(s, bi.bytes_read); bi.bytes_read = comedi_buf_read_free(s, bi.bytes_read); } /* * If nothing left to read, and command has stopped, and * {"read" position not updated or command stopped normally}, * then become non-busy. 
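 * If the command stopped because of an error, report it to the user as
 * -EPIPE at that point.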
*/ if (comedi_buf_read_n_available(s) == 0 && !comedi_is_runflags_running(runflags) && (bi.bytes_read == 0 || !comedi_is_runflags_in_error(runflags))) { become_nonbusy = true; if (comedi_is_runflags_in_error(runflags)) retval = -EPIPE; } bi.bytes_written = 0; } else { /* command was set up in "write" direction */ if (!comedi_is_runflags_running(runflags)) { bi.bytes_written = 0; become_nonbusy = true; if (comedi_is_runflags_in_error(runflags)) retval = -EPIPE; } else if (bi.bytes_written) { comedi_buf_write_alloc(s, bi.bytes_written); bi.bytes_written = comedi_buf_write_free(s, bi.bytes_written); } bi.bytes_read = 0; } bi.buf_write_count = async->buf_write_count; bi.buf_write_ptr = async->buf_write_ptr; bi.buf_read_count = async->buf_read_count; bi.buf_read_ptr = async->buf_read_ptr; if (become_nonbusy) do_become_nonbusy(dev, s); if (retval) return retval; if (copy_to_user(arg, &bi, sizeof(bi))) return -EFAULT; return 0; } static int check_insn_config_length(struct comedi_insn *insn, unsigned int *data) { if (insn->n < 1) return -EINVAL; switch (data[0]) { case INSN_CONFIG_DIO_OUTPUT: case INSN_CONFIG_DIO_INPUT: case INSN_CONFIG_DISARM: case INSN_CONFIG_RESET: if (insn->n == 1) return 0; break; case INSN_CONFIG_ARM: case INSN_CONFIG_DIO_QUERY: case INSN_CONFIG_BLOCK_SIZE: case INSN_CONFIG_FILTER: case INSN_CONFIG_SERIAL_CLOCK: case INSN_CONFIG_BIDIRECTIONAL_DATA: case INSN_CONFIG_ALT_SOURCE: case INSN_CONFIG_SET_COUNTER_MODE: case INSN_CONFIG_8254_READ_STATUS: case INSN_CONFIG_SET_ROUTING: case INSN_CONFIG_GET_ROUTING: case INSN_CONFIG_GET_PWM_STATUS: case INSN_CONFIG_PWM_SET_PERIOD: case INSN_CONFIG_PWM_GET_PERIOD: if (insn->n == 2) return 0; break; case INSN_CONFIG_SET_GATE_SRC: case INSN_CONFIG_GET_GATE_SRC: case INSN_CONFIG_SET_CLOCK_SRC: case INSN_CONFIG_GET_CLOCK_SRC: case INSN_CONFIG_SET_OTHER_SRC: case INSN_CONFIG_GET_COUNTER_STATUS: case INSN_CONFIG_GET_PWM_OUTPUT: case INSN_CONFIG_PWM_SET_H_BRIDGE: case INSN_CONFIG_PWM_GET_H_BRIDGE: case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE: if (insn->n == 3) return 0; break; case INSN_CONFIG_PWM_OUTPUT: case INSN_CONFIG_ANALOG_TRIG: case INSN_CONFIG_TIMER_1: if (insn->n == 5) return 0; break; case INSN_CONFIG_DIGITAL_TRIG: if (insn->n == 6) return 0; break; case INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS: if (insn->n >= 4) return 0; break; /* * by default we allow the insn since we don't have checks for * all possible cases yet */ default: pr_warn("No check for data length of config insn id %i is implemented\n", data[0]); pr_warn("Add a check to %s in %s\n", __func__, __FILE__); pr_warn("Assuming n=%i is correct\n", insn->n); return 0; } return -EINVAL; } static int check_insn_device_config_length(struct comedi_insn *insn, unsigned int *data) { if (insn->n < 1) return -EINVAL; switch (data[0]) { case INSN_DEVICE_CONFIG_TEST_ROUTE: case INSN_DEVICE_CONFIG_CONNECT_ROUTE: case INSN_DEVICE_CONFIG_DISCONNECT_ROUTE: if (insn->n == 3) return 0; break; case INSN_DEVICE_CONFIG_GET_ROUTES: /* * Big enough for config_id and the length of the userland * memory buffer. Additional length should be in factors of 2 * to communicate any returned route pairs (source,destination). */ if (insn->n >= 2) return 0; break; } return -EINVAL; } /** * get_valid_routes() - Calls low-level driver get_valid_routes function to * either return a count of valid routes to user, or copy * of list of all valid device routes to buffer in * userspace. * @dev: comedi device pointer * @data: data from user insn call. The length of the data must be >= 2. 
* data[0] must contain the INSN_DEVICE_CONFIG config_id. * data[1](input) contains the number of _pairs_ for which memory is * allotted from the user. If the user specifies '0', then only * the number of pairs available is returned. * data[1](output) returns either the number of pairs available (if none * were requested) or the number of _pairs_ that are copied back * to the user. * data[2::2] returns each (source, destination) pair. * * Return: -EINVAL if low-level driver does not allocate and return routes as * expected. Returns 0 otherwise. */ static int get_valid_routes(struct comedi_device *dev, unsigned int *data) { lockdep_assert_held(&dev->mutex); data[1] = dev->get_valid_routes(dev, data[1], data + 2); return 0; } static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn, unsigned int *data, void *file) { struct comedi_subdevice *s; int ret = 0; int i; lockdep_assert_held(&dev->mutex); if (insn->insn & INSN_MASK_SPECIAL) { /* a non-subdevice instruction */ switch (insn->insn) { case INSN_GTOD: { struct timespec64 tv; if (insn->n != 2) { ret = -EINVAL; break; } ktime_get_real_ts64(&tv); /* unsigned data safe until 2106 */ data[0] = (unsigned int)tv.tv_sec; data[1] = tv.tv_nsec / NSEC_PER_USEC; ret = 2; break; } case INSN_WAIT: if (insn->n != 1 || data[0] >= 100000) { ret = -EINVAL; break; } udelay(data[0] / 1000); ret = 1; break; case INSN_INTTRIG: if (insn->n != 1) { ret = -EINVAL; break; } if (insn->subdev >= dev->n_subdevices) { dev_dbg(dev->class_dev, "%d not usable subdevice\n", insn->subdev); ret = -EINVAL; break; } s = &dev->subdevices[insn->subdev]; if (!s->async) { dev_dbg(dev->class_dev, "no async\n"); ret = -EINVAL; break; } if (!s->async->inttrig) { dev_dbg(dev->class_dev, "no inttrig\n"); ret = -EAGAIN; break; } ret = s->async->inttrig(dev, s, data[0]); if (ret >= 0) ret = 1; break; case INSN_DEVICE_CONFIG: ret = check_insn_device_config_length(insn, data); if (ret) break; if (data[0] == INSN_DEVICE_CONFIG_GET_ROUTES) { /* * data[1] should be the number of _pairs_ that * the memory can hold. */ data[1] = (insn->n - 2) / 2; ret = get_valid_routes(dev, data); break; } /* other global device config instructions. */ ret = dev->insn_device_config(dev, insn, data); break; default: dev_dbg(dev->class_dev, "invalid insn\n"); ret = -EINVAL; break; } } else { /* a subdevice instruction */ unsigned int maxdata; if (insn->subdev >= dev->n_subdevices) { dev_dbg(dev->class_dev, "subdevice %d out of range\n", insn->subdev); ret = -EINVAL; goto out; } s = &dev->subdevices[insn->subdev]; if (s->type == COMEDI_SUBD_UNUSED) { dev_dbg(dev->class_dev, "%d not usable subdevice\n", insn->subdev); ret = -EIO; goto out; } /* are we locked? (ioctl lock) */ if (s->lock && s->lock != file) { dev_dbg(dev->class_dev, "device locked\n"); ret = -EACCES; goto out; } ret = comedi_check_chanlist(s, 1, &insn->chanspec); if (ret < 0) { ret = -EINVAL; dev_dbg(dev->class_dev, "bad chanspec\n"); goto out; } if (s->busy) { ret = -EBUSY; goto out; } /* This looks arbitrary. It is. */ s->busy = parse_insn; switch (insn->insn) { case INSN_READ: ret = s->insn_read(dev, s, insn, data); if (ret == -ETIMEDOUT) { dev_dbg(dev->class_dev, "subdevice %d read instruction timed out\n", s->index); } break; case INSN_WRITE: maxdata = s->maxdata_list ?
s->maxdata_list[CR_CHAN(insn->chanspec)] : s->maxdata; for (i = 0; i < insn->n; ++i) { if (data[i] > maxdata) { ret = -EINVAL; dev_dbg(dev->class_dev, "bad data value(s)\n"); break; } } if (ret == 0) { ret = s->insn_write(dev, s, insn, data); if (ret == -ETIMEDOUT) { dev_dbg(dev->class_dev, "subdevice %d write instruction timed out\n", s->index); } } break; case INSN_BITS: if (insn->n != 2) { ret = -EINVAL; } else { /* * Most drivers ignore the base channel in * insn->chanspec. Fix this here if * the subdevice has <= 32 channels. */ unsigned int orig_mask = data[0]; unsigned int shift = 0; if (s->n_chan <= 32) { shift = CR_CHAN(insn->chanspec); if (shift > 0) { insn->chanspec = 0; data[0] <<= shift; data[1] <<= shift; } } ret = s->insn_bits(dev, s, insn, data); data[0] = orig_mask; if (shift > 0) data[1] >>= shift; } break; case INSN_CONFIG: ret = check_insn_config_length(insn, data); if (ret) break; ret = s->insn_config(dev, s, insn, data); break; default: ret = -EINVAL; break; } s->busy = NULL; } out: return ret; } /* * COMEDI_INSNLIST ioctl * synchronous instruction list * * arg: * pointer to comedi_insnlist structure * * reads: * comedi_insnlist structure * array of comedi_insn structures from insnlist->insns pointer * data (for writes) from insns[].data pointers * * writes: * data (for reads) to insns[].data pointers */ /* arbitrary limits */ #define MIN_SAMPLES 16 #define MAX_SAMPLES 65536 static int do_insnlist_ioctl(struct comedi_device *dev, struct comedi_insn *insns, unsigned int n_insns, void *file) { unsigned int *data = NULL; unsigned int max_n_data_required = MIN_SAMPLES; int i = 0; int ret = 0; lockdep_assert_held(&dev->mutex); /* Determine maximum memory needed for all instructions. */ for (i = 0; i < n_insns; ++i) { if (insns[i].n > MAX_SAMPLES) { dev_dbg(dev->class_dev, "number of samples too large\n"); ret = -EINVAL; goto error; } max_n_data_required = max(max_n_data_required, insns[i].n); } /* Allocate scratch space for all instruction data. */ data = kmalloc_array(max_n_data_required, sizeof(unsigned int), GFP_KERNEL); if (!data) { ret = -ENOMEM; goto error; } for (i = 0; i < n_insns; ++i) { if (insns[i].insn & INSN_MASK_WRITE) { if (copy_from_user(data, insns[i].data, insns[i].n * sizeof(unsigned int))) { dev_dbg(dev->class_dev, "copy_from_user failed\n"); ret = -EFAULT; goto error; } } ret = parse_insn(dev, insns + i, data, file); if (ret < 0) goto error; if (insns[i].insn & INSN_MASK_READ) { if (copy_to_user(insns[i].data, data, insns[i].n * sizeof(unsigned int))) { dev_dbg(dev->class_dev, "copy_to_user failed\n"); ret = -EFAULT; goto error; } } if (need_resched()) schedule(); } error: kfree(data); if (ret < 0) return ret; return i; } /* * COMEDI_INSN ioctl * synchronous instruction * * arg: * pointer to comedi_insn structure * * reads: * comedi_insn structure * data (for writes) from insn->data pointer * * writes: * data (for reads) to insn->data pointer */ static int do_insn_ioctl(struct comedi_device *dev, struct comedi_insn *insn, void *file) { unsigned int *data = NULL; unsigned int n_data = MIN_SAMPLES; int ret = 0; lockdep_assert_held(&dev->mutex); n_data = max(n_data, insn->n); /* This is where the behavior of insn and insnlist deviate. 
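 * An over-length request is silently truncated to MAX_SAMPLES here,
 * whereas do_insnlist_ioctl() rejects it with -EINVAL.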
*/ if (insn->n > MAX_SAMPLES) { insn->n = MAX_SAMPLES; n_data = MAX_SAMPLES; } data = kmalloc_array(n_data, sizeof(unsigned int), GFP_KERNEL); if (!data) { ret = -ENOMEM; goto error; } if (insn->insn & INSN_MASK_WRITE) { if (copy_from_user(data, insn->data, insn->n * sizeof(unsigned int))) { ret = -EFAULT; goto error; } } ret = parse_insn(dev, insn, data, file); if (ret < 0) goto error; if (insn->insn & INSN_MASK_READ) { if (copy_to_user(insn->data, data, insn->n * sizeof(unsigned int))) { ret = -EFAULT; goto error; } } ret = insn->n; error: kfree(data); return ret; } static int __comedi_get_user_cmd(struct comedi_device *dev, struct comedi_cmd *cmd) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (cmd->subdev >= dev->n_subdevices) { dev_dbg(dev->class_dev, "%d no such subdevice\n", cmd->subdev); return -ENODEV; } s = &dev->subdevices[cmd->subdev]; if (s->type == COMEDI_SUBD_UNUSED) { dev_dbg(dev->class_dev, "%d not valid subdevice\n", cmd->subdev); return -EIO; } if (!s->do_cmd || !s->do_cmdtest || !s->async) { dev_dbg(dev->class_dev, "subdevice %d does not support commands\n", cmd->subdev); return -EIO; } /* make sure channel/gain list isn't too long */ if (cmd->chanlist_len > s->len_chanlist) { dev_dbg(dev->class_dev, "channel/gain list too long %d > %d\n", cmd->chanlist_len, s->len_chanlist); return -EINVAL; } /* * Set the CMDF_WRITE flag to the correct state if the subdevice * supports only "read" commands or only "write" commands. */ switch (s->subdev_flags & (SDF_CMD_READ | SDF_CMD_WRITE)) { case SDF_CMD_READ: cmd->flags &= ~CMDF_WRITE; break; case SDF_CMD_WRITE: cmd->flags |= CMDF_WRITE; break; default: break; } return 0; } static int __comedi_get_user_chanlist(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int __user *user_chanlist, struct comedi_cmd *cmd) { unsigned int *chanlist; int ret; lockdep_assert_held(&dev->mutex); cmd->chanlist = NULL; chanlist = memdup_array_user(user_chanlist, cmd->chanlist_len, sizeof(unsigned int)); if (IS_ERR(chanlist)) return PTR_ERR(chanlist); /* make sure each element in channel/gain list is valid */ ret = comedi_check_chanlist(s, cmd->chanlist_len, chanlist); if (ret < 0) { kfree(chanlist); return ret; } cmd->chanlist = chanlist; return 0; } /* * COMEDI_CMD ioctl * asynchronous acquisition command set-up * * arg: * pointer to comedi_cmd structure * * reads: * comedi_cmd structure * channel/range list from cmd->chanlist pointer * * writes: * possibly modified comedi_cmd structure (when -EAGAIN returned) */ static int do_cmd_ioctl(struct comedi_device *dev, struct comedi_cmd *cmd, bool *copy, void *file) { struct comedi_subdevice *s; struct comedi_async *async; unsigned int __user *user_chanlist; int ret; lockdep_assert_held(&dev->mutex); /* do some simple cmd validation */ ret = __comedi_get_user_cmd(dev, cmd); if (ret) return ret; /* save user's chanlist pointer so it can be restored later */ user_chanlist = (unsigned int __user *)cmd->chanlist; s = &dev->subdevices[cmd->subdev]; async = s->async; /* are we locked? (ioctl lock) */ if (s->lock && s->lock != file) { dev_dbg(dev->class_dev, "subdevice locked\n"); return -EACCES; } /* are we busy? 
*/ if (s->busy) { dev_dbg(dev->class_dev, "subdevice busy\n"); return -EBUSY; } /* make sure channel/gain list isn't too short */ if (cmd->chanlist_len < 1) { dev_dbg(dev->class_dev, "channel/gain list too short %u < 1\n", cmd->chanlist_len); return -EINVAL; } async->cmd = *cmd; async->cmd.data = NULL; /* load channel/gain list */ ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &async->cmd); if (ret) goto cleanup; ret = s->do_cmdtest(dev, s, &async->cmd); if (async->cmd.flags & CMDF_BOGUS || ret) { dev_dbg(dev->class_dev, "test returned %d\n", ret); *cmd = async->cmd; /* restore chanlist pointer before copying back */ cmd->chanlist = (unsigned int __force *)user_chanlist; cmd->data = NULL; *copy = true; ret = -EAGAIN; goto cleanup; } if (!async->prealloc_bufsz) { ret = -ENOMEM; dev_dbg(dev->class_dev, "no buffer (?)\n"); goto cleanup; } comedi_buf_reset(s); async->cb_mask = COMEDI_CB_BLOCK | COMEDI_CB_CANCEL_MASK; if (async->cmd.flags & CMDF_WAKE_EOS) async->cb_mask |= COMEDI_CB_EOS; comedi_update_subdevice_runflags(s, COMEDI_SRF_BUSY_MASK, COMEDI_SRF_RUNNING); /* * Set s->busy _after_ setting COMEDI_SRF_RUNNING flag to avoid * race with comedi_read() or comedi_write(). */ s->busy = file; ret = s->do_cmd(dev, s); if (ret == 0) return 0; cleanup: do_become_nonbusy(dev, s); return ret; } /* * COMEDI_CMDTEST ioctl * asynchronous acquisition command testing * * arg: * pointer to comedi_cmd structure * * reads: * comedi_cmd structure * channel/range list from cmd->chanlist pointer * * writes: * possibly modified comedi_cmd structure */ static int do_cmdtest_ioctl(struct comedi_device *dev, struct comedi_cmd *cmd, bool *copy, void *file) { struct comedi_subdevice *s; unsigned int __user *user_chanlist; int ret; lockdep_assert_held(&dev->mutex); /* do some simple cmd validation */ ret = __comedi_get_user_cmd(dev, cmd); if (ret) return ret; /* save user's chanlist pointer so it can be restored later */ user_chanlist = (unsigned int __user *)cmd->chanlist; s = &dev->subdevices[cmd->subdev]; /* user_chanlist can be NULL for COMEDI_CMDTEST ioctl */ if (user_chanlist) { /* load channel/gain list */ ret = __comedi_get_user_chanlist(dev, s, user_chanlist, cmd); if (ret) return ret; } ret = s->do_cmdtest(dev, s, cmd); kfree(cmd->chanlist); /* free kernel copy of user chanlist */ /* restore chanlist pointer before copying back */ cmd->chanlist = (unsigned int __force *)user_chanlist; *copy = true; return ret; } /* * COMEDI_LOCK ioctl * lock subdevice * * arg: * subdevice number * * reads: * nothing * * writes: * nothing */ static int do_lock_ioctl(struct comedi_device *dev, unsigned long arg, void *file) { int ret = 0; unsigned long flags; struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (arg >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[arg]; spin_lock_irqsave(&s->spin_lock, flags); if (s->busy || s->lock) ret = -EBUSY; else s->lock = file; spin_unlock_irqrestore(&s->spin_lock, flags); return ret; } /* * COMEDI_UNLOCK ioctl * unlock subdevice * * arg: * subdevice number * * reads: * nothing * * writes: * nothing */ static int do_unlock_ioctl(struct comedi_device *dev, unsigned long arg, void *file) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (arg >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[arg]; if (s->busy) return -EBUSY; if (s->lock && s->lock != file) return -EACCES; if (s->lock == file) s->lock = NULL; return 0; } /* * COMEDI_CANCEL ioctl * cancel asynchronous acquisition * * arg: * subdevice number * * reads: * nothing * * 
writes: * nothing */ static int do_cancel_ioctl(struct comedi_device *dev, unsigned long arg, void *file) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (arg >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[arg]; if (!s->async) return -EINVAL; if (!s->busy) return 0; if (s->busy != file) return -EBUSY; return do_cancel(dev, s); } /* * COMEDI_POLL ioctl * instructs driver to synchronize buffers * * arg: * subdevice number * * reads: * nothing * * writes: * nothing */ static int do_poll_ioctl(struct comedi_device *dev, unsigned long arg, void *file) { struct comedi_subdevice *s; lockdep_assert_held(&dev->mutex); if (arg >= dev->n_subdevices) return -EINVAL; s = &dev->subdevices[arg]; if (!s->busy) return 0; if (s->busy != file) return -EBUSY; if (s->poll) return s->poll(dev, s); return -EINVAL; } /* * COMEDI_SETRSUBD ioctl * sets the current "read" subdevice on a per-file basis * * arg: * subdevice number * * reads: * nothing * * writes: * nothing */ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg, struct file *file) { struct comedi_file *cfp = file->private_data; struct comedi_subdevice *s_old, *s_new; lockdep_assert_held(&dev->mutex); if (arg >= dev->n_subdevices) return -EINVAL; s_new = &dev->subdevices[arg]; s_old = comedi_file_read_subdevice(file); if (s_old == s_new) return 0; /* no change */ if (!(s_new->subdev_flags & SDF_CMD_READ)) return -EINVAL; /* * Check the file isn't still busy handling a "read" command on the * old subdevice (if any). */ if (s_old && s_old->busy == file && s_old->async && !(s_old->async->cmd.flags & CMDF_WRITE)) return -EBUSY; WRITE_ONCE(cfp->read_subdev, s_new); return 0; } /* * COMEDI_SETWSUBD ioctl * sets the current "write" subdevice on a per-file basis * * arg: * subdevice number * * reads: * nothing * * writes: * nothing */ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg, struct file *file) { struct comedi_file *cfp = file->private_data; struct comedi_subdevice *s_old, *s_new; lockdep_assert_held(&dev->mutex); if (arg >= dev->n_subdevices) return -EINVAL; s_new = &dev->subdevices[arg]; s_old = comedi_file_write_subdevice(file); if (s_old == s_new) return 0; /* no change */ if (!(s_new->subdev_flags & SDF_CMD_WRITE)) return -EINVAL; /* * Check the file isn't still busy handling a "write" command on the * old subdevice (if any). */ if (s_old && s_old->busy == file && s_old->async && (s_old->async->cmd.flags & CMDF_WRITE)) return -EBUSY; WRITE_ONCE(cfp->write_subdev, s_new); return 0; } static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor = iminor(file_inode(file)); struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; int rc; mutex_lock(&dev->mutex); /* * Device config is special, because it must work on * an unconfigured device. */ if (cmd == COMEDI_DEVCONFIG) { if (minor >= COMEDI_NUM_BOARD_MINORS) { /* Device config not appropriate on non-board minors. */ rc = -ENOTTY; goto done; } rc = do_devconfig_ioctl(dev, (struct comedi_devconfig __user *)arg); if (rc == 0) { if (arg == 0 && dev->minor >= comedi_num_legacy_minors) { /* * Successfully unconfigured a dynamically * allocated device. Try and remove it. 
*/ if (comedi_clear_board_dev(dev)) { mutex_unlock(&dev->mutex); comedi_free_board_dev(dev); return rc; } } } goto done; } if (!dev->attached) { dev_dbg(dev->class_dev, "no driver attached\n"); rc = -ENODEV; goto done; } switch (cmd) { case COMEDI_BUFCONFIG: rc = do_bufconfig_ioctl(dev, (struct comedi_bufconfig __user *)arg); break; case COMEDI_DEVINFO: rc = do_devinfo_ioctl(dev, (struct comedi_devinfo __user *)arg, file); break; case COMEDI_SUBDINFO: rc = do_subdinfo_ioctl(dev, (struct comedi_subdinfo __user *)arg, file); break; case COMEDI_CHANINFO: { struct comedi_chaninfo it; if (copy_from_user(&it, (void __user *)arg, sizeof(it))) rc = -EFAULT; else rc = do_chaninfo_ioctl(dev, &it); break; } case COMEDI_RANGEINFO: { struct comedi_rangeinfo it; if (copy_from_user(&it, (void __user *)arg, sizeof(it))) rc = -EFAULT; else rc = do_rangeinfo_ioctl(dev, &it); break; } case COMEDI_BUFINFO: rc = do_bufinfo_ioctl(dev, (struct comedi_bufinfo __user *)arg, file); break; case COMEDI_LOCK: rc = do_lock_ioctl(dev, arg, file); break; case COMEDI_UNLOCK: rc = do_unlock_ioctl(dev, arg, file); break; case COMEDI_CANCEL: rc = do_cancel_ioctl(dev, arg, file); break; case COMEDI_CMD: { struct comedi_cmd cmd; bool copy = false; if (copy_from_user(&cmd, (void __user *)arg, sizeof(cmd))) { rc = -EFAULT; break; } rc = do_cmd_ioctl(dev, &cmd, &copy, file); if (copy && copy_to_user((void __user *)arg, &cmd, sizeof(cmd))) rc = -EFAULT; break; } case COMEDI_CMDTEST: { struct comedi_cmd cmd; bool copy = false; if (copy_from_user(&cmd, (void __user *)arg, sizeof(cmd))) { rc = -EFAULT; break; } rc = do_cmdtest_ioctl(dev, &cmd, &copy, file); if (copy && copy_to_user((void __user *)arg, &cmd, sizeof(cmd))) rc = -EFAULT; break; } case COMEDI_INSNLIST: { struct comedi_insnlist insnlist; struct comedi_insn *insns = NULL; if (copy_from_user(&insnlist, (void __user *)arg, sizeof(insnlist))) { rc = -EFAULT; break; } insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL); if (!insns) { rc = -ENOMEM; break; } if (copy_from_user(insns, insnlist.insns, sizeof(*insns) * insnlist.n_insns)) { rc = -EFAULT; kfree(insns); break; } rc = do_insnlist_ioctl(dev, insns, insnlist.n_insns, file); kfree(insns); break; } case COMEDI_INSN: { struct comedi_insn insn; if (copy_from_user(&insn, (void __user *)arg, sizeof(insn))) rc = -EFAULT; else rc = do_insn_ioctl(dev, &insn, file); break; } case COMEDI_POLL: rc = do_poll_ioctl(dev, arg, file); break; case COMEDI_SETRSUBD: rc = do_setrsubd_ioctl(dev, arg, file); break; case COMEDI_SETWSUBD: rc = do_setwsubd_ioctl(dev, arg, file); break; default: rc = -ENOTTY; break; } done: mutex_unlock(&dev->mutex); return rc; } static void comedi_vm_open(struct vm_area_struct *area) { struct comedi_buf_map *bm; bm = area->vm_private_data; comedi_buf_map_get(bm); } static void comedi_vm_close(struct vm_area_struct *area) { struct comedi_buf_map *bm; bm = area->vm_private_data; comedi_buf_map_put(bm); } static int comedi_vm_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { struct comedi_buf_map *bm = vma->vm_private_data; unsigned long offset = addr - vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT); if (len < 0) return -EINVAL; if (len > vma->vm_end - addr) len = vma->vm_end - addr; return comedi_buf_map_access(bm, offset, buf, len, write); } static const struct vm_operations_struct comedi_vm_ops = { .open = comedi_vm_open, .close = comedi_vm_close, .access = comedi_vm_access, }; static int comedi_mmap(struct file *file, struct vm_area_struct *vma) { struct comedi_file *cfp =
file->private_data; struct comedi_device *dev = cfp->dev; struct comedi_subdevice *s; struct comedi_async *async; struct comedi_buf_map *bm = NULL; struct comedi_buf_page *buf; unsigned long start = vma->vm_start; unsigned long size; int n_pages; int i; int retval = 0; /* * 'trylock' avoids circular dependency with current->mm->mmap_lock * and down-reading &dev->attach_lock should normally succeed without * contention unless the device is in the process of being attached * or detached. */ if (!down_read_trylock(&dev->attach_lock)) return -EAGAIN; if (!dev->attached) { dev_dbg(dev->class_dev, "no driver attached\n"); retval = -ENODEV; goto done; } if (vma->vm_flags & VM_WRITE) s = comedi_file_write_subdevice(file); else s = comedi_file_read_subdevice(file); if (!s) { retval = -EINVAL; goto done; } async = s->async; if (!async) { retval = -EINVAL; goto done; } if (vma->vm_pgoff != 0) { dev_dbg(dev->class_dev, "mmap() offset must be 0.\n"); retval = -EINVAL; goto done; } size = vma->vm_end - vma->vm_start; if (size > async->prealloc_bufsz) { retval = -EFAULT; goto done; } if (offset_in_page(size)) { retval = -EFAULT; goto done; } n_pages = vma_pages(vma); /* get reference to current buf map (if any) */ bm = comedi_buf_map_from_subdev_get(s); if (!bm || n_pages > bm->n_pages) { retval = -EINVAL; goto done; } if (bm->dma_dir != DMA_NONE) { unsigned long vm_start = vma->vm_start; unsigned long vm_end = vma->vm_end; /* * Buffer pages are not contiguous, so temporarily modify VMA * start and end addresses for each buffer page. */ for (i = 0; i < n_pages; ++i) { buf = &bm->page_list[i]; vma->vm_start = start; vma->vm_end = start + PAGE_SIZE; retval = dma_mmap_coherent(bm->dma_hw_dev, vma, buf->virt_addr, buf->dma_addr, PAGE_SIZE); if (retval) break; start += PAGE_SIZE; } vma->vm_start = vm_start; vma->vm_end = vm_end; } else { for (i = 0; i < n_pages; ++i) { unsigned long pfn; buf = &bm->page_list[i]; pfn = page_to_pfn(virt_to_page(buf->virt_addr)); retval = remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED); if (retval) break; start += PAGE_SIZE; } } #ifdef CONFIG_MMU /* * Leaving behind a partial mapping of a buffer we're about to drop is * unsafe, see remap_pfn_range_notrack(). We need to zap the range * here ourselves instead of relying on the automatic zapping in * remap_pfn_range() because we call remap_pfn_range() in a loop. 
*/ if (retval) zap_vma_ptes(vma, vma->vm_start, size); #endif if (retval == 0) { vma->vm_ops = &comedi_vm_ops; vma->vm_private_data = bm; vma->vm_ops->open(vma); } done: up_read(&dev->attach_lock); comedi_buf_map_put(bm); /* put reference to buf map - okay if NULL */ return retval; } static __poll_t comedi_poll(struct file *file, poll_table *wait) { __poll_t mask = 0; struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi_subdevice *s, *s_read; down_read(&dev->attach_lock); if (!dev->attached) { dev_dbg(dev->class_dev, "no driver attached\n"); goto done; } s = comedi_file_read_subdevice(file); s_read = s; if (s && s->async) { poll_wait(file, &s->async->wait_head, wait); if (s->busy != file || !comedi_is_subdevice_running(s) || (s->async->cmd.flags & CMDF_WRITE) || comedi_buf_read_n_available(s) > 0) mask |= EPOLLIN | EPOLLRDNORM; } s = comedi_file_write_subdevice(file); if (s && s->async) { unsigned int bps = comedi_bytes_per_sample(s); if (s != s_read) poll_wait(file, &s->async->wait_head, wait); if (s->busy != file || !comedi_is_subdevice_running(s) || !(s->async->cmd.flags & CMDF_WRITE) || comedi_buf_write_n_available(s) >= bps) mask |= EPOLLOUT | EPOLLWRNORM; } done: up_read(&dev->attach_lock); return mask; } static unsigned int comedi_buf_copy_to_user(struct comedi_subdevice *s, void __user *dest, unsigned int src_offset, unsigned int n) { struct comedi_buf_map *bm = s->async->buf_map; struct comedi_buf_page *buf_page_list = bm->page_list; unsigned int page = src_offset >> PAGE_SHIFT; unsigned int offset = offset_in_page(src_offset); while (n) { unsigned int copy_amount = min(n, PAGE_SIZE - offset); unsigned int uncopied; uncopied = copy_to_user(dest, buf_page_list[page].virt_addr + offset, copy_amount); copy_amount -= uncopied; n -= copy_amount; if (uncopied) break; dest += copy_amount; page++; if (page == bm->n_pages) page = 0; /* buffer wraparound */ offset = 0; } return n; } static unsigned int comedi_buf_copy_from_user(struct comedi_subdevice *s, unsigned int dst_offset, const void __user *src, unsigned int n) { struct comedi_buf_map *bm = s->async->buf_map; struct comedi_buf_page *buf_page_list = bm->page_list; unsigned int page = dst_offset >> PAGE_SHIFT; unsigned int offset = offset_in_page(dst_offset); while (n) { unsigned int copy_amount = min(n, PAGE_SIZE - offset); unsigned int uncopied; uncopied = copy_from_user(buf_page_list[page].virt_addr + offset, src, copy_amount); copy_amount -= uncopied; n -= copy_amount; if (uncopied) break; src += copy_amount; page++; if (page == bm->n_pages) page = 0; /* buffer wraparound */ offset = 0; } return n; } static ssize_t comedi_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *offset) { struct comedi_subdevice *s; struct comedi_async *async; unsigned int n, m; ssize_t count = 0; int retval = 0; DECLARE_WAITQUEUE(wait, current); struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; bool become_nonbusy = false; bool attach_locked; unsigned int old_detach_count; /* Protect against device detachment during operation. 
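 * attach_lock is held for reading so a concurrent detach cannot free
 * the async buffer under us; dev->mutex is deliberately not taken here
 * (see the become_nonbusy handling below for why).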
*/ down_read(&dev->attach_lock); attach_locked = true; old_detach_count = dev->detach_count; if (!dev->attached) { dev_dbg(dev->class_dev, "no driver attached\n"); retval = -ENODEV; goto out; } s = comedi_file_write_subdevice(file); if (!s || !s->async) { retval = -EIO; goto out; } async = s->async; if (s->busy != file || !(async->cmd.flags & CMDF_WRITE)) { retval = -EINVAL; goto out; } add_wait_queue(&async->wait_head, &wait); while (count == 0 && !retval) { unsigned int runflags; set_current_state(TASK_INTERRUPTIBLE); runflags = comedi_get_subdevice_runflags(s); if (!comedi_is_runflags_running(runflags)) { if (comedi_is_runflags_in_error(runflags)) retval = -EPIPE; if (retval || nbytes) become_nonbusy = true; break; } if (nbytes == 0) break; /* Allocate all free buffer space. */ comedi_buf_write_alloc(s, async->prealloc_bufsz); m = comedi_buf_write_n_allocated(s); n = min_t(size_t, m, nbytes); if (n == 0) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } schedule(); if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (s->busy != file || !(async->cmd.flags & CMDF_WRITE)) { retval = -EINVAL; break; } continue; } set_current_state(TASK_RUNNING); m = comedi_buf_copy_from_user(s, async->buf_write_ptr, buf, n); if (m) { n -= m; retval = -EFAULT; } comedi_buf_write_free(s, n); count += n; nbytes -= n; buf += n; } remove_wait_queue(&async->wait_head, &wait); set_current_state(TASK_RUNNING); if (become_nonbusy && count == 0) { struct comedi_subdevice *new_s; /* * To avoid deadlock, cannot acquire dev->mutex * while dev->attach_lock is held. */ up_read(&dev->attach_lock); attach_locked = false; mutex_lock(&dev->mutex); /* * Check device hasn't become detached behind our back. * Checking dev->detach_count is unchanged ought to be * sufficient (unless there have been 2**32 detaches in the * meantime!), but check the subdevice pointer as well just in * case. * * Also check the subdevice is still in a suitable state to * become non-busy in case it changed behind our back. */ new_s = comedi_file_write_subdevice(file); if (dev->attached && old_detach_count == dev->detach_count && s == new_s && new_s->async == async && s->busy == file && (async->cmd.flags & CMDF_WRITE) && !comedi_is_subdevice_running(s)) do_become_nonbusy(dev, s); mutex_unlock(&dev->mutex); } out: if (attach_locked) up_read(&dev->attach_lock); return count ? count : retval; } static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, loff_t *offset) { struct comedi_subdevice *s; struct comedi_async *async; unsigned int n, m; ssize_t count = 0; int retval = 0; DECLARE_WAITQUEUE(wait, current); struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; unsigned int old_detach_count; bool become_nonbusy = false; bool attach_locked; /* Protect against device detachment during operation. 
*/ down_read(&dev->attach_lock); attach_locked = true; old_detach_count = dev->detach_count; if (!dev->attached) { dev_dbg(dev->class_dev, "no driver attached\n"); retval = -ENODEV; goto out; } s = comedi_file_read_subdevice(file); if (!s || !s->async) { retval = -EIO; goto out; } async = s->async; if (s->busy != file || (async->cmd.flags & CMDF_WRITE)) { retval = -EINVAL; goto out; } add_wait_queue(&async->wait_head, &wait); while (count == 0 && !retval) { set_current_state(TASK_INTERRUPTIBLE); m = comedi_buf_read_n_available(s); n = min_t(size_t, m, nbytes); if (n == 0) { unsigned int runflags = comedi_get_subdevice_runflags(s); if (!comedi_is_runflags_running(runflags)) { if (comedi_is_runflags_in_error(runflags)) retval = -EPIPE; if (retval || nbytes) become_nonbusy = true; break; } if (nbytes == 0) break; if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } schedule(); if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (s->busy != file || (async->cmd.flags & CMDF_WRITE)) { retval = -EINVAL; break; } continue; } set_current_state(TASK_RUNNING); m = comedi_buf_copy_to_user(s, buf, async->buf_read_ptr, n); if (m) { n -= m; retval = -EFAULT; } comedi_buf_read_alloc(s, n); comedi_buf_read_free(s, n); count += n; nbytes -= n; buf += n; } remove_wait_queue(&async->wait_head, &wait); set_current_state(TASK_RUNNING); if (become_nonbusy && count == 0) { struct comedi_subdevice *new_s; /* * To avoid deadlock, cannot acquire dev->mutex * while dev->attach_lock is held. */ up_read(&dev->attach_lock); attach_locked = false; mutex_lock(&dev->mutex); /* * Check device hasn't become detached behind our back. * Checking dev->detach_count is unchanged ought to be * sufficient (unless there have been 2**32 detaches in the * meantime!), but check the subdevice pointer as well just in * case. * * Also check the subdevice is still in a suitable state to * become non-busy in case it changed behind our back. */ new_s = comedi_file_read_subdevice(file); if (dev->attached && old_detach_count == dev->detach_count && s == new_s && new_s->async == async && s->busy == file && !(async->cmd.flags & CMDF_WRITE) && !comedi_is_subdevice_running(s) && comedi_buf_read_n_available(s) == 0) do_become_nonbusy(dev, s); mutex_unlock(&dev->mutex); } out: if (attach_locked) up_read(&dev->attach_lock); return count ? 
count : retval; } static int comedi_open(struct inode *inode, struct file *file) { const unsigned int minor = iminor(inode); struct comedi_file *cfp; struct comedi_device *dev = comedi_dev_get_from_minor(minor); int rc; if (!dev) { pr_debug("invalid minor number\n"); return -ENODEV; } cfp = kzalloc(sizeof(*cfp), GFP_KERNEL); if (!cfp) { comedi_dev_put(dev); return -ENOMEM; } cfp->dev = dev; mutex_lock(&dev->mutex); if (!dev->attached && !capable(CAP_SYS_ADMIN)) { dev_dbg(dev->class_dev, "not attached and not CAP_SYS_ADMIN\n"); rc = -ENODEV; goto out; } if (dev->attached && dev->use_count == 0) { if (!try_module_get(dev->driver->module)) { rc = -ENXIO; goto out; } if (dev->open) { rc = dev->open(dev); if (rc < 0) { module_put(dev->driver->module); goto out; } } } dev->use_count++; file->private_data = cfp; comedi_file_reset(file); rc = 0; out: mutex_unlock(&dev->mutex); if (rc) { comedi_dev_put(dev); kfree(cfp); } return rc; } static int comedi_fasync(int fd, struct file *file, int on) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; return fasync_helper(fd, file, on, &dev->async_queue); } static int comedi_close(struct inode *inode, struct file *file) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi_subdevice *s = NULL; int i; mutex_lock(&dev->mutex); if (dev->subdevices) { for (i = 0; i < dev->n_subdevices; i++) { s = &dev->subdevices[i]; if (s->busy == file) do_cancel(dev, s); if (s->lock == file) s->lock = NULL; } } if (dev->attached && dev->use_count == 1) { if (dev->close) dev->close(dev); module_put(dev->driver->module); } dev->use_count--; mutex_unlock(&dev->mutex); comedi_dev_put(dev); kfree(cfp); return 0; } #ifdef CONFIG_COMPAT #define COMEDI32_CHANINFO _IOR(CIO, 3, struct comedi32_chaninfo_struct) #define COMEDI32_RANGEINFO _IOR(CIO, 8, struct comedi32_rangeinfo_struct) /* * N.B. COMEDI32_CMD and COMEDI_CMD ought to use _IOWR, not _IOR. * It's too late to change it now, but it only affects the command number. */ #define COMEDI32_CMD _IOR(CIO, 9, struct comedi32_cmd_struct) /* * N.B. COMEDI32_CMDTEST and COMEDI_CMDTEST ought to use _IOWR, not _IOR. * It's too late to change it now, but it only affects the command number. 
*/ #define COMEDI32_CMDTEST _IOR(CIO, 10, struct comedi32_cmd_struct) #define COMEDI32_INSNLIST _IOR(CIO, 11, struct comedi32_insnlist_struct) #define COMEDI32_INSN _IOR(CIO, 12, struct comedi32_insn_struct) struct comedi32_chaninfo_struct { unsigned int subdev; compat_uptr_t maxdata_list; /* 32-bit 'unsigned int *' */ compat_uptr_t flaglist; /* 32-bit 'unsigned int *' */ compat_uptr_t rangelist; /* 32-bit 'unsigned int *' */ unsigned int unused[4]; }; struct comedi32_rangeinfo_struct { unsigned int range_type; compat_uptr_t range_ptr; /* 32-bit 'void *' */ }; struct comedi32_cmd_struct { unsigned int subdev; unsigned int flags; unsigned int start_src; unsigned int start_arg; unsigned int scan_begin_src; unsigned int scan_begin_arg; unsigned int convert_src; unsigned int convert_arg; unsigned int scan_end_src; unsigned int scan_end_arg; unsigned int stop_src; unsigned int stop_arg; compat_uptr_t chanlist; /* 32-bit 'unsigned int *' */ unsigned int chanlist_len; compat_uptr_t data; /* 32-bit 'short *' */ unsigned int data_len; }; struct comedi32_insn_struct { unsigned int insn; unsigned int n; compat_uptr_t data; /* 32-bit 'unsigned int *' */ unsigned int subdev; unsigned int chanspec; unsigned int unused[3]; }; struct comedi32_insnlist_struct { unsigned int n_insns; compat_uptr_t insns; /* 32-bit 'struct comedi_insn *' */ }; /* Handle 32-bit COMEDI_CHANINFO ioctl. */ static int compat_chaninfo(struct file *file, unsigned long arg) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi32_chaninfo_struct chaninfo32; struct comedi_chaninfo chaninfo; int err; if (copy_from_user(&chaninfo32, compat_ptr(arg), sizeof(chaninfo32))) return -EFAULT; memset(&chaninfo, 0, sizeof(chaninfo)); chaninfo.subdev = chaninfo32.subdev; chaninfo.maxdata_list = compat_ptr(chaninfo32.maxdata_list); chaninfo.flaglist = compat_ptr(chaninfo32.flaglist); chaninfo.rangelist = compat_ptr(chaninfo32.rangelist); mutex_lock(&dev->mutex); err = do_chaninfo_ioctl(dev, &chaninfo); mutex_unlock(&dev->mutex); return err; } /* Handle 32-bit COMEDI_RANGEINFO ioctl. */ static int compat_rangeinfo(struct file *file, unsigned long arg) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi32_rangeinfo_struct rangeinfo32; struct comedi_rangeinfo rangeinfo; int err; if (copy_from_user(&rangeinfo32, compat_ptr(arg), sizeof(rangeinfo32))) return -EFAULT; memset(&rangeinfo, 0, sizeof(rangeinfo)); rangeinfo.range_type = rangeinfo32.range_type; rangeinfo.range_ptr = compat_ptr(rangeinfo32.range_ptr); mutex_lock(&dev->mutex); err = do_rangeinfo_ioctl(dev, &rangeinfo); mutex_unlock(&dev->mutex); return err; } /* Copy 32-bit cmd structure to native cmd structure. 
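 * Pointer members are widened with compat_ptr().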
*/ static int get_compat_cmd(struct comedi_cmd *cmd, struct comedi32_cmd_struct __user *cmd32) { struct comedi32_cmd_struct v32; if (copy_from_user(&v32, cmd32, sizeof(v32))) return -EFAULT; cmd->subdev = v32.subdev; cmd->flags = v32.flags; cmd->start_src = v32.start_src; cmd->start_arg = v32.start_arg; cmd->scan_begin_src = v32.scan_begin_src; cmd->scan_begin_arg = v32.scan_begin_arg; cmd->convert_src = v32.convert_src; cmd->convert_arg = v32.convert_arg; cmd->scan_end_src = v32.scan_end_src; cmd->scan_end_arg = v32.scan_end_arg; cmd->stop_src = v32.stop_src; cmd->stop_arg = v32.stop_arg; cmd->chanlist = (unsigned int __force *)compat_ptr(v32.chanlist); cmd->chanlist_len = v32.chanlist_len; cmd->data = compat_ptr(v32.data); cmd->data_len = v32.data_len; return 0; } /* Copy native cmd structure to 32-bit cmd structure. */ static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32, struct comedi_cmd *cmd) { struct comedi32_cmd_struct v32; memset(&v32, 0, sizeof(v32)); v32.subdev = cmd->subdev; v32.flags = cmd->flags; v32.start_src = cmd->start_src; v32.start_arg = cmd->start_arg; v32.scan_begin_src = cmd->scan_begin_src; v32.scan_begin_arg = cmd->scan_begin_arg; v32.convert_src = cmd->convert_src; v32.convert_arg = cmd->convert_arg; v32.scan_end_src = cmd->scan_end_src; v32.scan_end_arg = cmd->scan_end_arg; v32.stop_src = cmd->stop_src; v32.stop_arg = cmd->stop_arg; /* Assume chanlist pointer is unchanged. */ v32.chanlist = ptr_to_compat((unsigned int __user *)cmd->chanlist); v32.chanlist_len = cmd->chanlist_len; v32.data = ptr_to_compat(cmd->data); v32.data_len = cmd->data_len; if (copy_to_user(cmd32, &v32, sizeof(v32))) return -EFAULT; return 0; } /* Handle 32-bit COMEDI_CMD ioctl. */ static int compat_cmd(struct file *file, unsigned long arg) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi_cmd cmd; bool copy = false; int rc, err; rc = get_compat_cmd(&cmd, compat_ptr(arg)); if (rc) return rc; mutex_lock(&dev->mutex); rc = do_cmd_ioctl(dev, &cmd, &copy, file); mutex_unlock(&dev->mutex); if (copy) { /* Special case: copy cmd back to user. */ err = put_compat_cmd(compat_ptr(arg), &cmd); if (err) rc = err; } return rc; } /* Handle 32-bit COMEDI_CMDTEST ioctl. */ static int compat_cmdtest(struct file *file, unsigned long arg) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi_cmd cmd; bool copy = false; int rc, err; rc = get_compat_cmd(&cmd, compat_ptr(arg)); if (rc) return rc; mutex_lock(&dev->mutex); rc = do_cmdtest_ioctl(dev, &cmd, &copy, file); mutex_unlock(&dev->mutex); if (copy) { err = put_compat_cmd(compat_ptr(arg), &cmd); if (err) rc = err; } return rc; } /* Copy 32-bit insn structure to native insn structure. */ static int get_compat_insn(struct comedi_insn *insn, struct comedi32_insn_struct __user *insn32) { struct comedi32_insn_struct v32; /* Copy insn structure. Ignore the unused members. */ if (copy_from_user(&v32, insn32, sizeof(v32))) return -EFAULT; memset(insn, 0, sizeof(*insn)); insn->insn = v32.insn; insn->n = v32.n; insn->data = compat_ptr(v32.data); insn->subdev = v32.subdev; insn->chanspec = v32.chanspec; return 0; } /* Handle 32-bit COMEDI_INSNLIST ioctl.
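 * Each 32-bit insn is converted with get_compat_insn() before the
 * native array is handed to do_insnlist_ioctl().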
*/ static int compat_insnlist(struct file *file, unsigned long arg) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi32_insnlist_struct insnlist32; struct comedi32_insn_struct __user *insn32; struct comedi_insn *insns; unsigned int n; int rc; if (copy_from_user(&insnlist32, compat_ptr(arg), sizeof(insnlist32))) return -EFAULT; insns = kcalloc(insnlist32.n_insns, sizeof(*insns), GFP_KERNEL); if (!insns) return -ENOMEM; /* Copy insn structures. */ insn32 = compat_ptr(insnlist32.insns); for (n = 0; n < insnlist32.n_insns; n++) { rc = get_compat_insn(insns + n, insn32 + n); if (rc) { kfree(insns); return rc; } } mutex_lock(&dev->mutex); rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file); mutex_unlock(&dev->mutex); kfree(insns); return rc; } /* Handle 32-bit COMEDI_INSN ioctl. */ static int compat_insn(struct file *file, unsigned long arg) { struct comedi_file *cfp = file->private_data; struct comedi_device *dev = cfp->dev; struct comedi_insn insn; int rc; rc = get_compat_insn(&insn, (void __user *)arg); if (rc) return rc; mutex_lock(&dev->mutex); rc = do_insn_ioctl(dev, &insn, file); mutex_unlock(&dev->mutex); return rc; } /* * compat_ioctl file operation. * * Returns -ENOIOCTLCMD for unrecognised ioctl codes. */ static long comedi_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int rc; switch (cmd) { case COMEDI_DEVCONFIG: case COMEDI_DEVINFO: case COMEDI_SUBDINFO: case COMEDI_BUFCONFIG: case COMEDI_BUFINFO: /* Just need to translate the pointer argument. */ arg = (unsigned long)compat_ptr(arg); rc = comedi_unlocked_ioctl(file, cmd, arg); break; case COMEDI_LOCK: case COMEDI_UNLOCK: case COMEDI_CANCEL: case COMEDI_POLL: case COMEDI_SETRSUBD: case COMEDI_SETWSUBD: /* No translation needed. */ rc = comedi_unlocked_ioctl(file, cmd, arg); break; case COMEDI32_CHANINFO: rc = compat_chaninfo(file, arg); break; case COMEDI32_RANGEINFO: rc = compat_rangeinfo(file, arg); break; case COMEDI32_CMD: rc = compat_cmd(file, arg); break; case COMEDI32_CMDTEST: rc = compat_cmdtest(file, arg); break; case COMEDI32_INSNLIST: rc = compat_insnlist(file, arg); break; case COMEDI32_INSN: rc = compat_insn(file, arg); break; default: rc = -ENOIOCTLCMD; break; } return rc; } #else #define comedi_compat_ioctl NULL #endif static const struct file_operations comedi_fops = { .owner = THIS_MODULE, .unlocked_ioctl = comedi_unlocked_ioctl, .compat_ioctl = comedi_compat_ioctl, .open = comedi_open, .release = comedi_close, .read = comedi_read, .write = comedi_write, .mmap = comedi_mmap, .poll = comedi_poll, .fasync = comedi_fasync, .llseek = noop_llseek, }; /** * comedi_event() - Handle events for asynchronous COMEDI command * @dev: COMEDI device. * @s: COMEDI subdevice. * Context: in_interrupt() (usually), @s->spin_lock spin-lock not held. * * If an asynchronous COMEDI command is active on the subdevice, process * any %COMEDI_CB_... event flags that have been set, usually by an * interrupt handler. These may change the run state of the asynchronous * command, wake a task, and/or send a %SIGIO signal. 
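 *
 * Example (hypothetical interrupt handler fragment; a sketch only, as
 * many drivers use the comedi_handle_events() helper instead):
 *
 *	s->async->events |= COMEDI_CB_BLOCK;
 *	comedi_event(dev, s);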
*/ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; unsigned int events; int si_code = 0; unsigned long flags; spin_lock_irqsave(&s->spin_lock, flags); events = async->events; async->events = 0; if (!__comedi_is_subdevice_running(s)) { spin_unlock_irqrestore(&s->spin_lock, flags); return; } if (events & COMEDI_CB_CANCEL_MASK) __comedi_clear_subdevice_runflags(s, COMEDI_SRF_RUNNING); /* * Remember if an error event has occurred, so an error can be * returned the next time the user does a read() or write(). */ if (events & COMEDI_CB_ERROR_MASK) __comedi_set_subdevice_runflags(s, COMEDI_SRF_ERROR); if (async->cb_mask & events) { wake_up_interruptible(&async->wait_head); si_code = async->cmd.flags & CMDF_WRITE ? POLL_OUT : POLL_IN; } spin_unlock_irqrestore(&s->spin_lock, flags); if (si_code) kill_fasync(&dev->async_queue, SIGIO, si_code); } EXPORT_SYMBOL_GPL(comedi_event); /* Note: the ->mutex is pre-locked on successful return */ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device) { struct comedi_device *dev; struct device *csdev; unsigned int i; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); comedi_device_init(dev); comedi_set_hw_dev(dev, hardware_device); mutex_lock(&dev->mutex); mutex_lock(&comedi_board_minor_table_lock); for (i = hardware_device ? comedi_num_legacy_minors : 0; i < COMEDI_NUM_BOARD_MINORS; ++i) { if (!comedi_board_minor_table[i]) { comedi_board_minor_table[i] = dev; break; } } mutex_unlock(&comedi_board_minor_table_lock); if (i == COMEDI_NUM_BOARD_MINORS) { mutex_unlock(&dev->mutex); comedi_device_cleanup(dev); comedi_dev_put(dev); dev_err(hardware_device, "ran out of minor numbers for board device files\n"); return ERR_PTR(-EBUSY); } dev->minor = i; csdev = device_create(&comedi_class, hardware_device, MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i", i); if (!IS_ERR(csdev)) dev->class_dev = get_device(csdev); /* Note: dev->mutex needs to be unlocked by the caller. 
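 * (comedi_init() below, for example, unlocks it after allocating each
 * legacy board minor.)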
*/ return dev; } void comedi_release_hardware_device(struct device *hardware_device) { int minor; struct comedi_device *dev; for (minor = comedi_num_legacy_minors; minor < COMEDI_NUM_BOARD_MINORS; minor++) { mutex_lock(&comedi_board_minor_table_lock); dev = comedi_board_minor_table[minor]; if (dev && dev->hw_dev == hardware_device) { comedi_board_minor_table[minor] = NULL; mutex_unlock(&comedi_board_minor_table_lock); comedi_free_board_dev(dev); break; } mutex_unlock(&comedi_board_minor_table_lock); } } int comedi_alloc_subdevice_minor(struct comedi_subdevice *s) { struct comedi_device *dev = s->device; struct device *csdev; unsigned int i; mutex_lock(&comedi_subdevice_minor_table_lock); for (i = 0; i < COMEDI_NUM_SUBDEVICE_MINORS; ++i) { if (!comedi_subdevice_minor_table[i]) { comedi_subdevice_minor_table[i] = s; break; } } mutex_unlock(&comedi_subdevice_minor_table_lock); if (i == COMEDI_NUM_SUBDEVICE_MINORS) { dev_err(dev->class_dev, "ran out of minor numbers for subdevice files\n"); return -EBUSY; } i += COMEDI_NUM_BOARD_MINORS; s->minor = i; csdev = device_create(&comedi_class, dev->class_dev, MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i_subd%i", dev->minor, s->index); if (!IS_ERR(csdev)) s->class_dev = csdev; return 0; } void comedi_free_subdevice_minor(struct comedi_subdevice *s) { unsigned int i; if (!s) return; if (s->minor < COMEDI_NUM_BOARD_MINORS || s->minor >= COMEDI_NUM_MINORS) return; i = s->minor - COMEDI_NUM_BOARD_MINORS; mutex_lock(&comedi_subdevice_minor_table_lock); if (s == comedi_subdevice_minor_table[i]) comedi_subdevice_minor_table[i] = NULL; mutex_unlock(&comedi_subdevice_minor_table_lock); if (s->class_dev) { device_destroy(&comedi_class, MKDEV(COMEDI_MAJOR, s->minor)); s->class_dev = NULL; } } static void comedi_cleanup_board_minors(void) { struct comedi_device *dev; unsigned int i; for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) { dev = comedi_clear_board_minor(i); comedi_free_board_dev(dev); } } static int __init comedi_init(void) { int i; int retval; pr_info("version " COMEDI_RELEASE " - http://www.comedi.org\n"); if (comedi_num_legacy_minors > COMEDI_NUM_BOARD_MINORS) { pr_err("invalid value for module parameter \"comedi_num_legacy_minors\". 
Valid values are 0 through %i.\n", COMEDI_NUM_BOARD_MINORS); return -EINVAL; } retval = register_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS, "comedi"); if (retval) return retval; cdev_init(&comedi_cdev, &comedi_fops); comedi_cdev.owner = THIS_MODULE; retval = kobject_set_name(&comedi_cdev.kobj, "comedi"); if (retval) goto out_unregister_chrdev_region; retval = cdev_add(&comedi_cdev, MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS); if (retval) goto out_unregister_chrdev_region; retval = class_register(&comedi_class); if (retval) { pr_err("failed to create class\n"); goto out_cdev_del; } /* create device files for legacy/manual use */ for (i = 0; i < comedi_num_legacy_minors; i++) { struct comedi_device *dev; dev = comedi_alloc_board_minor(NULL); if (IS_ERR(dev)) { retval = PTR_ERR(dev); goto out_cleanup_board_minors; } /* comedi_alloc_board_minor() locked the mutex */ lockdep_assert_held(&dev->mutex); mutex_unlock(&dev->mutex); } /* XXX requires /proc interface */ comedi_proc_init(); return 0; out_cleanup_board_minors: comedi_cleanup_board_minors(); class_unregister(&comedi_class); out_cdev_del: cdev_del(&comedi_cdev); out_unregister_chrdev_region: unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS); return retval; } module_init(comedi_init); static void __exit comedi_cleanup(void) { comedi_cleanup_board_minors(); class_unregister(&comedi_class); cdev_del(&comedi_cdev); unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS); comedi_proc_cleanup(); } module_exit(comedi_cleanup); MODULE_AUTHOR("https://www.comedi.org"); MODULE_DESCRIPTION("Comedi core module"); MODULE_LICENSE("GPL");
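/*
 * Hedged usage sketch, not part of the original file: a 32-bit process on a
 * 64-bit kernel reaches comedi_compat_ioctl() above through the normal
 * ioctl(2) entry point, and the COMEDI32_* handlers exist to translate the
 * differing structure layouts. Assuming a configured board on /dev/comedi0:
 *
 *	int fd = open("/dev/comedi0", O_RDWR);
 *	struct comedi_devinfo info;
 *
 *	if (fd >= 0 && ioctl(fd, COMEDI_DEVINFO, &info) == 0)
 *		printf("driver: %s\n", info.driver_name);
 */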
/* * mm/rmap.c - physical to virtual reverse mappings * * Copyright 2001, Rik van Riel <riel@conectiva.com.br> * Released under the General Public License (GPL). * * Simple, low overhead reverse mapping scheme. * Please try to keep this thing as modular as possible.
 * * Provides methods for unmapping each kind of mapped page: * the anon methods track anonymous pages, and * the file methods track pages belonging to an inode. * * Original design by Rik van Riel <riel@conectiva.com.br> 2001 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 * Contributions by Hugh Dickins 2003, 2004 */ /* * Lock ordering in mm: * * inode->i_rwsem (while writing or truncating, not reading or faulting) * mm->mmap_lock * mapping->invalidate_lock (in filemap_fault) * folio_lock * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below) * vma_start_write * mapping->i_mmap_rwsem * anon_vma->rwsem * mm->page_table_lock or pte_lock * swap_lock (in swap_duplicate, swap_info_get) * mmlist_lock (in mmput, drain_mmlist and others) * mapping->private_lock (in block_dirty_folio) * i_pages lock (widely used) * lruvec->lru_lock (in folio_lruvec_lock_irq) * inode->i_lock (in set_page_dirty's __mark_inode_dirty) * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) * sb_lock (within inode_lock in fs/fs-writeback.c) * i_pages lock (widely used, in set_page_dirty, * in arch-dependent flush_dcache_mmap_lock, * within bdi.wb->list_lock in __sync_single_inode) * * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon) * ->tasklist_lock * pte map lock * * hugetlbfs PageHuge() takes locks in this order: * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) * vma_lock (hugetlb specific lock for pmd_sharing) * mapping->i_mmap_rwsem (also used for hugetlb pmd sharing) * folio_lock */ #include <linux/mm.h> #include <linux/sched/mm.h> #include <linux/sched/task.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/rcupdate.h> #include <linux/export.h> #include <linux/memcontrol.h> #include <linux/mmu_notifier.h> #include <linux/migrate.h> #include <linux/hugetlb.h> #include <linux/huge_mm.h> #include <linux/backing-dev.h> #include <linux/page_idle.h> #include <linux/memremap.h> #include <linux/userfaultfd_k.h> #include <linux/mm_inline.h> #include <linux/oom.h> #include <asm/tlbflush.h> #define CREATE_TRACE_POINTS #include <trace/events/tlb.h> #include <trace/events/migrate.h> #include "internal.h" static struct kmem_cache *anon_vma_cachep; static struct kmem_cache *anon_vma_chain_cachep; static inline struct anon_vma *anon_vma_alloc(void) { struct anon_vma *anon_vma; anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); if (anon_vma) { atomic_set(&anon_vma->refcount, 1); anon_vma->num_children = 0; anon_vma->num_active_vmas = 0; anon_vma->parent = anon_vma; /* * Initialise the anon_vma root to point to itself. If called * from fork, the root will be reset to the parent's anon_vma. */ anon_vma->root = anon_vma; } return anon_vma; } static inline void anon_vma_free(struct anon_vma *anon_vma) { VM_BUG_ON(atomic_read(&anon_vma->refcount)); /* * Synchronize against folio_lock_anon_vma_read() such that * we can safely hold the lock without the anon_vma getting * freed. * * Relies on the full mb implied by the atomic_dec_and_test() from * put_anon_vma() against the acquire barrier implied by * down_read_trylock() from folio_lock_anon_vma_read().
This orders: * * folio_lock_anon_vma_read() VS put_anon_vma() * down_read_trylock() atomic_dec_and_test() * LOCK MB * atomic_read() rwsem_is_locked() * * LOCK should suffice since the actual taking of the lock must * happen _before_ what follows. */ might_sleep(); if (rwsem_is_locked(&anon_vma->root->rwsem)) { anon_vma_lock_write(anon_vma); anon_vma_unlock_write(anon_vma); } kmem_cache_free(anon_vma_cachep, anon_vma); } static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) { return kmem_cache_alloc(anon_vma_chain_cachep, gfp); } static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) { kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); } static void anon_vma_chain_link(struct vm_area_struct *vma, struct anon_vma_chain *avc, struct anon_vma *anon_vma) { avc->vma = vma; avc->anon_vma = anon_vma; list_add(&avc->same_vma, &vma->anon_vma_chain); anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); } /** * __anon_vma_prepare - attach an anon_vma to a memory region * @vma: the memory region in question * * This makes sure the memory mapping described by 'vma' has * an 'anon_vma' attached to it, so that we can associate the * anonymous pages mapped into it with that anon_vma. * * The common case will be that we already have one, which * is handled inline by anon_vma_prepare(). But if * not we either need to find an adjacent mapping that we * can re-use the anon_vma from (very common when the only * reason for splitting a vma has been mprotect()), or we * allocate a new one. * * Anon-vma allocations are very subtle, because we may have * optimistically looked up an anon_vma in folio_lock_anon_vma_read() * and that may actually touch the rwsem even in the newly * allocated vma (it depends on RCU to make sure that the * anon_vma isn't actually destroyed). * * As a result, we need to do proper anon_vma locking even * for the new allocation. At the same time, we do not want * to do any locking for the common case of already having * an anon_vma. */ int __anon_vma_prepare(struct vm_area_struct *vma) { struct mm_struct *mm = vma->vm_mm; struct anon_vma *anon_vma, *allocated; struct anon_vma_chain *avc; mmap_assert_locked(mm); might_sleep(); avc = anon_vma_chain_alloc(GFP_KERNEL); if (!avc) goto out_enomem; anon_vma = find_mergeable_anon_vma(vma); allocated = NULL; if (!anon_vma) { anon_vma = anon_vma_alloc(); if (unlikely(!anon_vma)) goto out_enomem_free_avc; anon_vma->num_children++; /* self-parent link for new root */ allocated = anon_vma; } anon_vma_lock_write(anon_vma); /* page_table_lock to protect against threads */ spin_lock(&mm->page_table_lock); if (likely(!vma->anon_vma)) { vma->anon_vma = anon_vma; anon_vma_chain_link(vma, avc, anon_vma); anon_vma->num_active_vmas++; allocated = NULL; avc = NULL; } spin_unlock(&mm->page_table_lock); anon_vma_unlock_write(anon_vma); if (unlikely(allocated)) put_anon_vma(allocated); if (unlikely(avc)) anon_vma_chain_free(avc); return 0; out_enomem_free_avc: anon_vma_chain_free(avc); out_enomem: return -ENOMEM; } /* * This is a useful helper function for locking the anon_vma root as * we traverse the vma->anon_vma_chain, looping over anon_vma's that * have the same vma. * * Such anon_vma's should have the same root, so you'd expect to see * just a single mutex_lock for the whole traversal. 
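 *
 * A minimal usage sketch (mirroring anon_vma_clone() below; illustrative
 * only):
 *
 *	struct anon_vma *root = NULL;
 *
 *	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 *		root = lock_anon_vma_root(root, avc->anon_vma);
 *	unlock_anon_vma_root(root);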
*/ static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) { struct anon_vma *new_root = anon_vma->root; if (new_root != root) { if (WARN_ON_ONCE(root)) up_write(&root->rwsem); root = new_root; down_write(&root->rwsem); } return root; } static inline void unlock_anon_vma_root(struct anon_vma *root) { if (root) up_write(&root->rwsem); } /* * Attach the anon_vmas from src to dst. * Returns 0 on success, -ENOMEM on failure. * * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(), * copy_vma() and anon_vma_fork(). The first four want an exact copy of src, * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before * the call, we can identify this case by checking (!dst->anon_vma && * src->anon_vma). * * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find * and reuse an existing anon_vma which has no vmas and only one child anon_vma. * This prevents the anon_vma hierarchy from degrading into an endless linear * chain in the case of a constantly forking task. On the other hand, an anon_vma * with more than one child isn't reused even if there is no live vma, so the * rmap walker has a good chance of avoiding a scan of the whole hierarchy when * it searches for where a page is mapped. */ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) { struct anon_vma_chain *avc, *pavc; struct anon_vma *root = NULL; list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { struct anon_vma *anon_vma; avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); if (unlikely(!avc)) { unlock_anon_vma_root(root); root = NULL; avc = anon_vma_chain_alloc(GFP_KERNEL); if (!avc) goto enomem_failure; } anon_vma = pavc->anon_vma; root = lock_anon_vma_root(root, anon_vma); anon_vma_chain_link(dst, avc, anon_vma); /* * Reuse existing anon_vma if it has no vma and only one * anon_vma child. * * Root anon_vma is never reused: * it has self-parent reference and at least one child. */ if (!dst->anon_vma && src->anon_vma && anon_vma->num_children < 2 && anon_vma->num_active_vmas == 0) dst->anon_vma = anon_vma; } if (dst->anon_vma) dst->anon_vma->num_active_vmas++; unlock_anon_vma_root(root); return 0; enomem_failure: /* * dst->anon_vma is dropped here otherwise its num_active_vmas can * be incorrectly decremented in unlink_anon_vmas(). * We can safely do this because callers of anon_vma_clone() don't care * about dst->anon_vma if anon_vma_clone() failed. */ dst->anon_vma = NULL; unlink_anon_vmas(dst); return -ENOMEM; } /* * Attach vma to its own anon_vma, as well as to the anon_vmas that * the corresponding VMA in the parent process is attached to. * Returns 0 on success, non-zero on failure. */ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) { struct anon_vma_chain *avc; struct anon_vma *anon_vma; int error; /* Don't bother if the parent process has no anon_vma here. */ if (!pvma->anon_vma) return 0; /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ vma->anon_vma = NULL; /* * First, attach the new VMA to the parent VMA's anon_vmas, * so rmap can find non-COWed pages in child processes. */ error = anon_vma_clone(vma, pvma); if (error) return error; /* An existing anon_vma has been reused, all done then. */ if (vma->anon_vma) return 0; /* Then add our own anon_vma.
*/ anon_vma = anon_vma_alloc(); if (!anon_vma) goto out_error; anon_vma->num_active_vmas++; avc = anon_vma_chain_alloc(GFP_KERNEL); if (!avc) goto out_error_free_anon_vma; /* * The root anon_vma's rwsem is the lock actually used when we * lock any of the anon_vmas in this anon_vma tree. */ anon_vma->root = pvma->anon_vma->root; anon_vma->parent = pvma->anon_vma; /* * With refcounts, an anon_vma can stay around longer than the * process it belongs to. The root anon_vma needs to be pinned until * this anon_vma is freed, because the lock lives in the root. */ get_anon_vma(anon_vma->root); /* Mark this anon_vma as the one where our new (COWed) pages go. */ vma->anon_vma = anon_vma; anon_vma_lock_write(anon_vma); anon_vma_chain_link(vma, avc, anon_vma); anon_vma->parent->num_children++; anon_vma_unlock_write(anon_vma); return 0; out_error_free_anon_vma: put_anon_vma(anon_vma); out_error: unlink_anon_vmas(vma); return -ENOMEM; } void unlink_anon_vmas(struct vm_area_struct *vma) { struct anon_vma_chain *avc, *next; struct anon_vma *root = NULL; /* * Unlink each anon_vma chained to the VMA. This list is ordered * from newest to oldest, ensuring the root anon_vma gets freed last. */ list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { struct anon_vma *anon_vma = avc->anon_vma; root = lock_anon_vma_root(root, anon_vma); anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); /* * Leave empty anon_vmas on the list - we'll need * to free them outside the lock. */ if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { anon_vma->parent->num_children--; continue; } list_del(&avc->same_vma); anon_vma_chain_free(avc); } if (vma->anon_vma) { vma->anon_vma->num_active_vmas--; /* * The vma would still be needed after the unlink, and the anon_vma * will be prepared again when handling a fault. */ vma->anon_vma = NULL; } unlock_anon_vma_root(root); /* * Iterate the list once more; it now only contains empty and unlinked * anon_vmas. Destroy them. We could not do this earlier because * __put_anon_vma() needs to write-acquire the anon_vma->root->rwsem. */ list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { struct anon_vma *anon_vma = avc->anon_vma; VM_WARN_ON(anon_vma->num_children); VM_WARN_ON(anon_vma->num_active_vmas); put_anon_vma(anon_vma); list_del(&avc->same_vma); anon_vma_chain_free(avc); } } static void anon_vma_ctor(void *data) { struct anon_vma *anon_vma = data; init_rwsem(&anon_vma->rwsem); atomic_set(&anon_vma->refcount, 0); anon_vma->rb_root = RB_ROOT_CACHED; } void __init anon_vma_init(void) { anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, anon_vma_ctor); anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC|SLAB_ACCOUNT); } /* * Getting a lock on a stable anon_vma from a page off the LRU is tricky! * * Since there is no serialization whatsoever against folio_remove_rmap_*() * the best this function can do is return a refcount-increased anon_vma * that might have been relevant to this page. * * The page might have been remapped to a different anon_vma or the anon_vma * returned may already be freed (and even reused). * * In case it was remapped to a different anon_vma, the new anon_vma will be a * child of the old anon_vma, and the anon_vma lifetime rules will therefore * ensure that any anon_vma obtained from the page will still be valid for as * long as we observe page_mapped() [ hence all those page_mapped() tests ].
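 *
 * A hedged caller sketch (the get/lock/use/unlock/put pattern; illustrative
 * only):
 *
 *	anon_vma = folio_get_anon_vma(folio);
 *	if (anon_vma) {
 *		anon_vma_lock_read(anon_vma);
 *		(... use the rmap while re-checking folio_mapped() ...)
 *		anon_vma_unlock_read(anon_vma);
 *		put_anon_vma(anon_vma);
 *	}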
 * * All users of this function must be very careful when walking the anon_vma * chain and verify that the page in question is indeed mapped in it * [ something equivalent to page_mapped_in_vma() ]. * * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid * if there is a mapcount, we can dereference the anon_vma after observing * those. * * NOTE: the caller should normally hold the folio lock when calling this. If * not, the caller needs to double check the anon_vma didn't change after * taking the anon_vma lock for either read or write (UFFDIO_MOVE can modify it * concurrently without folio lock protection). See folio_lock_anon_vma_read() * which has already covered that, and comment above remap_pages(). */ struct anon_vma *folio_get_anon_vma(const struct folio *folio) { struct anon_vma *anon_vma = NULL; unsigned long anon_mapping; rcu_read_lock(); anon_mapping = (unsigned long)READ_ONCE(folio->mapping); if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) goto out; if (!folio_mapped(folio)) goto out; anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); if (!atomic_inc_not_zero(&anon_vma->refcount)) { anon_vma = NULL; goto out; } /* * If this folio is still mapped, then its anon_vma cannot have been * freed. But if it has been unmapped, we have no security against the * anon_vma structure being freed and reused (for another anon_vma: * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() * above cannot corrupt). */ if (!folio_mapped(folio)) { rcu_read_unlock(); put_anon_vma(anon_vma); return NULL; } out: rcu_read_unlock(); return anon_vma; } /* * Similar to folio_get_anon_vma() except it locks the anon_vma. * * It's a little more complex as it tries to keep the fast path to a single * atomic op -- the trylock. If we fail the trylock, we fall back to getting a * reference like with folio_get_anon_vma() and then block on the mutex * in the !rwc->try_lock case. */ struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio, struct rmap_walk_control *rwc) { struct anon_vma *anon_vma = NULL; struct anon_vma *root_anon_vma; unsigned long anon_mapping; retry: rcu_read_lock(); anon_mapping = (unsigned long)READ_ONCE(folio->mapping); if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) goto out; if (!folio_mapped(folio)) goto out; anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); root_anon_vma = READ_ONCE(anon_vma->root); if (down_read_trylock(&root_anon_vma->rwsem)) { /* * folio_move_anon_rmap() might have changed the anon_vma as we * might not hold the folio lock here. */ if (unlikely((unsigned long)READ_ONCE(folio->mapping) != anon_mapping)) { up_read(&root_anon_vma->rwsem); rcu_read_unlock(); goto retry; } /* * If the folio is still mapped, then this anon_vma is still * its anon_vma, and holding the mutex ensures that it will * not go away, see anon_vma_free(). */ if (!folio_mapped(folio)) { up_read(&root_anon_vma->rwsem); anon_vma = NULL; } goto out; } if (rwc && rwc->try_lock) { anon_vma = NULL; rwc->contended = true; goto out; } /* trylock failed, we have to sleep */ if (!atomic_inc_not_zero(&anon_vma->refcount)) { anon_vma = NULL; goto out; } if (!folio_mapped(folio)) { rcu_read_unlock(); put_anon_vma(anon_vma); return NULL; } /* we pinned the anon_vma, it's safe to sleep */ rcu_read_unlock(); anon_vma_lock_read(anon_vma); /* * folio_move_anon_rmap() might have changed the anon_vma as we might * not hold the folio lock here.
*/ if (unlikely((unsigned long)READ_ONCE(folio->mapping) != anon_mapping)) { anon_vma_unlock_read(anon_vma); put_anon_vma(anon_vma); anon_vma = NULL; goto retry; } if (atomic_dec_and_test(&anon_vma->refcount)) { /* * Oops, we held the last refcount, release the lock * and bail -- can't simply use put_anon_vma() because * we'll deadlock on the anon_vma_lock_write() recursion. */ anon_vma_unlock_read(anon_vma); __put_anon_vma(anon_vma); anon_vma = NULL; } return anon_vma; out: rcu_read_unlock(); return anon_vma; } #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH /* * Flush TLB entries for recently unmapped pages from remote CPUs. It is * important if a PTE was dirty when it was unmapped that it's flushed * before any IO is initiated on the page to prevent lost writes. Similarly, * it must be flushed before freeing to prevent data leakage. */ void try_to_unmap_flush(void) { struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; if (!tlb_ubc->flush_required) return; arch_tlbbatch_flush(&tlb_ubc->arch); tlb_ubc->flush_required = false; tlb_ubc->writable = false; } /* Flush iff there are potentially writable TLB entries that can race with IO */ void try_to_unmap_flush_dirty(void) { struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; if (tlb_ubc->writable) try_to_unmap_flush(); } /* * Bits 0-14 of mm->tlb_flush_batched record pending generations. * Bits 16-30 of mm->tlb_flush_batched record flushed generations. */ #define TLB_FLUSH_BATCH_FLUSHED_SHIFT 16 #define TLB_FLUSH_BATCH_PENDING_MASK \ ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1) #define TLB_FLUSH_BATCH_PENDING_LARGE \ (TLB_FLUSH_BATCH_PENDING_MASK / 2) static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, unsigned long start, unsigned long end) { struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; int batch; bool writable = pte_dirty(pteval); if (!pte_accessible(mm, pteval)) return; arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end); tlb_ubc->flush_required = true; /* * Ensure compiler does not re-order the setting of tlb_flush_batched * before the PTE is cleared. */ barrier(); batch = atomic_read(&mm->tlb_flush_batched); retry: if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) { /* * Prevent `pending' from catching up with `flushed' because of * overflow. Reset `pending' and `flushed' to be 1 and 0 if * `pending' becomes large. */ if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1)) goto retry; } else { atomic_inc(&mm->tlb_flush_batched); } /* * If the PTE was dirty then it's best to assume it's writable. The * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() * before the page is queued for IO. */ if (writable) tlb_ubc->writable = true; } /* * Returns true if the TLB flush should be deferred to the end of a batch of * unmap operations to reduce IPIs. */ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) { if (!(flags & TTU_BATCH_FLUSH)) return false; return arch_tlbbatch_should_defer(mm); } /* * Reclaim unmaps pages under the PTL but does not flush the TLB prior to * releasing the PTL if TLB flushes are batched. It's possible for a parallel * operation such as mprotect or munmap to race between reclaim unmapping * the page and flushing the page. If this race occurs, it potentially allows * access to data via a stale TLB entry. Tracking all mm's that have TLB * batching in flight would be expensive during reclaim so instead track * whether TLB batching occurred in the past and if so then do a flush here * if required.
This will cost one additional flush per reclaim cycle paid * by the first operation at risk such as mprotect and munmap. * * This must be called under the PTL so that an access to tlb_flush_batched * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise * via the PTL. */ void flush_tlb_batched_pending(struct mm_struct *mm) { int batch = atomic_read(&mm->tlb_flush_batched); int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK; int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT; if (pending != flushed) { arch_flush_tlb_batched_pending(mm); /* * If a new TLB flush became pending while this one was in * progress, leave mm->tlb_flush_batched as is to avoid losing * that flush. */ atomic_cmpxchg(&mm->tlb_flush_batched, batch, pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT)); } } #else static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, unsigned long start, unsigned long end) { } static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) { return false; } #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ /** * page_address_in_vma - The virtual address of a page in this VMA. * @folio: The folio containing the page. * @page: The page within the folio. * @vma: The VMA we need to know the address in. * * Calculates the user virtual address of this page in the specified VMA. * It is the caller's responsibility to check the page is actually * within the VMA. There may not currently be a PTE pointing at this * page, but if a page fault occurs at this address, this is the page * which will be accessed. * * Context: Caller should hold a reference to the folio. Caller should * hold a lock (eg the i_mmap_lock or the mmap_lock) which keeps the * VMA from being altered. * * Return: The virtual address corresponding to this page in the VMA. */ unsigned long page_address_in_vma(const struct folio *folio, const struct page *page, const struct vm_area_struct *vma) { if (folio_test_anon(folio)) { struct anon_vma *anon_vma = folio_anon_vma(folio); /* * Note: swapoff's unuse_vma() is more efficient with this * check, and needs it to match anon_vma when KSM is active. */ if (!vma->anon_vma || !anon_vma || vma->anon_vma->root != anon_vma->root) return -EFAULT; } else if (!vma->vm_file) { return -EFAULT; } else if (vma->vm_file->f_mapping != folio->mapping) { return -EFAULT; } /* KSM folios don't reach here because of the !anon_vma check */ return vma_address(vma, page_pgoff(folio, page), 1); } /* * Returns the actual pmd_t* where we expect 'address' to be mapped from, or * NULL if it doesn't exist. No guarantees / checks on what the pmd_t* * represents.
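 *
 * Illustrative (hedged) caller pattern -- the returned pmd must still be
 * re-validated under the page table lock before use:
 *
 *	pmd_t *pmd = mm_find_pmd(mm, address);
 *	spinlock_t *ptl;
 *
 *	if (!pmd)
 *		return;
 *	ptl = pmd_lock(mm, pmd);
 *	(... re-check *pmd under ptl before relying on it ...)
 *	spin_unlock(ptl);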
*/ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd = NULL; pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto out; p4d = p4d_offset(pgd, address); if (!p4d_present(*p4d)) goto out; pud = pud_offset(p4d, address); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); out: return pmd; } struct folio_referenced_arg { int mapcount; int referenced; unsigned long vm_flags; struct mem_cgroup *memcg; }; /* * arg: folio_referenced_arg will be passed */ static bool folio_referenced_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { struct folio_referenced_arg *pra = arg; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); int referenced = 0; unsigned long start = address, ptes = 0; while (page_vma_mapped_walk(&pvmw)) { address = pvmw.address; if (vma->vm_flags & VM_LOCKED) { if (!folio_test_large(folio) || !pvmw.pte) { /* Restore the mlock which got missed */ mlock_vma_folio(folio, vma); page_vma_mapped_walk_done(&pvmw); pra->vm_flags |= VM_LOCKED; return false; /* To break the loop */ } /* * A large folio that is fully mapped to the VMA will * be handled after the pvmw loop. * * A large folio that crosses VMA boundaries is expected * to be picked up by page reclaim, which should skip * references to pages inside the range of a VM_LOCKED * vma and count only references to pages outside that * range. */ ptes++; pra->mapcount--; continue; } /* * Skip the non-shared swapbacked folio mapped solely by * the exiting or OOM-reaped process. This avoids redundant * swap-out followed by an immediate unmap. */ if ((!atomic_read(&vma->vm_mm->mm_users) || check_stable_address_space(vma->vm_mm)) && folio_test_anon(folio) && folio_test_swapbacked(folio) && !folio_maybe_mapped_shared(folio)) { pra->referenced = -1; page_vma_mapped_walk_done(&pvmw); return false; } if (lru_gen_enabled() && pvmw.pte) { if (lru_gen_look_around(&pvmw)) referenced++; } else if (pvmw.pte) { if (ptep_clear_flush_young_notify(vma, address, pvmw.pte)) referenced++; } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { if (pmdp_clear_flush_young_notify(vma, address, pvmw.pmd)) referenced++; } else { /* unexpected pmd-mapped folio? */ WARN_ON_ONCE(1); } pra->mapcount--; } if ((vma->vm_flags & VM_LOCKED) && folio_test_large(folio) && folio_within_vma(folio, vma)) { unsigned long s_align, e_align; s_align = ALIGN_DOWN(start, PMD_SIZE); e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE); /* folio doesn't cross a page table boundary and is fully mapped */ if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) { /* Restore the mlock which got missed */ mlock_vma_folio(folio, vma); pra->vm_flags |= VM_LOCKED; return false; /* To break the loop */ } } if (referenced) folio_clear_idle(folio); if (folio_test_clear_young(folio)) referenced++; if (referenced) { pra->referenced++; pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; } if (!pra->mapcount) return false; /* To break the loop */ return true; } static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) { struct folio_referenced_arg *pra = arg; struct mem_cgroup *memcg = pra->memcg; /* * Ignore references from this mapping if it has no recency. If the * folio has been used in another mapping, we will catch it; if this * other mapping is already gone, the unmap path will have set the * referenced flag or activated the folio in zap_pte_range().
*/ if (!vma_has_recency(vma)) return true; /* * If we are reclaiming on behalf of a cgroup, skip counting on behalf * of references from different cgroups. */ if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) return true; return false; } /** * folio_referenced() - Test if the folio was referenced. * @folio: The folio to test. * @is_locked: Caller holds lock on the folio. * @memcg: target memory cgroup * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. * * Quick test_and_clear_referenced for all mappings of a folio. * * Return: The number of mappings which referenced the folio. Return -1 if * the function bailed out due to rmap lock contention. */ int folio_referenced(struct folio *folio, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags) { bool we_locked = false; struct folio_referenced_arg pra = { .mapcount = folio_mapcount(folio), .memcg = memcg, }; struct rmap_walk_control rwc = { .rmap_one = folio_referenced_one, .arg = (void *)&pra, .anon_lock = folio_lock_anon_vma_read, .try_lock = true, .invalid_vma = invalid_folio_referenced_vma, }; *vm_flags = 0; if (!pra.mapcount) return 0; if (!folio_raw_mapping(folio)) return 0; if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { we_locked = folio_trylock(folio); if (!we_locked) return 1; } rmap_walk(folio, &rwc); *vm_flags = pra.vm_flags; if (we_locked) folio_unlock(folio); return rwc.contended ? -1 : pra.referenced; } static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) { int cleaned = 0; struct vm_area_struct *vma = pvmw->vma; struct mmu_notifier_range range; unsigned long address = pvmw->address; /* * We have to assume the worst case, i.e. pmd invalidation. Note that * the folio cannot be freed from this function. */ mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, vma->vm_mm, address, vma_address_end(pvmw)); mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(pvmw)) { int ret = 0; address = pvmw->address; if (pvmw->pte) { pte_t *pte = pvmw->pte; pte_t entry = ptep_get(pte); /* * PFN swap PTEs, such as device-exclusive ones, that * actually map pages are clean and not writable from a * CPU perspective. The MMU notifier takes care of any * device aspects. */ if (!pte_present(entry)) continue; if (!pte_dirty(entry) && !pte_write(entry)) continue; flush_cache_page(vma, address, pte_pfn(entry)); entry = ptep_clear_flush(vma, address, pte); entry = pte_wrprotect(entry); entry = pte_mkclean(entry); set_pte_at(vma->vm_mm, address, pte, entry); ret = 1; } else { #ifdef CONFIG_TRANSPARENT_HUGEPAGE pmd_t *pmd = pvmw->pmd; pmd_t entry; if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) continue; flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); entry = pmdp_invalidate(vma, address, pmd); entry = pmd_wrprotect(entry); entry = pmd_mkclean(entry); set_pmd_at(vma->vm_mm, address, pmd, entry); ret = 1; #else /* unexpected pmd-mapped folio?
*/ WARN_ON_ONCE(1); #endif } if (ret) cleaned++; } mmu_notifier_invalidate_range_end(&range); return cleaned; } static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); int *cleaned = arg; *cleaned += page_vma_mkclean_one(&pvmw); return true; } static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) { if (vma->vm_flags & VM_SHARED) return false; return true; } int folio_mkclean(struct folio *folio) { int cleaned = 0; struct address_space *mapping; struct rmap_walk_control rwc = { .arg = (void *)&cleaned, .rmap_one = page_mkclean_one, .invalid_vma = invalid_mkclean_vma, }; BUG_ON(!folio_test_locked(folio)); if (!folio_mapped(folio)) return 0; mapping = folio_mapping(folio); if (!mapping) return 0; rmap_walk(folio, &rwc); return cleaned; } EXPORT_SYMBOL_GPL(folio_mkclean); struct wrprotect_file_state { int cleaned; pgoff_t pgoff; unsigned long pfn; unsigned long nr_pages; }; static bool mapping_wrprotect_range_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { struct wrprotect_file_state *state = (struct wrprotect_file_state *)arg; struct page_vma_mapped_walk pvmw = { .pfn = state->pfn, .nr_pages = state->nr_pages, .pgoff = state->pgoff, .vma = vma, .address = address, .flags = PVMW_SYNC, }; state->cleaned += page_vma_mkclean_one(&pvmw); return true; } static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, pgoff_t pgoff_start, unsigned long nr_pages, struct rmap_walk_control *rwc, bool locked); /** * mapping_wrprotect_range() - Write-protect all mappings in a specified range. * * @mapping: The mapping whose reverse mapping should be traversed. * @pgoff: The page offset at which @pfn is mapped within @mapping. * @pfn: The PFN of the page mapped in @mapping at @pgoff. * @nr_pages: The number of physically contiguous base pages spanned. * * Traverses the reverse mapping, finding all VMAs which contain a shared * mapping of the pages in the specified range in @mapping, and write-protects * them (that is, updates the page tables to mark the mappings read-only such * that a write protection fault arises when the mappings are written to). * * The @pfn value need not refer to a folio, but rather can reference a kernel * allocation which is mapped into userland. We therefore do not require that * the page maps to a folio with a valid mapping or index field, rather the * caller specifies these in @mapping and @pgoff. * * Return: the number of write-protected PTEs, or an error. */ int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff, unsigned long pfn, unsigned long nr_pages) { struct wrprotect_file_state state = { .cleaned = 0, .pgoff = pgoff, .pfn = pfn, .nr_pages = nr_pages, }; struct rmap_walk_control rwc = { .arg = (void *)&state, .rmap_one = mapping_wrprotect_range_one, .invalid_vma = invalid_mkclean_vma, }; if (!mapping) return 0; __rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc, /* locked = */false); return state.cleaned; } EXPORT_SYMBOL_GPL(mapping_wrprotect_range); /** * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) * within the @vma of shared mappings. And since clean PTEs * should also be readonly, write protects them too. * @pfn: start pfn. * @nr_pages: number of physically contiguous pages starting with @pfn. * @pgoff: page offset that the @pfn mapped with.
* @vma: vma that @pfn mapped within. * * Returns the number of cleaned PTEs (including PMDs). */ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, struct vm_area_struct *vma) { struct page_vma_mapped_walk pvmw = { .pfn = pfn, .nr_pages = nr_pages, .pgoff = pgoff, .vma = vma, .flags = PVMW_SYNC, }; if (invalid_mkclean_vma(vma, NULL)) return 0; pvmw.address = vma_address(vma, pgoff, nr_pages); VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); return page_vma_mkclean_one(&pvmw); } static __always_inline unsigned int __folio_add_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, enum rmap_level level, int *nr_pmdmapped) { atomic_t *mapped = &folio->_nr_pages_mapped; const int orig_nr_pages = nr_pages; int first = 0, nr = 0; __folio_rmap_sanity_checks(folio, page, nr_pages, level); switch (level) { case RMAP_LEVEL_PTE: if (!folio_test_large(folio)) { nr = atomic_inc_and_test(&folio->_mapcount); break; } if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma); if (nr == orig_nr_pages) /* Was completely unmapped. */ nr = folio_large_nr_pages(folio); else nr = 0; break; } do { first += atomic_inc_and_test(&page->_mapcount); } while (page++, --nr_pages > 0); if (first && atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED) nr = first; folio_add_large_mapcount(folio, orig_nr_pages, vma); break; case RMAP_LEVEL_PMD: case RMAP_LEVEL_PUD: first = atomic_inc_and_test(&folio->_entire_mapcount); if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { if (level == RMAP_LEVEL_PMD && first) *nr_pmdmapped = folio_large_nr_pages(folio); nr = folio_inc_return_large_mapcount(folio, vma); if (nr == 1) /* Was completely unmapped. */ nr = folio_large_nr_pages(folio); else nr = 0; break; } if (first) { nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { nr_pages = folio_large_nr_pages(folio); /* * We only track PMD mappings of PMD-sized * folios separately. */ if (level == RMAP_LEVEL_PMD) *nr_pmdmapped = nr_pages; nr = nr_pages - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of a remove and another add? */ if (unlikely(nr < 0)) nr = 0; } else { /* Raced ahead of a remove of ENTIRELY_MAPPED */ nr = 0; } } folio_inc_large_mapcount(folio, vma); break; } return nr; } /** * folio_move_anon_rmap - move a folio to our anon_vma * @folio: The folio to move to our anon_vma * @vma: The vma the folio belongs to * * When a folio belongs exclusively to one process after a COW event, * that folio can be moved into the anon_vma that belongs to just that * process, so the rmap code will not search the parent or sibling processes. */ void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) { void *anon_vma = vma->anon_vma; VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_VMA(!anon_vma, vma); anon_vma += PAGE_MAPPING_ANON; /* * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written * simultaneously, so a concurrent reader (eg folio_referenced()'s * folio_test_anon()) will not see one without the other. */ WRITE_ONCE(folio->mapping, anon_vma); } /** * __folio_set_anon - set up a new anonymous rmap for a folio * @folio: The folio to set up the new anonymous rmap for. * @vma: VM area to add the folio to. * @address: User virtual address of the mapping * @exclusive: Whether the folio is exclusive to the process. 
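 *
 * (Hedged note, not in the original kernel-doc: the encoding written by this
 * helper is the anon_vma pointer tagged with PAGE_MAPPING_ANON, i.e.
 *
 *	folio->mapping = (struct address_space *)
 *			 ((void *)anon_vma + PAGE_MAPPING_ANON);
 *
 * which is what folio_test_anon() and folio_anon_vma() later decode.)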
*/ static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, unsigned long address, bool exclusive) { struct anon_vma *anon_vma = vma->anon_vma; BUG_ON(!anon_vma); /* * If the folio isn't exclusive to this vma, we must use the _oldest_ * possible anon_vma for the folio mapping! */ if (!exclusive) anon_vma = anon_vma->root; /* * page_idle does a lockless/optimistic rmap scan on folio->mapping. * Make sure the compiler doesn't split the stores of anon_vma and * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code * could mistake the mapping for a struct address_space and crash. */ anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); folio->index = linear_page_index(vma, address); } /** * __page_check_anon_rmap - sanity check anonymous rmap addition * @folio: The folio containing @page. * @page: the page to check the mapping of * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped */ static void __page_check_anon_rmap(const struct folio *folio, const struct page *page, struct vm_area_struct *vma, unsigned long address) { /* * The page's anon-rmap details (mapping and index) are guaranteed to * be set up correctly at this point. * * We have exclusion against folio_add_anon_rmap_*() because the caller * always holds the page locked. * * We have exclusion against folio_add_new_anon_rmap because those pages * are initially only visible via the pagetables, and the pte is locked * over the call to folio_add_new_anon_rmap. */ VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, folio); VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address), page); } static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped) { int idx; if (nr) { idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; __lruvec_stat_mod_folio(folio, idx, nr); } if (nr_pmdmapped) { if (folio_test_anon(folio)) { idx = NR_ANON_THPS; __lruvec_stat_mod_folio(folio, idx, nr_pmdmapped); } else { /* NR_*_PMDMAPPED are not maintained per-memcg */ idx = folio_test_swapbacked(folio) ? NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED; __mod_node_page_state(folio_pgdat(folio), idx, nr_pmdmapped); } } } static __always_inline void __folio_add_anon_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, unsigned long address, rmap_t flags, enum rmap_level level) { int i, nr, nr_pmdmapped = 0; VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); nr = __folio_add_rmap(folio, page, nr_pages, vma, level, &nr_pmdmapped); if (likely(!folio_test_ksm(folio))) __page_check_anon_rmap(folio, page, vma, address); __folio_mod_stat(folio, nr, nr_pmdmapped); if (flags & RMAP_EXCLUSIVE) { switch (level) { case RMAP_LEVEL_PTE: for (i = 0; i < nr_pages; i++) SetPageAnonExclusive(page + i); break; case RMAP_LEVEL_PMD: SetPageAnonExclusive(page); break; case RMAP_LEVEL_PUD: /* * Keep the compiler happy, we don't support anonymous * PUD mappings. */ WARN_ON_ONCE(1); break; } } VM_WARN_ON_FOLIO(!folio_test_large(folio) && PageAnonExclusive(page) && atomic_read(&folio->_mapcount) > 0, folio); for (i = 0; i < nr_pages; i++) { struct page *cur_page = page + i; VM_WARN_ON_FOLIO(folio_test_large(folio) && folio_entire_mapcount(folio) > 1 && PageAnonExclusive(cur_page), folio); if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) continue; /* * While PTE-mapping a THP we have a PMD and a PTE * mapping. 
*/ VM_WARN_ON_FOLIO(atomic_read(&cur_page->_mapcount) > 0 && PageAnonExclusive(cur_page), folio); } /* * For large folio, only mlock it if it's fully mapped to VMA. It's * not easy to check whether the large folio is fully mapped to VMA * here. Only mlock normal 4K folio and leave page reclaim to handle * large folio. */ if (!folio_test_large(folio)) mlock_vma_folio(folio, vma); } /** * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio * @folio: The folio to add the mappings to * @page: The first page to add * @nr_pages: The number of pages which will be mapped * @vma: The vm area in which the mappings are added * @address: The user virtual address of the first page to map * @flags: The rmap flags * * The page range of folio is defined by [first_page, first_page + nr_pages) * * The caller needs to hold the page table lock, and the page must be locked in * the anon_vma case: to serialize mapping,index checking after setting, * and to ensure that an anon folio is not being upgraded racily to a KSM folio * (but KSM folios are never downgraded). */ void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags, RMAP_LEVEL_PTE); } /** * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio * @folio: The folio to add the mapping to * @page: The first page to add * @vma: The vm area in which the mapping is added * @address: The user virtual address of the first page to map * @flags: The rmap flags * * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR) * * The caller needs to hold the page table lock, and the page must be locked in * the anon_vma case: to serialize mapping,index checking after setting. */ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, RMAP_LEVEL_PMD); #else WARN_ON_ONCE(true); #endif } /** * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. * @folio: The folio to add the mapping to. * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped * @flags: The rmap flags * * Like folio_add_anon_rmap_*() but must only be called on *new* folios. * This means the inc-and-test can be bypassed. * The folio doesn't necessarily need to be locked while it's exclusive * unless two threads map it concurrently. However, the folio must be * locked if it's shared. * * If the folio is pmd-mappable, it is accounted as a THP. */ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { const bool exclusive = flags & RMAP_EXCLUSIVE; int nr = 1, nr_pmdmapped = 0; VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio); /* * VM_DROPPABLE mappings don't swap; instead they're just dropped when * under memory pressure. 
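	 * Such mappings stay !swapbacked, so the MADV_FREE-style discard
	 * logic in try_to_unmap_one() applies to them as well (a hedged
	 * reading of the VM_DROPPABLE checks elsewhere in this file).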
*/ if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE)) __folio_set_swapbacked(folio); __folio_set_anon(folio, vma, address, exclusive); if (likely(!folio_test_large(folio))) { /* increment count (starts at -1) */ atomic_set(&folio->_mapcount, 0); if (exclusive) SetPageAnonExclusive(&folio->page); } else if (!folio_test_pmd_mappable(folio)) { int i; nr = folio_large_nr_pages(folio); for (i = 0; i < nr; i++) { struct page *page = folio_page(folio, i); if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) /* increment count (starts at -1) */ atomic_set(&page->_mapcount, 0); if (exclusive) SetPageAnonExclusive(page); } folio_set_large_mapcount(folio, nr, vma); if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) atomic_set(&folio->_nr_pages_mapped, nr); } else { nr = folio_large_nr_pages(folio); /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); folio_set_large_mapcount(folio, 1, vma); if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); if (exclusive) SetPageAnonExclusive(&folio->page); nr_pmdmapped = nr; } VM_WARN_ON_ONCE(address < vma->vm_start || address + (nr << PAGE_SHIFT) > vma->vm_end); __folio_mod_stat(folio, nr, nr_pmdmapped); mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); } static __always_inline void __folio_add_file_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, enum rmap_level level) { int nr, nr_pmdmapped = 0; VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); nr = __folio_add_rmap(folio, page, nr_pages, vma, level, &nr_pmdmapped); __folio_mod_stat(folio, nr, nr_pmdmapped); /* See comments in folio_add_anon_rmap_*() */ if (!folio_test_large(folio)) mlock_vma_folio(folio, vma); } /** * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio * @folio: The folio to add the mappings to * @page: The first page to add * @nr_pages: The number of pages that will be mapped using PTEs * @vma: The vm area in which the mappings are added * * The page range of the folio is defined by [page, page + nr_pages) * * The caller needs to hold the page table lock. */ void folio_add_file_rmap_ptes(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma) { __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); } /** * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio * @folio: The folio to add the mapping to * @page: The first page to add * @vma: The vm area in which the mapping is added * * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) * * The caller needs to hold the page table lock. */ void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, struct vm_area_struct *vma) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); #else WARN_ON_ONCE(true); #endif } /** * folio_add_file_rmap_pud - add a PUD mapping to a page range of a folio * @folio: The folio to add the mapping to * @page: The first page to add * @vma: The vm area in which the mapping is added * * The page range of the folio is defined by [page, page + HPAGE_PUD_NR) * * The caller needs to hold the page table lock. 
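 *
 * Hedged caller sketch, mirroring the PMD variant; the locking helpers
 * shown are assumptions, not taken from this file:
 *
 *	ptl = pud_lock(vma->vm_mm, pudp);
 *	folio_add_file_rmap_pud(folio, &folio->page, vma);
 *	spin_unlock(ptl);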
*/ void folio_add_file_rmap_pud(struct folio *folio, struct page *page, struct vm_area_struct *vma) { #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) __folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD); #else WARN_ON_ONCE(true); #endif } static __always_inline void __folio_remove_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, enum rmap_level level) { atomic_t *mapped = &folio->_nr_pages_mapped; int last = 0, nr = 0, nr_pmdmapped = 0; bool partially_mapped = false; __folio_rmap_sanity_checks(folio, page, nr_pages, level); switch (level) { case RMAP_LEVEL_PTE: if (!folio_test_large(folio)) { nr = atomic_add_negative(-1, &folio->_mapcount); break; } if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { nr = folio_sub_return_large_mapcount(folio, nr_pages, vma); if (!nr) { /* Now completely unmapped. */ nr = folio_nr_pages(folio); } else { partially_mapped = nr < folio_large_nr_pages(folio) && !folio_entire_mapcount(folio); nr = 0; } break; } folio_sub_large_mapcount(folio, nr_pages, vma); do { last += atomic_add_negative(-1, &page->_mapcount); } while (page++, --nr_pages > 0); if (last && atomic_sub_return_relaxed(last, mapped) < ENTIRELY_MAPPED) nr = last; partially_mapped = nr && atomic_read(mapped); break; case RMAP_LEVEL_PMD: case RMAP_LEVEL_PUD: if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { last = atomic_add_negative(-1, &folio->_entire_mapcount); if (level == RMAP_LEVEL_PMD && last) nr_pmdmapped = folio_large_nr_pages(folio); nr = folio_dec_return_large_mapcount(folio, vma); if (!nr) { /* Now completely unmapped. */ nr = folio_large_nr_pages(folio); } else { partially_mapped = last && nr < folio_large_nr_pages(folio); nr = 0; } break; } folio_dec_large_mapcount(folio, vma); last = atomic_add_negative(-1, &folio->_entire_mapcount); if (last) { nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); if (likely(nr < ENTIRELY_MAPPED)) { nr_pages = folio_large_nr_pages(folio); if (level == RMAP_LEVEL_PMD) nr_pmdmapped = nr_pages; nr = nr_pages - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of another remove and an add? */ if (unlikely(nr < 0)) nr = 0; } else { /* An add of ENTIRELY_MAPPED raced ahead */ nr = 0; } } partially_mapped = nr && nr < nr_pmdmapped; break; } /* * Queue anon large folio for deferred split if at least one page of * the folio is unmapped and at least one page is still mapped. * * Check partially_mapped first to ensure it is a large folio. */ if (partially_mapped && folio_test_anon(folio) && !folio_test_partially_mapped(folio)) deferred_split_folio(folio, true); __folio_mod_stat(folio, -nr, -nr_pmdmapped); /* * It would be tidy to reset folio_test_anon mapping when fully * unmapped, but that might overwrite a racing folio_add_anon_rmap_*() * which increments mapcount after us but sets mapping before us: * so leave the reset to free_pages_prepare, and remember that * it's only reliable while mapped. */ munlock_vma_folio(folio, vma); } /** * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio * @folio: The folio to remove the mappings from * @page: The first page to remove * @nr_pages: The number of pages that will be removed from the mapping * @vma: The vm area from which the mappings are removed * * The page range of the folio is defined by [page, page + nr_pages) * * The caller needs to hold the page table lock. 
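 *
 * For a usage example, see the discard path of try_to_unmap_one()
 * below, which after clearing nr_pages PTEs does:
 *
 *	folio_remove_rmap_ptes(folio, subpage, nr_pages, vma);
 *	folio_ref_sub(folio, nr_pages - 1);
 *	...
 *	folio_put(folio);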
 */
void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
		int nr_pages, struct vm_area_struct *vma)
{
	__folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
}

/**
 * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio
 * @folio:	The folio to remove the mapping from
 * @page:	The first page to remove
 * @vma:	The vm area from which the mapping is removed
 *
 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock.
 */
void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
		struct vm_area_struct *vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	__folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
#endif
}

/**
 * folio_remove_rmap_pud - remove a PUD mapping from a page range of a folio
 * @folio:	The folio to remove the mapping from
 * @page:	The first page to remove
 * @vma:	The vm area from which the mapping is removed
 *
 * The page range of the folio is defined by [page, page + HPAGE_PUD_NR)
 *
 * The caller needs to hold the page table lock.
 */
void folio_remove_rmap_pud(struct folio *folio, struct page *page,
		struct vm_area_struct *vma)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
	__folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD);
#else
	WARN_ON_ONCE(true);
#endif
}

/* We support batch unmapping of PTEs for lazyfree large folios */
static inline bool can_batch_unmap_folio_ptes(unsigned long addr,
			struct folio *folio, pte_t *ptep)
{
	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
	int max_nr = folio_nr_pages(folio);
	pte_t pte = ptep_get(ptep);

	if (!folio_test_anon(folio) || folio_test_swapbacked(folio))
		return false;
	if (pte_unused(pte))
		return false;
	if (pte_pfn(pte) != folio_pfn(folio))
		return false;

	return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
			       NULL, NULL) == max_nr;
}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	bool anon_exclusive, ret = true;
	pte_t pteval;
	struct page *subpage;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;
	unsigned long nr_pages = 1, end_addr;
	unsigned long pfn;
	unsigned long hsz = 0;

	/*
	 * When racing against e.g. zap_pte_range() on another cpu,
	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
	 * try_to_unmap() may return before page_mapped() has become false,
	 * if page table locking is skipped: use TTU_SYNC to wait for that.
	 */
	if (flags & TTU_SYNC)
		pvmw.flags = PVMW_SYNC;

	/*
	 * For THP, we have to assume the worst case, i.e. PMD, for
	 * invalidation. For hugetlb, it could be much worse if we need to do
	 * PUD invalidation in the case of PMD sharing.
	 *
	 * Note that the folio cannot be freed in this function, as the caller
	 * of try_to_unmap() must hold a reference on the folio.
	 */
	range.end = vma_address_end(&pvmw);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				address, range.end);
	if (folio_test_hugetlb(folio)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
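		 * (Shared hugetlb PMDs span an entire PUD_SIZE region, so
		 * adjust_range_if_pmd_sharing_possible() widens the range to
		 * PUD_SIZE-aligned boundaries.)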
*/ adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); /* We need the huge page size for set_huge_pte_at() */ hsz = huge_page_size(hstate_vma(vma)); } mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(&pvmw)) { /* * If the folio is in an mlock()d vma, we must not swap it out. */ if (!(flags & TTU_IGNORE_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Restore the mlock which got missed */ if (!folio_test_large(folio)) mlock_vma_folio(folio, vma); goto walk_abort; } if (!pvmw.pte) { if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) { if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio)) goto walk_done; /* * unmap_huge_pmd_locked has either already marked * the folio as swap-backed or decided to retain it * due to GUP or speculative references. */ goto walk_abort; } if (flags & TTU_SPLIT_HUGE_PMD) { /* * We temporarily have to drop the PTL and * restart so we can process the PTE-mapped THP. */ split_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, false); flags &= ~TTU_SPLIT_HUGE_PMD; page_vma_mapped_walk_restart(&pvmw); continue; } } /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); /* * Handle PFN swap PTEs, such as device-exclusive ones, that * actually map pages. */ pteval = ptep_get(pvmw.pte); if (likely(pte_present(pteval))) { pfn = pte_pfn(pteval); } else { pfn = swp_offset_pfn(pte_to_swp_entry(pteval)); VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); } subpage = folio_page(folio, pfn - folio_pfn(folio)); address = pvmw.address; anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(subpage); if (folio_test_hugetlb(folio)) { bool anon = folio_test_anon(folio); /* * The try_to_unmap() is only passed a hugetlb page * in the case where the hugetlb page is poisoned. */ VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); /* * huge_pmd_unshare may unmap an entire PMD page. * There is no way of knowing exactly which PMDs may * be cached for this mm, so we must flush them all. * start/end were already adjusted above to cover this * range. */ flush_cache_range(vma, range.start, range.end); /* * To call huge_pmd_unshare, i_mmap_rwsem must be * held in write mode. Caller needs to explicitly * do this outside rmap routines. * * We also must hold hugetlb vma_lock in write mode. * Lock order dictates acquiring vma_lock BEFORE * i_mmap_rwsem. We can only try lock here and fail * if unsuccessful. */ if (!anon) { VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); if (!hugetlb_vma_trylock_write(vma)) goto walk_abort; if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { hugetlb_vma_unlock_write(vma); flush_tlb_range(vma, range.start, range.end); /* * The ref count of the PMD page was * dropped which is part of the way map * counting is done for shared PMDs. * Return 'true' here. When there is * no other sharing, huge_pmd_unshare * returns false and we will unmap the * actual page and drop map count * to zero. */ goto walk_done; } hugetlb_vma_unlock_write(vma); } pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); if (pte_dirty(pteval)) folio_mark_dirty(folio); } else if (likely(pte_present(pteval))) { if (folio_test_large(folio) && !(flags & TTU_HWPOISON) && can_batch_unmap_folio_ptes(address, folio, pvmw.pte)) nr_pages = folio_nr_pages(folio); end_addr = address + nr_pages * PAGE_SIZE; flush_cache_range(vma, address, end_addr); /* Nuke the page table entry. */ pteval = get_and_clear_full_ptes(mm, address, pvmw.pte, nr_pages, 0); /* * We clear the PTE but do not flush so potentially * a remote CPU could still be writing to the folio. 
* If the entry was previously clean then the * architecture must guarantee that a clear->dirty * transition on a cached TLB entry is written through * and traps if the PTE is unmapped. */ if (should_defer_flush(mm, flags)) set_tlb_ubc_flush_pending(mm, pteval, address, end_addr); else flush_tlb_range(vma, address, end_addr); if (pte_dirty(pteval)) folio_mark_dirty(folio); } else { pte_clear(mm, address, pvmw.pte); } /* * Now the pte is cleared. If this pte was uffd-wp armed, * we may want to replace a none pte with a marker pte if * it's file-backed, so we don't lose the tracking info. */ pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) { pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); if (folio_test_hugetlb(folio)) { hugetlb_count_sub(folio_nr_pages(folio), mm); set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); } else { dec_mm_counter(mm, mm_counter(folio)); set_pte_at(mm, address, pvmw.pte, pteval); } } else if (likely(pte_present(pteval)) && pte_unused(pteval) && !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. Simply discard the pte, vmscan * will take care of the rest. * A future reference will then fault in a new zero * page. When userfaultfd is active, we must not drop * this page though, as its main user (postcopy * migration) will not expect userfaults on already * copied pages. */ dec_mm_counter(mm, mm_counter(folio)); } else if (folio_test_anon(folio)) { swp_entry_t entry = page_swap_entry(subpage); pte_t swp_pte; /* * Store the swap location in the pte. * See handle_pte_fault() ... */ if (unlikely(folio_test_swapbacked(folio) != folio_test_swapcache(folio))) { WARN_ON_ONCE(1); goto walk_abort; } /* MADV_FREE page check */ if (!folio_test_swapbacked(folio)) { int ref_count, map_count; /* * Synchronize with gup_pte_range(): * - clear PTE; barrier; read refcount * - inc refcount; barrier; read PTE */ smp_mb(); ref_count = folio_ref_count(folio); map_count = folio_mapcount(folio); /* * Order reads for page refcount and dirty flag * (see comments in __remove_mapping()). */ smp_rmb(); if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { /* * redirtied either using the page table or a previously * obtained GUP reference. */ set_ptes(mm, address, pvmw.pte, pteval, nr_pages); folio_set_swapbacked(folio); goto walk_abort; } else if (ref_count != 1 + map_count) { /* * Additional reference. Could be a GUP reference or any * speculative reference. GUP users must mark the folio * dirty if there was a modification. This folio cannot be * reclaimed right now either way, so act just like nothing * happened. * We'll come back here later and detect if the folio was * dirtied when the additional reference is gone. */ set_ptes(mm, address, pvmw.pte, pteval, nr_pages); goto walk_abort; } add_mm_counter(mm, MM_ANONPAGES, -nr_pages); goto discard; } if (swap_duplicate(entry) < 0) { set_pte_at(mm, address, pvmw.pte, pteval); goto walk_abort; } /* * arch_unmap_one() is expected to be a NOP on * architectures where we could have PFN swap PTEs, * so we'll not check/care. */ if (arch_unmap_one(mm, vma, address, pteval) < 0) { swap_free(entry); set_pte_at(mm, address, pvmw.pte, pteval); goto walk_abort; } /* See folio_try_share_anon_rmap(): clear PTE first. 
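			 * If the folio may be pinned (e.g. by a concurrent
			 * GUP user), sharing is refused and the PTE is
			 * restored below instead of letting a possibly
			 * written page go to swap.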
			 */
			if (anon_exclusive &&
			    folio_try_share_anon_rmap_pte(folio, subpage)) {
				swap_free(entry);
				set_pte_at(mm, address, pvmw.pte, pteval);
				goto walk_abort;
			}

			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (anon_exclusive)
				swp_pte = pte_swp_mkexclusive(swp_pte);
			if (likely(pte_present(pteval))) {
				if (pte_soft_dirty(pteval))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pteval))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				if (pte_swp_soft_dirty(pteval))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pteval))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			set_pte_at(mm, address, pvmw.pte, swp_pte);
		} else {
			/*
			 * This is a locked file-backed folio,
			 * so it cannot be removed from the page
			 * cache and replaced by a new folio before
			 * mmu_notifier_invalidate_range_end, so no
			 * concurrent thread might update its page table
			 * to point at a new folio while a device is
			 * still using this folio.
			 *
			 * See Documentation/mm/mmu_notifier.rst
			 */
			dec_mm_counter(mm, mm_counter_file(folio));
		}
discard:
		if (unlikely(folio_test_hugetlb(folio))) {
			hugetlb_remove_rmap(folio);
		} else {
			folio_remove_rmap_ptes(folio, subpage, nr_pages, vma);
			folio_ref_sub(folio, nr_pages - 1);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();
		folio_put(folio);
		/* We have already batched the entire folio */
		if (nr_pages > 1)
			goto walk_done;
		continue;
walk_abort:
		ret = false;
walk_done:
		page_vma_mapped_walk_done(&pvmw);
		break;
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return vma_is_temporary_stack(vma);
}

static int folio_not_mapped(struct folio *folio)
{
	return !folio_mapped(folio);
}

/**
 * try_to_unmap - Try to remove all page table mappings to a folio.
 * @folio: The folio to unmap.
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * folio. It is the caller's responsibility to check if the folio is
 * still mapped if needed (use TTU_SYNC to prevent accounting races).
 *
 * Context: Caller must hold the folio lock.
 */
void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = folio_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
	};

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);
}

/*
 * @arg: enum ttu_flags will be passed to this argument.
 *
 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
 * containing migration entries.
 */
static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	bool anon_exclusive, writable, ret = true;
	pte_t pteval;
	struct page *subpage;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;
	unsigned long pfn;
	unsigned long hsz = 0;

	/*
	 * When racing against e.g. zap_pte_range() on another cpu,
	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
	 * try_to_migrate() may return before page_mapped() has become false,
	 * if page table locking is skipped: use TTU_SYNC to wait for that.
	 */
	if (flags & TTU_SYNC)
		pvmw.flags = PVMW_SYNC;

	/*
	 * For THP, we have to assume the worst case, i.e. PMD, for
	 * invalidation.
	 * For hugetlb, it could be much worse if we need to do PUD
	 * invalidation in the case of PMD sharing.
	 *
	 * Note that the page cannot be freed in this function, as the caller
	 * of try_to_migrate() must hold a reference on the page.
	 */
	range.end = vma_address_end(&pvmw);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				address, range.end);
	if (folio_test_hugetlb(folio)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);

		/* We need the huge page size for set_huge_pte_at() */
		hsz = huge_page_size(hstate_vma(vma));
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			if (flags & TTU_SPLIT_HUGE_PMD) {
				split_huge_pmd_locked(vma, pvmw.address,
						      pvmw.pmd, true);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
			subpage = folio_page(folio,
				pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);

			if (set_pmd_migration_entry(&pvmw, subpage)) {
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			continue;
#endif
		}

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_FOLIO(!pvmw.pte, folio);

		/*
		 * Handle PFN swap PTEs, such as device-exclusive ones, that
		 * actually map pages.
		 */
		pteval = ptep_get(pvmw.pte);
		if (likely(pte_present(pteval))) {
			pfn = pte_pfn(pteval);
		} else {
			pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
		}

		subpage = folio_page(folio, pfn - folio_pfn(folio));
		address = pvmw.address;
		anon_exclusive = folio_test_anon(folio) &&
				 PageAnonExclusive(subpage);

		if (folio_test_hugetlb(folio)) {
			bool anon = folio_test_anon(folio);

			/*
			 * huge_pmd_unshare may unmap an entire PMD page.
			 * There is no way of knowing exactly which PMDs may
			 * be cached for this mm, so we must flush them all.
			 * start/end were already adjusted above to cover this
			 * range.
			 */
			flush_cache_range(vma, range.start, range.end);

			/*
			 * To call huge_pmd_unshare, i_mmap_rwsem must be
			 * held in write mode. Caller needs to explicitly
			 * do this outside rmap routines.
			 *
			 * We also must hold hugetlb vma_lock in write mode.
			 * Lock order dictates acquiring vma_lock BEFORE
			 * i_mmap_rwsem. We can only try lock here and
			 * fail if unsuccessful.
			 */
			if (!anon) {
				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
				if (!hugetlb_vma_trylock_write(vma)) {
					page_vma_mapped_walk_done(&pvmw);
					ret = false;
					break;
				}
				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
					hugetlb_vma_unlock_write(vma);
					flush_tlb_range(vma,
						range.start, range.end);

					/*
					 * The ref count of the PMD page was
					 * dropped which is part of the way map
					 * counting is done for shared PMDs.
					 * Return 'true' here. When there is
					 * no other sharing, huge_pmd_unshare
					 * returns false and we will unmap the
					 * actual page and drop map count
					 * to zero.
					 */
					page_vma_mapped_walk_done(&pvmw);
					break;
				}
				hugetlb_vma_unlock_write(vma);
			}
			/* Nuke the hugetlb page table entry */
			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
			if (pte_dirty(pteval))
				folio_mark_dirty(folio);
			writable = pte_write(pteval);
		} else if (likely(pte_present(pteval))) {
			flush_cache_page(vma, address, pfn);
			/* Nuke the page table entry. */
			if (should_defer_flush(mm, flags)) {
				/*
				 * We clear the PTE but do not flush so potentially
				 * a remote CPU could still be writing to the folio.
* If the entry was previously clean then the * architecture must guarantee that a clear->dirty * transition on a cached TLB entry is written through * and traps if the PTE is unmapped. */ pteval = ptep_get_and_clear(mm, address, pvmw.pte); set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE); } else { pteval = ptep_clear_flush(vma, address, pvmw.pte); } if (pte_dirty(pteval)) folio_mark_dirty(folio); writable = pte_write(pteval); } else { pte_clear(mm, address, pvmw.pte); writable = is_writable_device_private_entry(pte_to_swp_entry(pteval)); } VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) && !anon_exclusive, folio); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); if (PageHWPoison(subpage)) { VM_WARN_ON_FOLIO(folio_is_device_private(folio), folio); pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); if (folio_test_hugetlb(folio)) { hugetlb_count_sub(folio_nr_pages(folio), mm); set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); } else { dec_mm_counter(mm, mm_counter(folio)); set_pte_at(mm, address, pvmw.pte, pteval); } } else if (likely(pte_present(pteval)) && pte_unused(pteval) && !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. Simply discard the pte, vmscan * will take care of the rest. * A future reference will then fault in a new zero * page. When userfaultfd is active, we must not drop * this page though, as its main user (postcopy * migration) will not expect userfaults on already * copied pages. */ dec_mm_counter(mm, mm_counter(folio)); } else { swp_entry_t entry; pte_t swp_pte; /* * arch_unmap_one() is expected to be a NOP on * architectures where we could have PFN swap PTEs, * so we'll not check/care. */ if (arch_unmap_one(mm, vma, address, pteval) < 0) { if (folio_test_hugetlb(folio)) set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); else set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ if (folio_test_hugetlb(folio)) { if (anon_exclusive && hugetlb_try_share_anon_rmap(folio)) { set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); ret = false; page_vma_mapped_walk_done(&pvmw); break; } } else if (anon_exclusive && folio_try_share_anon_rmap_pte(folio, subpage)) { set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } /* * Store the pfn of the page in a special migration * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. 
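			 * (In the fault path this is migration_entry_wait(),
			 * which blocks until remove_migration_ptes() has put
			 * a real PTE back; a hedged pointer, not part of the
			 * original comment.)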
			 */
			if (writable)
				entry = make_writable_migration_entry(
							page_to_pfn(subpage));
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(
							page_to_pfn(subpage));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(subpage));
			if (likely(pte_present(pteval))) {
				if (pte_young(pteval))
					entry = make_migration_entry_young(entry);
				if (pte_dirty(pteval))
					entry = make_migration_entry_dirty(entry);
				swp_pte = swp_entry_to_pte(entry);
				if (pte_soft_dirty(pteval))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pteval))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				swp_pte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(pteval))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pteval))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			if (folio_test_hugetlb(folio))
				set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
						hsz);
			else
				set_pte_at(mm, address, pvmw.pte, swp_pte);
			trace_set_migration_pte(address, pte_val(swp_pte),
						folio_order(folio));
			/*
			 * No need to invalidate here: concurrent users will
			 * synchronize against the special swap migration pte.
			 */
		}

		if (unlikely(folio_test_hugetlb(folio)))
			hugetlb_remove_rmap(folio);
		else
			folio_remove_rmap_pte(folio, subpage, vma);
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();
		folio_put(folio);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}

/**
 * try_to_migrate - try to replace all page table mappings with swap entries
 * @folio: the folio to replace page table entries for
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this folio and
 * replace them with special swap entries. Caller must hold the folio lock.
 */
void try_to_migrate(struct folio *folio, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_migrate_one,
		.arg = (void *)flags,
		.done = folio_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
	};

	/*
	 * Migration always ignores mlock and only supports the
	 * TTU_RMAP_LOCKED, TTU_SPLIT_HUGE_PMD, TTU_SYNC and TTU_BATCH_FLUSH
	 * flags.
	 */
	if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
					TTU_SYNC | TTU_BATCH_FLUSH)))
		return;

	if (folio_is_zone_device(folio) &&
	    (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
		return;

	/*
	 * During exec, a temporary VMA is set up and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if (!folio_test_ksm(folio) && folio_test_anon(folio))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);
}

#ifdef CONFIG_DEVICE_PRIVATE

/**
 * make_device_exclusive() - Mark a page for exclusive use by a device
 * @mm: mm_struct of associated target process
 * @addr: the virtual address to mark for exclusive device access
 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
 * @foliop: folio pointer will be stored here on success.
 *
 * This function looks up the page mapped at the given address, grabs a
 * folio reference, locks the folio and replaces the PTE with a special
 * device-exclusive PFN swap entry, preventing access through the process
 * page tables. The function will return with the folio locked and referenced.
 *
 * On fault, the device-exclusive entries are replaced with the original PTE
 * under folio lock, after calling MMU notifiers.
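 *
 * Hedged driver-side sketch; drv_owner and drv_program_pte() are
 * assumptions, the rest are ordinary kernel helpers:
 *
 *	page = make_device_exclusive(mm, addr, drv_owner, &folio);
 *	if (!IS_ERR(page)) {
 *		drv_program_pte(drv_owner, addr, page_to_pfn(page));
 *		folio_unlock(folio);
 *		folio_put(folio);
 *	}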
* * Only anonymous non-hugetlb folios are supported and the VMA must have * write permissions such that we can fault in the anonymous page writable * in order to mark it exclusive. The caller must hold the mmap_lock in read * mode. * * A driver using this to program access from a device must use a mmu notifier * critical section to hold a device specific lock during programming. Once * programming is complete it should drop the folio lock and reference after * which point CPU access to the page will revoke the exclusive access. * * Notes: * #. This function always operates on individual PTEs mapping individual * pages. PMD-sized THPs are first remapped to be mapped by PTEs before * the conversion happens on a single PTE corresponding to @addr. * #. While concurrent access through the process page tables is prevented, * concurrent access through other page references (e.g., earlier GUP * invocation) is not handled and not supported. * #. device-exclusive entries are considered "clean" and "old" by core-mm. * Device drivers must update the folio state when informed by MMU * notifiers. * * Returns: pointer to mapped page on success, otherwise a negative error. */ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, void *owner, struct folio **foliop) { struct mmu_notifier_range range; struct folio *folio, *fw_folio; struct vm_area_struct *vma; struct folio_walk fw; struct page *page; swp_entry_t entry; pte_t swp_pte; int ret; mmap_assert_locked(mm); addr = PAGE_ALIGN_DOWN(addr); /* * Fault in the page writable and try to lock it; note that if the * address would already be marked for exclusive use by a device, * the GUP call would undo that first by triggering a fault. * * If any other device would already map this page exclusively, the * fault will trigger a conversion to an ordinary * (non-device-exclusive) PTE and issue a MMU_NOTIFY_EXCLUSIVE. */ retry: page = get_user_page_vma_remote(mm, addr, FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, &vma); if (IS_ERR(page)) return page; folio = page_folio(page); if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) { folio_put(folio); return ERR_PTR(-EOPNOTSUPP); } ret = folio_lock_killable(folio); if (ret) { folio_put(folio); return ERR_PTR(ret); } /* * Inform secondary MMUs that we are going to convert this PTE to * device-exclusive, such that they unmap it now. Note that the * caller must filter this event out to prevent livelocks. */ mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, mm, addr, addr + PAGE_SIZE, owner); mmu_notifier_invalidate_range_start(&range); /* * Let's do a second walk and make sure we still find the same page * mapped writable. Note that any page of an anonymous folio can * only be mapped writable using exactly one PTE ("exclusive"), so * there cannot be other mappings. */ fw_folio = folio_walk_start(&fw, vma, addr, 0); if (fw_folio != folio || fw.page != page || fw.level != FW_LEVEL_PTE || !pte_write(fw.pte)) { if (fw_folio) folio_walk_end(&fw, vma); mmu_notifier_invalidate_range_end(&range); folio_unlock(folio); folio_put(folio); goto retry; } /* Nuke the page table entry so we get the uptodate dirty bit. */ flush_cache_page(vma, addr, page_to_pfn(page)); fw.pte = ptep_clear_flush(vma, addr, fw.ptep); /* Set the dirty flag on the folio now the PTE is gone. */ if (pte_dirty(fw.pte)) folio_mark_dirty(folio); /* * Store the pfn of the page in a special device-exclusive PFN swap PTE. * do_swap_page() will trigger the conversion back while holding the * folio lock. 
*/ entry = make_device_exclusive_entry(page_to_pfn(page)); swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(fw.pte)) swp_pte = pte_swp_mksoft_dirty(swp_pte); /* The pte is writable, uffd-wp does not apply. */ set_pte_at(mm, addr, fw.ptep, swp_pte); folio_walk_end(&fw, vma); mmu_notifier_invalidate_range_end(&range); *foliop = folio; return page; } EXPORT_SYMBOL_GPL(make_device_exclusive); #endif void __put_anon_vma(struct anon_vma *anon_vma) { struct anon_vma *root = anon_vma->root; anon_vma_free(anon_vma); if (root != anon_vma && atomic_dec_and_test(&root->refcount)) anon_vma_free(root); } static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio, struct rmap_walk_control *rwc) { struct anon_vma *anon_vma; if (rwc->anon_lock) return rwc->anon_lock(folio, rwc); /* * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() * because that depends on page_mapped(); but not all its usages * are holding mmap_lock. Users without mmap_lock are required to * take a reference count to prevent the anon_vma disappearing */ anon_vma = folio_anon_vma(folio); if (!anon_vma) return NULL; if (anon_vma_trylock_read(anon_vma)) goto out; if (rwc->try_lock) { anon_vma = NULL; rwc->contended = true; goto out; } anon_vma_lock_read(anon_vma); out: return anon_vma; } /* * rmap_walk_anon - do something to anonymous page using the object-based * rmap method * @folio: the folio to be handled * @rwc: control variable according to each walk type * @locked: caller holds relevant rmap lock * * Find all the mappings of a folio using the mapping pointer and the vma * chains contained in the anon_vma struct it points to. */ static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc, bool locked) { struct anon_vma *anon_vma; pgoff_t pgoff_start, pgoff_end; struct anon_vma_chain *avc; if (locked) { anon_vma = folio_anon_vma(folio); /* anon_vma disappear under us? */ VM_BUG_ON_FOLIO(!anon_vma, folio); } else { anon_vma = rmap_walk_anon_lock(folio, rwc); } if (!anon_vma) return; pgoff_start = folio_pgoff(folio); pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff_start, pgoff_end) { struct vm_area_struct *vma = avc->vma; unsigned long address = vma_address(vma, pgoff_start, folio_nr_pages(folio)); VM_BUG_ON_VMA(address == -EFAULT, vma); cond_resched(); if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; if (!rwc->rmap_one(folio, vma, address, rwc->arg)) break; if (rwc->done && rwc->done(folio)) break; } if (!locked) anon_vma_unlock_read(anon_vma); } /** * __rmap_walk_file() - Traverse the reverse mapping for a file-backed mapping * of a page mapped within a specified page cache object at a specified offset. * * @folio: Either the folio whose mappings to traverse, or if NULL, * the callbacks specified in @rwc will be configured such * as to be able to look up mappings correctly. * @mapping: The page cache object whose mapping VMAs we intend to * traverse. If @folio is non-NULL, this should be equal to * folio_mapping(folio). * @pgoff_start: The offset within @mapping of the page which we are * looking up. If @folio is non-NULL, this should be equal * to folio_pgoff(folio). * @nr_pages: The number of pages mapped by the mapping. If @folio is * non-NULL, this should be equal to folio_nr_pages(folio). * @rwc: The reverse mapping walk control object describing how * the traversal should proceed. * @locked: Is the @mapping already locked? If not, we acquire the * lock. 
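 *
 * Note: when @folio is NULL, the VM_WARN_ON_FOLIO() consistency checks
 * below are skipped; they only fire when a non-NULL folio disagrees
 * with @mapping, @pgoff_start or @nr_pages.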
*/ static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, pgoff_t pgoff_start, unsigned long nr_pages, struct rmap_walk_control *rwc, bool locked) { pgoff_t pgoff_end = pgoff_start + nr_pages - 1; struct vm_area_struct *vma; VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio); VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio); VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio); if (!locked) { if (i_mmap_trylock_read(mapping)) goto lookup; if (rwc->try_lock) { rwc->contended = true; return; } i_mmap_lock_read(mapping); } lookup: vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff_start, pgoff_end) { unsigned long address = vma_address(vma, pgoff_start, nr_pages); VM_BUG_ON_VMA(address == -EFAULT, vma); cond_resched(); if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; if (!rwc->rmap_one(folio, vma, address, rwc->arg)) goto done; if (rwc->done && rwc->done(folio)) goto done; } done: if (!locked) i_mmap_unlock_read(mapping); } /* * rmap_walk_file - do something to file page using the object-based rmap method * @folio: the folio to be handled * @rwc: control variable according to each walk type * @locked: caller holds relevant rmap lock * * Find all the mappings of a folio using the mapping pointer and the vma chains * contained in the address_space struct it points to. */ static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc, bool locked) { /* * The folio lock not only makes sure that folio->mapping cannot * suddenly be NULLified by truncation, it makes sure that the structure * at mapping cannot be freed and reused yet, so we can safely take * mapping->i_mmap_rwsem. */ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (!folio->mapping) return; __rmap_walk_file(folio, folio->mapping, folio->index, folio_nr_pages(folio), rwc, locked); } void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) { if (unlikely(folio_test_ksm(folio))) rmap_walk_ksm(folio, rwc); else if (folio_test_anon(folio)) rmap_walk_anon(folio, rwc, false); else rmap_walk_file(folio, rwc, false); } /* Like rmap_walk, but caller holds relevant rmap lock */ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) { /* no ksm support for now */ VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); if (folio_test_anon(folio)) rmap_walk_anon(folio, rwc, true); else rmap_walk_file(folio, rwc, true); } #ifdef CONFIG_HUGETLB_PAGE /* * The following two functions are for anonymous (private mapped) hugepages. * Unlike common anonymous pages, anonymous hugepages have no accounting code * and no lru code, because we handle hugepages differently from common pages. 
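 *
 * hugetlb folios are also always mapped as one unit, so the helpers
 * below only maintain the entire and large mapcounts, never a
 * per-page _mapcount.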
 */
void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
		unsigned long address, rmap_t flags)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

	atomic_inc(&folio->_entire_mapcount);
	atomic_inc(&folio->_large_mapcount);
	if (flags & RMAP_EXCLUSIVE)
		SetPageAnonExclusive(&folio->page);
	VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
			 PageAnonExclusive(&folio->page), folio);
}

void hugetlb_add_new_anon_rmap(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);

	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	/* increment count (starts at -1) */
	atomic_set(&folio->_entire_mapcount, 0);
	atomic_set(&folio->_large_mapcount, 0);
	folio_clear_hugetlb_restore_reserve(folio);
	__folio_set_anon(folio, vma, address, true);
	SetPageAnonExclusive(&folio->page);
}
#endif /* CONFIG_HUGETLB_PAGE */
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/module.h>
#include <linux/usb.h>
#include "main.h"
#include "rtw8723d.h"
#include "usb.h"

static const struct usb_device_id rtw_8723du_id_table[] = {
	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xd723,
					0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8723d_hw_spec) }, /* 8723DU 1*1 */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xd611, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8723d_hw_spec) }, /* Edimax EW-7611ULB V2 */
	{ },
};
MODULE_DEVICE_TABLE(usb, rtw_8723du_id_table);

static int rtw8723du_probe(struct usb_interface *intf,
			   const struct usb_device_id *id)
{
	return rtw_usb_probe(intf, id);
}

static struct usb_driver rtw_8723du_driver = {
	.name = KBUILD_MODNAME,
	.id_table = rtw_8723du_id_table,
	.probe = rtw8723du_probe,
	.disconnect = rtw_usb_disconnect,
};
module_usb_driver(rtw_8723du_driver);

MODULE_AUTHOR("Hans Ulli Kroll <linux@ulli-kroll.de>");
MODULE_DESCRIPTION("Realtek 802.11n wireless 8723du driver");
MODULE_LICENSE("Dual BSD/GPL");
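/*
 * Illustrative note, not part of the original file: support for another
 * 8723DU-based dongle would be one more id_table entry, e.g. with a
 * hypothetical product ID:
 *
 *	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb723,
 *					0xff, 0xff, 0xff),
 *	  .driver_info = (kernel_ulong_t)&(rtw8723d_hw_spec) },
 */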
// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;
	int i, ret;

	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

	pipe = (req_type & USB_DIR_IN) ?
usb_rcvctrlpipe(udev, 0) : usb_sndctrlpipe(udev, 0); for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) { if (test_bit(MT76_REMOVED, &dev->phy.state)) return -EIO; ret = usb_control_msg(udev, pipe, req, req_type, val, offset, buf, len, MT_VEND_REQ_TOUT_MS); if (ret == -ENODEV || ret == -EPROTO) set_bit(MT76_REMOVED, &dev->phy.state); if (ret >= 0 || ret == -ENODEV || ret == -EPROTO) return ret; usleep_range(5000, 10000); } dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n", req, offset, ret); return ret; } EXPORT_SYMBOL_GPL(__mt76u_vendor_request); int mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type, u16 val, u16 offset, void *buf, size_t len) { int ret; mutex_lock(&dev->usb.usb_ctrl_mtx); ret = __mt76u_vendor_request(dev, req, req_type, val, offset, buf, len); trace_usb_reg_wr(dev, offset, val); mutex_unlock(&dev->usb.usb_ctrl_mtx); return ret; } EXPORT_SYMBOL_GPL(mt76u_vendor_request); u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr) { struct mt76_usb *usb = &dev->usb; u32 data = ~0; int ret; ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16, addr, usb->data, sizeof(__le32)); if (ret == sizeof(__le32)) data = get_unaligned_le32(usb->data); trace_usb_reg_rr(dev, addr, data); return data; } EXPORT_SYMBOL_GPL(___mt76u_rr); static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr) { u8 req; switch (addr & MT_VEND_TYPE_MASK) { case MT_VEND_TYPE_EEPROM: req = MT_VEND_READ_EEPROM; break; case MT_VEND_TYPE_CFG: req = MT_VEND_READ_CFG; break; default: req = MT_VEND_MULTI_READ; break; } return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR, addr & ~MT_VEND_TYPE_MASK); } static u32 mt76u_rr(struct mt76_dev *dev, u32 addr) { u32 ret; mutex_lock(&dev->usb.usb_ctrl_mtx); ret = __mt76u_rr(dev, addr); mutex_unlock(&dev->usb.usb_ctrl_mtx); return ret; } void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr, u32 val) { struct mt76_usb *usb = &dev->usb; put_unaligned_le32(val, usb->data); __mt76u_vendor_request(dev, req, req_type, addr >> 16, addr, usb->data, sizeof(__le32)); trace_usb_reg_wr(dev, addr, val); } EXPORT_SYMBOL_GPL(___mt76u_wr); static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) { u8 req; switch (addr & MT_VEND_TYPE_MASK) { case MT_VEND_TYPE_CFG: req = MT_VEND_WRITE_CFG; break; default: req = MT_VEND_MULTI_WRITE; break; } ___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR, addr & ~MT_VEND_TYPE_MASK, val); } static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) { mutex_lock(&dev->usb.usb_ctrl_mtx); __mt76u_wr(dev, addr, val); mutex_unlock(&dev->usb.usb_ctrl_mtx); } static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr, u32 mask, u32 val) { mutex_lock(&dev->usb.usb_ctrl_mtx); val |= __mt76u_rr(dev, addr) & ~mask; __mt76u_wr(dev, addr, val); mutex_unlock(&dev->usb.usb_ctrl_mtx); return val; } static void mt76u_copy(struct mt76_dev *dev, u32 offset, const void *data, int len) { struct mt76_usb *usb = &dev->usb; const u8 *val = data; int ret; int current_batch_size; int i = 0; /* Assure that always a multiple of 4 bytes are copied, * otherwise beacons can be corrupted. 
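	 * (round_up(len, 4) below pads the tail of the copy so the device
	 * always sees whole dwords; a hedged reading of the commit
	 * referenced next.)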
* See: "mt76: round up length on mt76_wr_copy" * Commit 850e8f6fbd5d0003b0 */ len = round_up(len, 4); mutex_lock(&usb->usb_ctrl_mtx); while (i < len) { current_batch_size = min_t(int, usb->data_len, len - i); memcpy(usb->data, val + i, current_batch_size); ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE, USB_DIR_OUT | USB_TYPE_VENDOR, 0, offset + i, usb->data, current_batch_size); if (ret < 0) break; i += current_batch_size; } mutex_unlock(&usb->usb_ctrl_mtx); } void mt76u_read_copy(struct mt76_dev *dev, u32 offset, void *data, int len) { struct mt76_usb *usb = &dev->usb; int i = 0, batch_len, ret; u8 *val = data; len = round_up(len, 4); mutex_lock(&usb->usb_ctrl_mtx); while (i < len) { batch_len = min_t(int, usb->data_len, len - i); ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT, USB_DIR_IN | USB_TYPE_VENDOR, (offset + i) >> 16, offset + i, usb->data, batch_len); if (ret < 0) break; memcpy(val + i, usb->data, batch_len); i += batch_len; } mutex_unlock(&usb->usb_ctrl_mtx); } EXPORT_SYMBOL_GPL(mt76u_read_copy); void mt76u_single_wr(struct mt76_dev *dev, const u8 req, const u16 offset, const u32 val) { mutex_lock(&dev->usb.usb_ctrl_mtx); __mt76u_vendor_request(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR, val & 0xffff, offset, NULL, 0); __mt76u_vendor_request(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR, val >> 16, offset + 2, NULL, 0); mutex_unlock(&dev->usb.usb_ctrl_mtx); } EXPORT_SYMBOL_GPL(mt76u_single_wr); static int mt76u_req_wr_rp(struct mt76_dev *dev, u32 base, const struct mt76_reg_pair *data, int len) { struct mt76_usb *usb = &dev->usb; mutex_lock(&usb->usb_ctrl_mtx); while (len > 0) { __mt76u_wr(dev, base + data->reg, data->value); len--; data++; } mutex_unlock(&usb->usb_ctrl_mtx); return 0; } static int mt76u_wr_rp(struct mt76_dev *dev, u32 base, const struct mt76_reg_pair *data, int n) { if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) return dev->mcu_ops->mcu_wr_rp(dev, base, data, n); else return mt76u_req_wr_rp(dev, base, data, n); } static int mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data, int len) { struct mt76_usb *usb = &dev->usb; mutex_lock(&usb->usb_ctrl_mtx); while (len > 0) { data->value = __mt76u_rr(dev, base + data->reg); len--; data++; } mutex_unlock(&usb->usb_ctrl_mtx); return 0; } static int mt76u_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data, int n) { if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) return dev->mcu_ops->mcu_rd_rp(dev, base, data, n); else return mt76u_req_rd_rp(dev, base, data, n); } static bool mt76u_check_sg(struct mt76_dev *dev) { struct usb_interface *uintf = to_usb_interface(dev->dev); struct usb_device *udev = interface_to_usbdev(uintf); return (!disable_usb_sg && udev->bus->sg_tablesize > 0 && udev->bus->no_sg_constraint); } static int mt76u_set_endpoints(struct usb_interface *intf, struct mt76_usb *usb) { struct usb_host_interface *intf_desc = intf->cur_altsetting; struct usb_endpoint_descriptor *ep_desc; int i, in_ep = 0, out_ep = 0; for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { ep_desc = &intf_desc->endpoint[i].desc; if (usb_endpoint_is_bulk_in(ep_desc) && in_ep < __MT_EP_IN_MAX) { usb->in_ep[in_ep] = usb_endpoint_num(ep_desc); in_ep++; } else if (usb_endpoint_is_bulk_out(ep_desc) && out_ep < __MT_EP_OUT_MAX) { usb->out_ep[out_ep] = usb_endpoint_num(ep_desc); out_ep++; } } if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX) return -EINVAL; return 0; } static int mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb, int nsgs) { int i; for 
(i = 0; i < nsgs; i++) { void *data; int offset; data = mt76_get_page_pool_buf(q, &offset, q->buf_size); if (!data) break; sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size, offset); } if (i < nsgs) { int j; for (j = nsgs; j < urb->num_sgs; j++) mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false); urb->num_sgs = i; } urb->num_sgs = max_t(int, i, urb->num_sgs); urb->transfer_buffer_length = urb->num_sgs * q->buf_size; sg_init_marker(urb->sg, urb->num_sgs); return i ? : -ENOMEM; } static int mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb, int nsgs) { enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN]; int offset; if (qid == MT_RXQ_MAIN && dev->usb.sg_en) return mt76u_fill_rx_sg(dev, q, urb, nsgs); urb->transfer_buffer_length = q->buf_size; urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size); return urb->transfer_buffer ? 0 : -ENOMEM; } static int mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e, int sg_max_size) { unsigned int size = sizeof(struct urb); if (dev->usb.sg_en) size += sg_max_size * sizeof(struct scatterlist); e->urb = kzalloc(size, GFP_KERNEL); if (!e->urb) return -ENOMEM; usb_init_urb(e->urb); if (dev->usb.sg_en && sg_max_size > 0) e->urb->sg = (struct scatterlist *)(e->urb + 1); return 0; } static int mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q, struct mt76_queue_entry *e) { enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN]; int err, sg_size; sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0; err = mt76u_urb_alloc(dev, e, sg_size); if (err) return err; return mt76u_refill_rx(dev, q, e->urb, sg_size); } static void mt76u_urb_free(struct urb *urb) { int i; for (i = 0; i < urb->num_sgs; i++) mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false); if (urb->transfer_buffer) mt76_put_page_pool_buf(urb->transfer_buffer, false); usb_free_urb(urb); } static void mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index, struct urb *urb, usb_complete_t complete_fn, void *context) { struct usb_interface *uintf = to_usb_interface(dev->dev); struct usb_device *udev = interface_to_usbdev(uintf); unsigned int pipe; if (dir == USB_DIR_IN) pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]); else pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]); urb->dev = udev; urb->pipe = pipe; urb->complete = complete_fn; urb->context = context; } static struct urb * mt76u_get_next_rx_entry(struct mt76_queue *q) { struct urb *urb = NULL; unsigned long flags; spin_lock_irqsave(&q->lock, flags); if (q->queued > 0) { urb = q->entry[q->tail].urb; q->tail = (q->tail + 1) % q->ndesc; q->queued--; } spin_unlock_irqrestore(&q->lock, flags); return urb; } static int mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data, u32 data_len) { u16 dma_len, min_len; dma_len = get_unaligned_le16(data); if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR) return dma_len; min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN; if (data_len < min_len || !dma_len || dma_len + MT_DMA_HDR_LEN > data_len || (dma_len & 0x3)) return -EINVAL; return dma_len; } static struct sk_buff * mt76u_build_rx_skb(struct mt76_dev *dev, void *data, int len, int buf_size) { int head_room, drv_flags = dev->drv->drv_flags; struct sk_buff *skb; head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 
0 : MT_DMA_HDR_LEN; if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) { struct page *page; /* slow path, not enough space for data and * skb_shared_info */ skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC); if (!skb) return NULL; skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN); data += head_room + MT_SKB_HEAD_LEN; page = virt_to_head_page(data); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, data - page_address(page), len - MT_SKB_HEAD_LEN, buf_size); return skb; } /* fast path */ skb = build_skb(data, buf_size); if (!skb) return NULL; skb_reserve(skb, head_room); __skb_put(skb, len); return skb; } static int mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb, int buf_size) { u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer; int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length; int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags; struct sk_buff *skb; if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state)) return 0; len = mt76u_get_rx_entry_len(dev, data, urb->actual_length); if (len < 0) return 0; head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN; data_len = min_t(int, len, data_len - head_room); if (len == data_len && dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len)) return 0; skb = mt76u_build_rx_skb(dev, data, data_len, buf_size); if (!skb) return 0; len -= data_len; while (len > 0 && nsgs < urb->num_sgs) { data_len = min_t(int, len, urb->sg[nsgs].length); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, sg_page(&urb->sg[nsgs]), urb->sg[nsgs].offset, data_len, buf_size); len -= data_len; nsgs++; } skb_mark_for_recycle(skb); dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL); return nsgs; } static void mt76u_complete_rx(struct urb *urb) { struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev); struct mt76_queue *q = urb->context; unsigned long flags; trace_rx_urb(dev, urb); switch (urb->status) { case -ECONNRESET: case -ESHUTDOWN: case -ENOENT: case -EPROTO: return; default: dev_err_ratelimited(dev->dev, "rx urb failed: %d\n", urb->status); fallthrough; case 0: break; } spin_lock_irqsave(&q->lock, flags); if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch")) goto out; q->head = (q->head + 1) % q->ndesc; q->queued++; mt76_worker_schedule(&dev->usb.rx_worker); out: spin_unlock_irqrestore(&q->lock, flags); } static int mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid, struct urb *urb) { int ep = qid == MT_RXQ_MAIN ? 
MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP; mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb, mt76u_complete_rx, &dev->q_rx[qid]); trace_submit_urb(dev, urb); return usb_submit_urb(urb, GFP_ATOMIC); } static void mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) { int qid = q - &dev->q_rx[MT_RXQ_MAIN]; struct urb *urb; int err, count; while (true) { urb = mt76u_get_next_rx_entry(q); if (!urb) break; count = mt76u_process_rx_entry(dev, urb, q->buf_size); if (count > 0) { err = mt76u_refill_rx(dev, q, urb, count); if (err < 0) break; } mt76u_submit_rx_buf(dev, qid, urb); } if (qid == MT_RXQ_MAIN) { local_bh_disable(); mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL); local_bh_enable(); } } static void mt76u_rx_worker(struct mt76_worker *w) { struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker); struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb); int i; rcu_read_lock(); mt76_for_each_q_rx(dev, i) mt76u_process_rx_queue(dev, &dev->q_rx[i]); rcu_read_unlock(); } static int mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid) { struct mt76_queue *q = &dev->q_rx[qid]; unsigned long flags; int i, err = 0; spin_lock_irqsave(&q->lock, flags); for (i = 0; i < q->ndesc; i++) { err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb); if (err < 0) break; } q->head = q->tail = 0; q->queued = 0; spin_unlock_irqrestore(&q->lock, flags); return err; } static int mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid) { struct mt76_queue *q = &dev->q_rx[qid]; int i, err; err = mt76_create_page_pool(dev, q); if (err) return err; spin_lock_init(&q->lock); q->entry = devm_kcalloc(dev->dev, MT_NUM_RX_ENTRIES, sizeof(*q->entry), GFP_KERNEL); if (!q->entry) return -ENOMEM; q->ndesc = MT_NUM_RX_ENTRIES; q->buf_size = PAGE_SIZE; for (i = 0; i < q->ndesc; i++) { err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]); if (err < 0) return err; } return mt76u_submit_rx_buffers(dev, qid); } int mt76u_alloc_mcu_queue(struct mt76_dev *dev) { return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU); } EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue); static void mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) { int i; for (i = 0; i < q->ndesc; i++) { if (!q->entry[i].urb) continue; mt76u_urb_free(q->entry[i].urb); q->entry[i].urb = NULL; } page_pool_destroy(q->page_pool); q->page_pool = NULL; } static void mt76u_free_rx(struct mt76_dev *dev) { int i; mt76_worker_teardown(&dev->usb.rx_worker); mt76_for_each_q_rx(dev, i) mt76u_free_rx_queue(dev, &dev->q_rx[i]); } void mt76u_stop_rx(struct mt76_dev *dev) { int i; mt76_worker_disable(&dev->usb.rx_worker); mt76_for_each_q_rx(dev, i) { struct mt76_queue *q = &dev->q_rx[i]; int j; for (j = 0; j < q->ndesc; j++) usb_poison_urb(q->entry[j].urb); } } EXPORT_SYMBOL_GPL(mt76u_stop_rx); int mt76u_resume_rx(struct mt76_dev *dev) { int i; mt76_for_each_q_rx(dev, i) { struct mt76_queue *q = &dev->q_rx[i]; int err, j; for (j = 0; j < q->ndesc; j++) usb_unpoison_urb(q->entry[j].urb); err = mt76u_submit_rx_buffers(dev, i); if (err < 0) return err; } mt76_worker_enable(&dev->usb.rx_worker); return 0; } EXPORT_SYMBOL_GPL(mt76u_resume_rx); static void mt76u_status_worker(struct mt76_worker *w) { struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker); struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb); struct mt76_queue_entry entry; struct mt76_queue *q; int i; if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state)) return; for (i = 0; i <= MT_TXQ_PSD; i++) { q = dev->phy.q_tx[i]; if (!q) continue; while (q->queued > 0) { if 
(!q->entry[q->tail].done) break; entry = q->entry[q->tail]; q->entry[q->tail].done = false; mt76_queue_tx_complete(dev, q, &entry); } if (!q->queued) wake_up(&dev->tx_wait); mt76_worker_schedule(&dev->tx_worker); } if (dev->drv->tx_status_data && !test_and_set_bit(MT76_READING_STATS, &dev->phy.state)) queue_work(dev->wq, &dev->usb.stat_work); } static void mt76u_tx_status_data(struct work_struct *work) { struct mt76_usb *usb; struct mt76_dev *dev; u8 update = 1; u16 count = 0; usb = container_of(work, struct mt76_usb, stat_work); dev = container_of(usb, struct mt76_dev, usb); while (true) { if (test_bit(MT76_REMOVED, &dev->phy.state)) break; if (!dev->drv->tx_status_data(dev, &update)) break; count++; } if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state)) queue_work(dev->wq, &usb->stat_work); else clear_bit(MT76_READING_STATS, &dev->phy.state); } static void mt76u_complete_tx(struct urb *urb) { struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev); struct mt76_queue_entry *e = urb->context; if (mt76u_urb_error(urb)) dev_err(dev->dev, "tx urb failed: %d\n", urb->status); e->done = true; mt76_worker_schedule(&dev->usb.status_worker); } static int mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb, struct urb *urb) { urb->transfer_buffer_length = skb->len; if (!dev->usb.sg_en) { urb->transfer_buffer = skb->data; return 0; } sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE); urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len); if (!urb->num_sgs) return -ENOMEM; return urb->num_sgs; } static int mt76u_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q, enum mt76_txq_id qid, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta) { struct mt76_tx_info tx_info = { .skb = skb, }; struct mt76_dev *dev = phy->dev; u16 idx = q->head; int err; if (q->queued == q->ndesc) return -ENOSPC; skb->prev = skb->next = NULL; err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info); if (err < 0) return err; err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb); if (err < 0) return err; mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb, mt76u_complete_tx, &q->entry[idx]); q->head = (q->head + 1) % q->ndesc; q->entry[idx].skb = tx_info.skb; q->entry[idx].wcid = 0xffff; q->queued++; return idx; } static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) { struct urb *urb; int err; while (q->first != q->head) { urb = q->entry[q->first].urb; trace_submit_urb(dev, urb); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { if (err == -ENODEV) set_bit(MT76_REMOVED, &dev->phy.state); else dev_err(dev->dev, "tx urb submit failed:%d\n", err); break; } q->first = (q->first + 1) % q->ndesc; } } static void mt76u_ac_to_hwq(struct mt76_dev *dev, struct mt76_queue *q, u8 qid) { u8 ac = qid < IEEE80211_NUM_ACS ? qid : IEEE80211_AC_BE; switch (mt76_chip(dev)) { case 0x7663: { static const u8 lmac_queue_map[] = { /* ac to lmac mapping */ [IEEE80211_AC_BK] = 0, [IEEE80211_AC_BE] = 1, [IEEE80211_AC_VI] = 2, [IEEE80211_AC_VO] = 4, }; q->hw_idx = lmac_queue_map[ac]; q->ep = q->hw_idx + 1; break; } case 0x7961: case 0x7925: q->hw_idx = mt76_ac_to_hwq(ac); q->ep = qid == MT_TXQ_PSD ? 
MT_EP_OUT_HCCA : q->hw_idx + 1; break; default: q->hw_idx = mt76_ac_to_hwq(ac); q->ep = q->hw_idx + 1; break; } } static int mt76u_alloc_tx(struct mt76_dev *dev) { int i; for (i = 0; i <= MT_TXQ_PSD; i++) { struct mt76_queue *q; int j, err; q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL); if (!q) return -ENOMEM; spin_lock_init(&q->lock); mt76u_ac_to_hwq(dev, q, i); dev->phy.q_tx[i] = q; q->entry = devm_kcalloc(dev->dev, MT_NUM_TX_ENTRIES, sizeof(*q->entry), GFP_KERNEL); if (!q->entry) return -ENOMEM; q->ndesc = MT_NUM_TX_ENTRIES; for (j = 0; j < q->ndesc; j++) { err = mt76u_urb_alloc(dev, &q->entry[j], MT_TX_SG_MAX_SIZE); if (err < 0) return err; } } return 0; } static void mt76u_free_tx(struct mt76_dev *dev) { int i; mt76_worker_teardown(&dev->usb.status_worker); for (i = 0; i <= MT_TXQ_PSD; i++) { struct mt76_queue *q; int j; q = dev->phy.q_tx[i]; if (!q) continue; for (j = 0; j < q->ndesc; j++) { usb_free_urb(q->entry[j].urb); q->entry[j].urb = NULL; } } } void mt76u_stop_tx(struct mt76_dev *dev) { int ret; mt76_worker_disable(&dev->usb.status_worker); ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy), HZ / 5); if (!ret) { struct mt76_queue_entry entry; struct mt76_queue *q; int i, j; dev_err(dev->dev, "timed out waiting for pending tx\n"); for (i = 0; i <= MT_TXQ_PSD; i++) { q = dev->phy.q_tx[i]; if (!q) continue; for (j = 0; j < q->ndesc; j++) usb_kill_urb(q->entry[j].urb); } mt76_worker_disable(&dev->tx_worker); /* On device removal we might queue skb's, but mt76u_tx_kick() * will fail to submit the urb, so clean up those skb's manually. */ for (i = 0; i <= MT_TXQ_PSD; i++) { q = dev->phy.q_tx[i]; if (!q) continue; while (q->queued > 0) { entry = q->entry[q->tail]; q->entry[q->tail].done = false; mt76_queue_tx_complete(dev, q, &entry); } } mt76_worker_enable(&dev->tx_worker); } cancel_work_sync(&dev->usb.stat_work); clear_bit(MT76_READING_STATS, &dev->phy.state); mt76_worker_enable(&dev->usb.status_worker); mt76_tx_status_check(dev, true); } EXPORT_SYMBOL_GPL(mt76u_stop_tx); void mt76u_queues_deinit(struct mt76_dev *dev) { mt76u_stop_rx(dev); mt76u_stop_tx(dev); mt76u_free_rx(dev); mt76u_free_tx(dev); } EXPORT_SYMBOL_GPL(mt76u_queues_deinit); int mt76u_alloc_queues(struct mt76_dev *dev) { int err; err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN); if (err < 0) return err; return mt76u_alloc_tx(dev); } EXPORT_SYMBOL_GPL(mt76u_alloc_queues); static const struct mt76_queue_ops usb_queue_ops = { .tx_queue_skb = mt76u_tx_queue_skb, .kick = mt76u_tx_kick, }; int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf, struct mt76_bus_ops *ops) { struct usb_device *udev = interface_to_usbdev(intf); struct mt76_usb *usb = &dev->usb; int err; INIT_WORK(&usb->stat_work, mt76u_tx_status_data); usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0)); if (usb->data_len < 32) usb->data_len = 32; usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL); if (!usb->data) return -ENOMEM; mutex_init(&usb->usb_ctrl_mtx); dev->bus = ops; dev->queue_ops = &usb_queue_ops; dev_set_drvdata(&udev->dev, dev); usb->sg_en = mt76u_check_sg(dev); err = mt76u_set_endpoints(intf, usb); if (err < 0) return err; err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker, "usb-rx"); if (err) return err; err = mt76_worker_setup(dev->hw, &usb->status_worker, mt76u_status_worker, "usb-status"); if (err) return err; sched_set_fifo_low(usb->rx_worker.task); sched_set_fifo_low(usb->status_worker.task); return 0; } EXPORT_SYMBOL_GPL(__mt76u_init); int mt76u_init(struct mt76_dev *dev, struct
usb_interface *intf) { static struct mt76_bus_ops bus_ops = { .rr = mt76u_rr, .wr = mt76u_wr, .rmw = mt76u_rmw, .read_copy = mt76u_read_copy, .write_copy = mt76u_copy, .wr_rp = mt76u_wr_rp, .rd_rp = mt76u_rd_rp, .type = MT76_BUS_USB, }; return __mt76u_init(dev, intf, &bus_ops); } EXPORT_SYMBOL_GPL(mt76u_init); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>"); MODULE_DESCRIPTION("MediaTek MT76x USB helpers"); MODULE_LICENSE("Dual BSD/GPL"); |
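/*
 * Editor's illustrative sketch (not part of mt76): how a caller might use
 * the mt76_bus_ops table registered by mt76u_init() above. The register
 * address 0x1000 and the function name are made up for illustration; the
 * ops themselves serialize every access on usb_ctrl_mtx.
 */
static __maybe_unused void example_bus_ops_usage(struct mt76_dev *dev)
{
	/* Plain read and write through the USB vendor-request backend. */
	u32 val = dev->bus->rr(dev, 0x1000);

	dev->bus->wr(dev, 0x1000, val | BIT(0));

	/* Read-modify-write in one locked step: the masked bits are
	 * cleared, then the new value is ORed in (see mt76u_rmw() above).
	 */
	dev->bus->rmw(dev, 0x1000, GENMASK(7, 0), 0x20);
}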
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_SMP_H #define _ASM_X86_SMP_H #ifndef __ASSEMBLER__ #include <linux/cpumask.h> #include <linux/thread_info.h> #include <asm/cpumask.h> DECLARE_PER_CPU_CACHE_HOT(int, cpu_number); DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map); /* cpus sharing the last level cache: */ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map); DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_apicid); DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid); struct task_struct; struct smp_ops { void (*smp_prepare_boot_cpu)(void); void (*smp_prepare_cpus)(unsigned max_cpus); void (*smp_cpus_done)(unsigned max_cpus); void (*stop_other_cpus)(int wait); void (*crash_stop_other_cpus)(void); void (*smp_send_reschedule)(int cpu); void (*cleanup_dead_cpu)(unsigned cpu); void (*poll_sync_state)(void); int (*kick_ap_alive)(unsigned cpu, struct task_struct *tidle); int (*cpu_disable)(void); void (*cpu_die)(unsigned int cpu); void (*play_dead)(void); void (*stop_this_cpu)(void); void (*send_call_func_ipi)(const struct cpumask *mask); void (*send_call_func_single_ipi)(int cpu); }; /* Globals due to paravirt */ extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP extern struct smp_ops smp_ops; static inline void smp_send_stop(void) { smp_ops.stop_other_cpus(0); } static inline void stop_other_cpus(void) { smp_ops.stop_other_cpus(1); } static inline void smp_prepare_cpus(unsigned int max_cpus) { smp_ops.smp_prepare_cpus(max_cpus); } static inline void smp_cpus_done(unsigned int max_cpus) { smp_ops.smp_cpus_done(max_cpus); } static inline int __cpu_disable(void) { return smp_ops.cpu_disable(); } static inline void __cpu_die(unsigned int cpu) { if (smp_ops.cpu_die) smp_ops.cpu_die(cpu); } static inline void __noreturn play_dead(void) { smp_ops.play_dead(); BUG(); } static inline void arch_smp_send_reschedule(int cpu) { smp_ops.smp_send_reschedule(cpu); } static inline void arch_send_call_function_single_ipi(int cpu) { smp_ops.send_call_func_single_ipi(cpu); } static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) { smp_ops.send_call_func_ipi(mask); } void cpu_disable_common(void); void native_smp_prepare_boot_cpu(void); void smp_prepare_cpus_common(void); void native_smp_prepare_cpus(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus); int common_cpu_up(unsigned int cpunum, struct task_struct *tidle); int native_kick_ap(unsigned int cpu, struct task_struct *tidle); int native_cpu_disable(void); void __noreturn hlt_play_dead(void); void native_play_dead(void); void play_dead_common(void); void wbinvd_on_cpu(int cpu); int wbinvd_on_all_cpus(void); void smp_kick_mwait_play_dead(void); void __noreturn
mwait_play_dead(unsigned int eax_hint); void native_smp_send_reschedule(int cpu); void native_send_call_func_ipi(const struct cpumask *mask); void native_send_call_func_single_ipi(int cpu); asmlinkage __visible void smp_reboot_interrupt(void); __visible void smp_reschedule_interrupt(struct pt_regs *regs); __visible void smp_call_function_interrupt(struct pt_regs *regs); __visible void smp_call_function_single_interrupt(struct pt_regs *r); #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) #define cpu_acpi_id(cpu) per_cpu(x86_cpu_to_acpiid, cpu) /* * This function is needed by all SMP systems. It must _always_ be valid * from the initial startup. */ #define raw_smp_processor_id() this_cpu_read(cpu_number) #define __smp_processor_id() __this_cpu_read(cpu_number) static inline struct cpumask *cpu_llc_shared_mask(int cpu) { return per_cpu(cpu_llc_shared_map, cpu); } static inline struct cpumask *cpu_l2c_shared_mask(int cpu) { return per_cpu(cpu_l2c_shared_map, cpu); } #else /* !CONFIG_SMP */ #define wbinvd_on_cpu(cpu) wbinvd() static inline int wbinvd_on_all_cpus(void) { wbinvd(); return 0; } static inline struct cpumask *cpu_llc_shared_mask(int cpu) { return (struct cpumask *)cpumask_of(0); } static inline void __noreturn mwait_play_dead(unsigned int eax_hint) { BUG(); } #endif /* CONFIG_SMP */ #ifdef CONFIG_DEBUG_NMI_SELFTEST extern void nmi_selftest(void); #else #define nmi_selftest() do { } while (0) #endif extern unsigned int smpboot_control; extern unsigned long apic_mmio_base; #endif /* !__ASSEMBLER__ */ /* Control bits for startup_64 */ #define STARTUP_READ_APICID 0x80000000 /* Top 8 bits are reserved for control */ #define STARTUP_PARALLEL_MASK 0xFF000000 #endif /* _ASM_X86_SMP_H */ |
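/*
 * Editor's illustrative sketch (not part of this header): platform code can
 * override individual smp_ops hooks, which is how paravirt guests redirect
 * IPIs while the remaining hooks keep their native_* defaults. The
 * example_* names below are hypothetical.
 */
#include <linux/init.h>
#include <linux/printk.h>
#include <asm/smp.h>

/* hypothetical replacement for the reschedule IPI */
static void example_send_reschedule(int cpu)
{
	pr_debug("reschedule IPI -> CPU %d\n", cpu);
	/* ...signal the hypervisor/event channel here... */
}

static void __init example_hook_smp_ops(void)
{
	/* Replace just one hook in the global ops table. */
	smp_ops.smp_send_reschedule = example_send_reschedule;
}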
// SPDX-License-Identifier: GPL-2.0 /* * High-level sync()-related operations */ #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/namei.h> #include <linux/sched.h> #include <linux/writeback.h> #include <linux/syscalls.h> #include <linux/linkage.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include <linux/backing-dev.h> #include "internal.h" #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ SYNC_FILE_RANGE_WAIT_AFTER) /* * Write out and wait upon all dirty data associated with this * superblock. Filesystem data as well as the underlying block * device. Takes the superblock lock. */ int sync_filesystem(struct super_block *sb) { int ret = 0; /* * We need to be protected against the filesystem going from * r/o to r/w or vice versa. */ WARN_ON(!rwsem_is_locked(&sb->s_umount)); /* * No point in syncing out anything if the filesystem is read-only. */ if (sb_rdonly(sb)) return 0; /* * Do the filesystem syncing work. For simple filesystems * writeback_inodes_sb(sb) just dirties buffers with inodes so we have * to submit I/O for these buffers via sync_blockdev(). This also * speeds up the wait == 1 case since in that case write_inode() * methods call sync_dirty_buffer() and thus effectively write one block * at a time. */ writeback_inodes_sb(sb, WB_REASON_SYNC); if (sb->s_op->sync_fs) { ret = sb->s_op->sync_fs(sb, 0); if (ret) return ret; } ret = sync_blockdev_nowait(sb->s_bdev); if (ret) return ret; sync_inodes_sb(sb); if (sb->s_op->sync_fs) { ret = sb->s_op->sync_fs(sb, 1); if (ret) return ret; } return sync_blockdev(sb->s_bdev); } EXPORT_SYMBOL(sync_filesystem); static void sync_inodes_one_sb(struct super_block *sb, void *arg) { if (!sb_rdonly(sb)) sync_inodes_sb(sb); } static void sync_fs_one_sb(struct super_block *sb, void *arg) { if (!sb_rdonly(sb) && !(sb->s_iflags & SB_I_SKIP_SYNC) && sb->s_op->sync_fs) sb->s_op->sync_fs(sb, *(int *)arg); } /* * Sync everything.
We start by waking flusher threads so that most of * writeback runs on all devices in parallel. Then we sync all inodes reliably * which effectively also waits for all flusher threads to finish doing * writeback. At this point all data is on disk so metadata should be stable * and we tell filesystems to sync their metadata via ->sync_fs() calls. * Finally, we writeout all block devices because some filesystems (e.g. ext2) * just write metadata (such as inodes or bitmaps) to block device page cache * and do not sync it on their own in ->sync_fs(). */ void ksys_sync(void) { int nowait = 0, wait = 1; wakeup_flusher_threads(WB_REASON_SYNC); iterate_supers(sync_inodes_one_sb, NULL); iterate_supers(sync_fs_one_sb, &nowait); iterate_supers(sync_fs_one_sb, &wait); sync_bdevs(false); sync_bdevs(true); if (unlikely(laptop_mode)) laptop_sync_completion(); } SYSCALL_DEFINE0(sync) { ksys_sync(); return 0; } static void do_sync_work(struct work_struct *work) { int nowait = 0; /* * Sync twice to reduce the possibility we skipped some inodes / pages * because they were temporarily locked */ iterate_supers(sync_inodes_one_sb, &nowait); iterate_supers(sync_fs_one_sb, &nowait); sync_bdevs(false); iterate_supers(sync_inodes_one_sb, &nowait); iterate_supers(sync_fs_one_sb, &nowait); sync_bdevs(false); printk("Emergency Sync complete\n"); kfree(work); } void emergency_sync(void) { struct work_struct *work; work = kmalloc(sizeof(*work), GFP_ATOMIC); if (work) { INIT_WORK(work, do_sync_work); schedule_work(work); } } /* * sync a single super */ SYSCALL_DEFINE1(syncfs, int, fd) { CLASS(fd, f)(fd); struct super_block *sb; int ret, ret2; if (fd_empty(f)) return -EBADF; sb = fd_file(f)->f_path.dentry->d_sb; down_read(&sb->s_umount); ret = sync_filesystem(sb); up_read(&sb->s_umount); ret2 = errseq_check_and_advance(&sb->s_wb_err, &fd_file(f)->f_sb_err); return ret ? ret : ret2; } /** * vfs_fsync_range - helper to sync a range of data & metadata to disk * @file: file to sync * @start: offset in bytes of the beginning of data range to sync * @end: offset in bytes of the end of data range (inclusive) * @datasync: perform only datasync * * Write back data in range @start..@end and metadata for @file to disk. If * @datasync is set only metadata needed to access modified file data is * written. */ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; if (!file->f_op->fsync) return -EINVAL; if (!datasync && (inode->i_state & I_DIRTY_TIME)) mark_inode_dirty_sync(inode); return file->f_op->fsync(file, start, end, datasync); } EXPORT_SYMBOL(vfs_fsync_range); /** * vfs_fsync - perform a fsync or fdatasync on a file * @file: file to sync * @datasync: only perform a fdatasync operation * * Write back data and metadata for @file to disk. If @datasync is * set only metadata needed to access modified file data is written. 
*/ int vfs_fsync(struct file *file, int datasync) { return vfs_fsync_range(file, 0, LLONG_MAX, datasync); } EXPORT_SYMBOL(vfs_fsync); static int do_fsync(unsigned int fd, int datasync) { CLASS(fd, f)(fd); if (fd_empty(f)) return -EBADF; return vfs_fsync(fd_file(f), datasync); } SYSCALL_DEFINE1(fsync, unsigned int, fd) { return do_fsync(fd, 0); } SYSCALL_DEFINE1(fdatasync, unsigned int, fd) { return do_fsync(fd, 1); } int sync_file_range(struct file *file, loff_t offset, loff_t nbytes, unsigned int flags) { int ret; struct address_space *mapping; loff_t endbyte; /* inclusive */ umode_t i_mode; ret = -EINVAL; if (flags & ~VALID_FLAGS) goto out; endbyte = offset + nbytes; if ((s64)offset < 0) goto out; if ((s64)endbyte < 0) goto out; if (endbyte < offset) goto out; if (sizeof(pgoff_t) == 4) { if (offset >= (0x100000000ULL << PAGE_SHIFT)) { /* * The range starts outside a 32 bit machine's * pagecache addressing capabilities. Let it "succeed" */ ret = 0; goto out; } if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) { /* * Out to EOF */ nbytes = 0; } } if (nbytes == 0) endbyte = LLONG_MAX; else endbyte--; /* inclusive */ i_mode = file_inode(file)->i_mode; ret = -ESPIPE; if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) && !S_ISLNK(i_mode)) goto out; mapping = file->f_mapping; ret = 0; if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) { ret = file_fdatawait_range(file, offset, endbyte); if (ret < 0) goto out; } if (flags & SYNC_FILE_RANGE_WRITE) { int sync_mode = WB_SYNC_NONE; if ((flags & SYNC_FILE_RANGE_WRITE_AND_WAIT) == SYNC_FILE_RANGE_WRITE_AND_WAIT) sync_mode = WB_SYNC_ALL; ret = __filemap_fdatawrite_range(mapping, offset, endbyte, sync_mode); if (ret < 0) goto out; } if (flags & SYNC_FILE_RANGE_WAIT_AFTER) ret = file_fdatawait_range(file, offset, endbyte); out: return ret; } /* * ksys_sync_file_range() permits finely controlled syncing over a segment of * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is * zero then ksys_sync_file_range() will operate from offset out to EOF. * * The flag bits are: * * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range * before performing the write. * * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the * range which are not presently under writeback. Note that this may block for * significant periods due to exhaustion of disk request structures. * * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range * after performing the write. * * Useful combinations of the flag bits are: * * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages * in the range which were dirty on entry to ksys_sync_file_range() are placed * under writeout. This is a start-write-for-data-integrity operation. * * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which * are not presently under writeout. This is an asynchronous flush-to-disk * operation. Not suitable for data integrity operations. * * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for * completion of writeout of all pages in the range. This will be used after an * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait * for that operation to complete and to return the result. * * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER * (a.k.a. SYNC_FILE_RANGE_WRITE_AND_WAIT): * a traditional sync() operation. 
This is a write-for-data-integrity operation * which will ensure that all pages in the range which were dirty on entry to * ksys_sync_file_range() are written to disk. It should be noted that disk * caches are not flushed by this call, so there are no guarantees here that the * data will be available on disk after a crash. * * * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any * I/O errors or ENOSPC conditions and will return those to the caller, after * clearing the EIO and ENOSPC flags in the address_space. * * It should be noted that none of these operations write out the file's * metadata. So unless the application is strictly performing overwrites of * already-instantiated disk blocks, there are no guarantees here that the data * will be available after a crash. */ int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes, unsigned int flags) { CLASS(fd, f)(fd); if (fd_empty(f)) return -EBADF; return sync_file_range(fd_file(f), offset, nbytes, flags); } SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes, unsigned int, flags) { return ksys_sync_file_range(fd, offset, nbytes, flags); } #if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_SYNC_FILE_RANGE) COMPAT_SYSCALL_DEFINE6(sync_file_range, int, fd, compat_arg_u64_dual(offset), compat_arg_u64_dual(nbytes), unsigned int, flags) { return ksys_sync_file_range(fd, compat_arg_u64_glue(offset), compat_arg_u64_glue(nbytes), flags); } #endif /* It would be nice if people remember that not all the world's an i386 when they introduce new system calls */ SYSCALL_DEFINE4(sync_file_range2, int, fd, unsigned int, flags, loff_t, offset, loff_t, nbytes) { return ksys_sync_file_range(fd, offset, nbytes, flags); } |
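/*
 * Editor's illustrative userspace sketch (not part of this file): the
 * write-for-data-integrity flag combination documented above, invoked
 * through the glibc sync_file_range(2) wrapper. flush_range() is a
 * hypothetical helper; note that, as the comment above says, this still
 * flushes neither the disk write cache nor the file's metadata.
 */
#define _GNU_SOURCE
#include <fcntl.h>

static int flush_range(int fd, off64_t offset, off64_t nbytes)
{
	/* Wait for writeback already in flight, start writeout of all
	 * dirty pages in the range, then wait for that writeout too.
	 */
	return sync_file_range(fd, offset, nbytes,
			       SYNC_FILE_RANGE_WAIT_BEFORE |
			       SYNC_FILE_RANGE_WRITE |
			       SYNC_FILE_RANGE_WAIT_AFTER);
}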
// SPDX-License-Identifier: GPL-2.0-only /* * Lock-less NULL terminated single linked list * * The basic atomic operation of this list is cmpxchg on long. On * architectures that don't have NMI-safe cmpxchg implementation, the * list can NOT be used in NMI handlers. So code that uses the list in * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. * * Copyright 2010,2011 Intel Corp. * Author: Huang Ying <ying.huang@intel.com> */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/llist.h> /** * llist_del_first - delete the first entry of lock-less list * @head: the head for your lock-less list * * If list is empty, return NULL, otherwise, return the first entry * deleted, this is the newest added one. * * Only one llist_del_first user can be used simultaneously with * multiple llist_add users without lock. Because otherwise * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add, * llist_add) sequence in another user may change @head->first->next, * but keep @head->first. If multiple consumers are needed, please * use llist_del_all or use lock between consumers. */ struct llist_node *llist_del_first(struct llist_head *head) { struct llist_node *entry, *next; entry = smp_load_acquire(&head->first); do { if (entry == NULL) return NULL; next = READ_ONCE(entry->next); } while (!try_cmpxchg(&head->first, &entry, next)); return entry; } EXPORT_SYMBOL_GPL(llist_del_first); /** * llist_del_first_this - delete given entry of lock-less list if it is first * @head: the head for your lock-less list * @this: a list entry. * * If head of the list is given entry, delete and return %true else * return %false. * * Multiple callers can safely call this concurrently with multiple * llist_add() callers, providing all the callers offer a different @this. */ bool llist_del_first_this(struct llist_head *head, struct llist_node *this) { struct llist_node *entry, *next; /* acquire ensures ordering wrt try_cmpxchg() in llist_del_first() */ entry = smp_load_acquire(&head->first); do { if (entry != this) return false; next = READ_ONCE(entry->next); } while (!try_cmpxchg(&head->first, &entry, next)); return true; } EXPORT_SYMBOL_GPL(llist_del_first_this); /** * llist_reverse_order - reverse order of a llist chain * @head: first item of the list to be reversed * * Reverse the order of a chain of llist entries and return the * new first entry. */ struct llist_node *llist_reverse_order(struct llist_node *head) { struct llist_node *new_head = NULL; while (head) { struct llist_node *tmp = head; head = head->next; tmp->next = new_head; new_head = tmp; } return new_head; } EXPORT_SYMBOL_GPL(llist_reverse_order); |
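/*
 * Editor's illustrative sketch (not part of this file): the intended
 * many-producer / single-consumer pattern for this list. struct
 * example_item and the two helpers are hypothetical.
 */
#include <linux/llist.h>
#include <linux/slab.h>

struct example_item {
	struct llist_node node;
	int payload;
};

static LLIST_HEAD(example_list);

/* Producers: lockless push, safe to call concurrently. */
static void example_produce(int v)
{
	struct example_item *it = kmalloc(sizeof(*it), GFP_ATOMIC);

	if (!it)
		return;
	it->payload = v;
	llist_add(&it->node, &example_list);
}

/* Single consumer: detach the whole chain at once. Entries come back
 * newest-first, so reverse them to process in insertion order.
 */
static void example_consume(void)
{
	struct llist_node *first = llist_del_all(&example_list);
	struct example_item *it, *tmp;

	llist_for_each_entry_safe(it, tmp, llist_reverse_order(first), node)
		kfree(it);
}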
// SPDX-License-Identifier: GPL-2.0-or-later /* * DivIO nw80x subdriver * * Copyright (C) 2011 Jean-François Moine (http://moinejf.free.fr) * Copyright (C) 2003 Sylvain Munaut <tnt@246tNt.com> * Kjell Claesson <keyson@users.sourceforge.net> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "nw80x" #include "gspca.h" MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>"); MODULE_DESCRIPTION("NW80x USB Camera Driver"); MODULE_LICENSE("GPL"); static int webcam; /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !!
must be the first item */ u32 ae_res; s8 ag_cnt; #define AG_CNT_START 13 u8 exp_too_low_cnt; u8 exp_too_high_cnt; u8 bridge; u8 webcam; }; enum bridges { BRIDGE_NW800, /* and et31x110 */ BRIDGE_NW801, BRIDGE_NW802, }; enum webcams { Generic800, SpaceCam, /* Trust 120 SpaceCam */ SpaceCam2, /* other Trust 120 SpaceCam */ Cvideopro, /* Conceptronic Video Pro */ Dlink350c, DS3303u, Kr651us, Kritter, Mustek300, Proscope, Twinkle, DvcV6, P35u, Generic802, NWEBCAMS /* number of webcams */ }; static const u8 webcam_chip[NWEBCAMS] = { [Generic800] = BRIDGE_NW800, /* 06a5:0000 * Typhoon Webcam 100 USB */ [SpaceCam] = BRIDGE_NW800, /* 06a5:d800 * Trust SpaceCam120 or SpaceCam100 PORTABLE */ [SpaceCam2] = BRIDGE_NW800, /* 06a5:d800 - pas106 * other Trust SpaceCam120 or SpaceCam100 PORTABLE */ [Cvideopro] = BRIDGE_NW802, /* 06a5:d001 * Conceptronic Video Pro 'CVIDEOPRO USB Webcam CCD' */ [Dlink350c] = BRIDGE_NW802, /* 06a5:d001 * D-Link NetQam Pro 250plus */ [DS3303u] = BRIDGE_NW801, /* 06a5:d001 * Plustek Opticam 500U or ProLink DS3303u */ [Kr651us] = BRIDGE_NW802, /* 06a5:d001 * Panasonic GP-KR651US */ [Kritter] = BRIDGE_NW802, /* 06a5:d001 * iRez Kritter cam */ [Mustek300] = BRIDGE_NW802, /* 055f:d001 * Mustek Wcam 300 mini */ [Proscope] = BRIDGE_NW802, /* 06a5:d001 * Scalar USB Microscope (ProScope) */ [Twinkle] = BRIDGE_NW800, /* 06a5:d800 - hv7121b? (seems pas106) * Divio Chicony TwinkleCam * DSB-C110 */ [DvcV6] = BRIDGE_NW802, /* 0502:d001 * DVC V6 */ [P35u] = BRIDGE_NW801, /* 052b:d001, 06a5:d001 and 06be:d001 * EZCam Pro p35u */ [Generic802] = BRIDGE_NW802, }; /* * other webcams: * - nw801 046d:d001 * Logitech QuickCam Pro (dark focus ring) * - nw801 0728:d001 * AVerMedia Camguard * - nw??? 06a5:d001 * D-Link NetQam Pro 250plus * - nw800 065a:d800 * Showcam NGS webcam * - nw??? ????:???? 
* Sceptre svc300 */ /* * registers * nw800/et31x110 nw801 nw802 * 0000..009e 0000..00a1 0000..009e * 0200..0211 id id * 0300..0302 id id * 0400..0406 (inex) 0400..0406 * 0500..0505 0500..0506 (inex) * 0600..061a 0600..0601 0600..0601 * 0800..0814 id id * 1000..109c 1000..10a1 1000..109a */ /* resolutions * nw800: 320x240, 352x288 * nw801/802: 320x240, 640x480 */ static const struct v4l2_pix_format cif_mode[] = { {320, 240, V4L2_PIX_FMT_JPGL, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 4 / 8, .colorspace = V4L2_COLORSPACE_JPEG}, {352, 288, V4L2_PIX_FMT_JPGL, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 * 4 / 8, .colorspace = V4L2_COLORSPACE_JPEG} }; static const struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_JPGL, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 4 / 8, .colorspace = V4L2_COLORSPACE_JPEG}, {640, 480, V4L2_PIX_FMT_JPGL, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8, .colorspace = V4L2_COLORSPACE_JPEG}, }; /* * The sequences below contain: * - 1st and 2nd bytes: either * - register number (BE) * - I2C0 + i2c address * - 3rd byte: data length (=0 for end of sequence) * - n bytes: data */ #define I2C0 0xff static const u8 nw800_init[] = { 0x04, 0x05, 0x01, 0x61, 0x04, 0x04, 0x01, 0x01, 0x04, 0x06, 0x01, 0x04, 0x04, 0x04, 0x03, 0x00, 0x00, 0x00, 0x05, 0x05, 0x01, 0x00, 0, 0, 0 }; static const u8 nw800_start[] = { 0x04, 0x06, 0x01, 0xc0, 0x00, 0x00, 0x40, 0x10, 0x43, 0x00, 0xb4, 0x01, 0x10, 0x00, 0x4f, 0xef, 0x0e, 0x00, 0x74, 0x01, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x3e, 0x00, 0x24, 0x03, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xa0, 0x48, 0xc3, 0x02, 0x88, 0x0c, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x06, 0x00, 0x08, 0x00, 0x32, 0x01, 0x01, 0x00, 0x16, 0x00, 0x04, 0x00, 0x4b, 0x00, 0x76, 0x00, 0x86, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x61, 0xc0, 0x05, 0x00, 0x06, 0xe8, 0x00, 0x00, 0x00, 0x20, 0x20, 0x06, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x83, 0x02, 0x20, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 
0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1d, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x00, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 0x00, 0x00, 0x04, 0x04, 0x01, 0xff, 0x04, 0x06, 0x01, 0xc4, 0x04, 0x06, 0x01, 0xc0, 0x00, 0x00, 0x40, 0x10, 0x43, 0x00, 0xb4, 0x01, 0x10, 0x00, 0x4f, 0xef, 0x0e, 0x00, 0x74, 0x01, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x3e, 0x00, 0x24, 0x03, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xa0, 0x48, 0xc3, 0x02, 0x88, 0x0c, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x06, 0x00, 0x08, 0x00, 0x32, 0x01, 0x01, 0x00, 0x16, 0x00, 0x04, 0x00, 0x4b, 0x00, 0x76, 0x00, 0x86, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x61, 0xc0, 0x05, 0x00, 0x06, 0xe8, 0x00, 0x00, 0x00, 0x20, 0x20, 0x06, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x83, 0x02, 0x20, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1d, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x00, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 0x00, 0x00, 0x02, 0x00, 0x11, 0x48, 0x58, 0x9e, 0x48, 0x58, 0x00, 0x00, 0x00, 0x00, 0x84, 0x36, 0x05, 0x01, 0xf2, 0x86, 0x65, 0x40, 0x00, 0x80, 0x01, 0xa0, 0x10, 0x1a, 0x01, 0x00, 0x00, 0x91, 0x02, 0x6c, 0x01, 0x00, 0x03, 0x02, 0xc8, 0x01, 0x10, 0x1a, 0x01, 0x00, 0x10, 0x00, 0x01, 0x83, 0x10, 0x8f, 0x0c, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 
0x10, 0x85, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x10, 0x1b, 0x02, 0x69, 0x00, 0x10, 0x11, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x05, 0x02, 0x01, 0x02, 0x06, 0x00, 0x02, 0x04, 0xd9, 0x05, 0x05, 0x01, 0x20, 0x05, 0x05, 0x01, 0x21, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x08, 0x21, 0x3d, 0x52, 0x63, 0x75, 0x83, 0x91, 0x9e, 0xaa, 0xb6, 0xc1, 0xcc, 0xd6, 0xe0, 0xea, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x13, 0x13, 0x10, 0x03, 0x01, 0x14, 0x10, 0x41, 0x11, 0x00, 0x08, 0x21, 0x3d, 0x52, 0x63, 0x75, 0x83, 0x91, 0x9e, 0xaa, 0xb6, 0xc1, 0xcc, 0xd6, 0xe0, 0xea, 0x10, 0x0b, 0x01, 0x14, 0x10, 0x0d, 0x01, 0x20, 0x10, 0x0c, 0x01, 0x34, 0x04, 0x06, 0x01, 0xc3, 0x04, 0x04, 0x01, 0x00, 0x05, 0x02, 0x01, 0x02, 0x06, 0x00, 0x02, 0x00, 0x48, 0x05, 0x05, 0x01, 0x20, 0x05, 0x05, 0x01, 0x21, 0, 0, 0 }; /* 06a5:d001 - nw801 - Panasonic * P35u */ static const u8 nw801_start_1[] = { 0x05, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x0e, 0x00, 0x00, 0xf9, 0x02, 0x11, 0x00, 0x0e, 0x01, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0xce, 0x00, 0xf4, 0x05, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x22, 0xb4, 0x6f, 0x3f, 0x0f, 0x88, 0x20, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0xa8, 0x1f, 0x00, 0x0d, 0x02, 0x07, 0x00, 0x01, 0x00, 0x19, 0x00, 0xf2, 0x00, 0x18, 0x06, 0x10, 0x06, 0x10, 0x00, 0x36, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x05, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x22, 0x02, 0x80, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x15, 0x08, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x35, 0xfd, 0x07, 0x3d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x14, 0x02, 0x00, 0x01, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x40, 0x20, 0x10, 0x06, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0x10, 0x40, 0x40, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x10, 0x80, 0x22, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0, 0, 0, }; static const u8 nw801_start_qvga[] = { 0x02, 0x00, 0x10, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x18, 0x0b, 0x06, 0xa2, 0x86, 0x78, 
0x02, 0x0f, 0x01, 0x6b, 0x10, 0x1a, 0x01, 0x15, 0x00, 0x00, 0x01, 0x1e, 0x10, 0x00, 0x01, 0x2f, 0x10, 0x8c, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x11, 0x08, 0x29, 0x00, 0x18, 0x01, 0x1f, 0x00, 0xd2, 0x00, /* AE window */ 0, 0, 0, }; static const u8 nw801_start_vga[] = { 0x02, 0x00, 0x10, 0x78, 0xa0, 0x97, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xf0, 0x02, 0x0f, 0x01, 0xd5, 0x10, 0x1a, 0x01, 0x15, 0x00, 0x00, 0x01, 0x0e, 0x10, 0x00, 0x01, 0x22, 0x10, 0x8c, 0x08, 0x00, 0x00, 0x7f, 0x02, 0x00, 0x00, 0xdf, 0x01, 0x10, 0x11, 0x08, 0x51, 0x00, 0x30, 0x02, 0x3d, 0x00, 0xa4, 0x01, 0, 0, 0, }; static const u8 nw801_start_2[] = { 0x10, 0x04, 0x01, 0x1a, 0x10, 0x19, 0x01, 0x09, /* clock */ 0x10, 0x24, 0x06, 0xc0, 0x00, 0x3f, 0x02, 0x00, 0x01, /* .. gain .. */ 0x00, 0x03, 0x02, 0x92, 0x03, 0x00, 0x1d, 0x04, 0xf2, 0x00, 0x24, 0x07, 0x00, 0x7b, 0x01, 0xcf, 0x10, 0x94, 0x01, 0x07, 0x05, 0x05, 0x01, 0x01, 0x05, 0x04, 0x01, 0x01, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x48, 0x11, 0x00, 0x37, 0x55, 0x6b, 0x7d, 0x8d, 0x9b, 0xa8, 0xb4, 0xbf, 0xca, 0xd4, 0xdd, 0xe6, 0xef, 0xf0, 0xf0, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x0c, 0x0c, 0x10, 0x03, 0x01, 0x08, 0x10, 0x48, 0x11, 0x00, 0x37, 0x55, 0x6b, 0x7d, 0x8d, 0x9b, 0xa8, 0xb4, 0xbf, 0xca, 0xd4, 0xdd, 0xe6, 0xef, 0xf0, 0xf0, 0x10, 0x0b, 0x01, 0x0b, 0x10, 0x0d, 0x01, 0x0b, 0x10, 0x0c, 0x01, 0x1f, 0x05, 0x06, 0x01, 0x03, 0, 0, 0 }; /* nw802 (sharp IR3Y38M?) */ static const u8 nw802_start[] = { 0x04, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x10, 0x00, 0x00, 0xf9, 0x02, 0x10, 0x00, 0x4d, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0xce, 0x00, 0xf4, 0x05, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xb4, 0x6f, 0x3f, 0x0f, 0x88, 0x20, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x01, 0x00, 0x16, 0x00, 0x94, 0x00, 0x10, 0x06, 0x08, 0x00, 0x18, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x21, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa1, 0x02, 0x80, 0x00, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0xff, 0x01, 0xc0, 0x00, 0x14, 0x02, 0x00, 0x01, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 
0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x05, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x10, 0x02, 0xf2, 0x8f, 0x78, 0x40, 0x10, 0x1a, 0x01, 0x00, 0x10, 0x00, 0x01, 0xad, 0x00, 0x00, 0x01, 0x08, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x51, 0x00, 0xf0, 0x00, 0x3d, 0x00, 0xb4, 0x00, 0x10, 0x1d, 0x08, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x10, 0x0e, 0x01, 0x27, 0x10, 0x41, 0x11, 0x00, 0x0e, 0x35, 0x4f, 0x62, 0x71, 0x7f, 0x8b, 0x96, 0xa0, 0xa9, 0xb2, 0xbb, 0xc3, 0xca, 0xd2, 0xd8, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x14, 0x14, 0x10, 0x03, 0x01, 0x0c, 0x10, 0x41, 0x11, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, /* 0x00, 0x0e, 0x35, 0x4f, 0x62, 0x71, 0x7f, 0x8b, * 0x96, 0xa0, 0xa9, 0xb2, 0xbb, 0xc3, 0xca, 0xd2, * 0xd8, */ 0x10, 0x0b, 0x01, 0x10, 0x10, 0x0d, 0x01, 0x11, 0x10, 0x0c, 0x01, 0x1c, 0x04, 0x06, 0x01, 0x03, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* et31x110 - Trust 120 SpaceCam */ static const u8 spacecam_init[] = { 0x04, 0x05, 0x01, 0x01, 0x04, 0x04, 0x01, 0x01, 0x04, 0x06, 0x01, 0x04, 0x04, 0x04, 0x03, 0x00, 0x00, 0x00, 0x05, 0x05, 0x01, 0x00, 0, 0, 0 }; static const u8 spacecam_start[] = { 0x04, 0x06, 0x01, 0x44, 0x00, 0x00, 0x40, 0x10, 0x43, 0x00, 0xb4, 0x01, 0x10, 0x00, 0x4f, 0xef, 0x0e, 0x00, 0x74, 0x01, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x3e, 0x00, 0x24, 0x03, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xa0, 0x48, 0xc3, 0x02, 0x88, 0x0c, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x06, 0x00, 0x08, 0x00, 0x32, 0x01, 0x01, 0x00, 0x16, 0x00, 0x04, 0x00, 0x4b, 0x00, 0x7c, 0x00, 0x80, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x83, 0x02, 0x20, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 
0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1d, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x00, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 0x00, 0x00, 0x04, 0x06, 0x01, 0xc0, 0x10, 0x85, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x02, 0x00, 0x11, 0x48, 0x58, 0x9e, 0x48, 0x58, 0x00, 0x00, 0x00, 0x00, 0x84, 0x36, 0x05, 0x01, 0xf2, 0x86, 0x65, 0x40, 0x00, 0x80, 0x01, 0xa0, 0x10, 0x1a, 0x01, 0x00, 0x00, 0x91, 0x02, 0x32, 0x01, 0x00, 0x03, 0x02, 0x08, 0x02, 0x10, 0x00, 0x01, 0x83, 0x10, 0x8f, 0x0c, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 0x10, 0x11, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x64, 0x99, 0xc0, 0xe2, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x13, 0x13, 0x10, 0x03, 0x01, 0x06, 0x10, 0x41, 0x11, 0x00, 0x64, 0x99, 0xc0, 0xe2, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0x10, 0x0b, 0x01, 0x08, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x1f, 0x04, 0x06, 0x01, 0xc3, 0x04, 0x05, 0x01, 0x40, 0x04, 0x04, 0x01, 0x40, 0, 0, 0 }; /* et31x110 - pas106 - other Trust SpaceCam120 */ static const u8 spacecam2_start[] = { 0x04, 0x06, 0x01, 0x44, 0x04, 0x06, 0x01, 0x00, 0x00, 0x00, 0x40, 0x14, 0x83, 0x00, 0xba, 0x01, 0x10, 0x00, 0x4f, 0xef, 0x00, 0x00, 0x60, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x06, 0x00, 0xfc, 0x01, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xb8, 0x48, 0x0f, 0x04, 0x88, 0x14, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x01, 0x00, 0x03, 0x00, 0x24, 0x01, 0x01, 0x00, 0x16, 0x00, 0x04, 0x00, 0x4b, 0x00, 0x76, 0x00, 0x86, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x61, 0x00, 0x05, 0x00, 0x06, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x80, 0x02, 0x20, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1d, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 0x00, 0x00, 0x10, 0x85, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x04, 0x04, 0x01, 0x40, 0x04, 0x04, 0x01, 0x00, I2C0, 0x40, 0x0c, 0x02, 0x0c, 0x12, 0x07, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x05, 0x05, I2C0, 0x40, 0x02, 0x11, 0x06, I2C0, 0x40, 0x02, 0x14, 0x00, I2C0, 0x40, 0x02, 0x13, 0x01, /* i2c end */ 0x02, 0x00, 0x11, 0x48, 0x58, 0x9e, 0x48, 0x58, 0x00, 0x00, 0x00, 0x00, 0x84, 0x36, 0x05, 0x01, 0xf2, 0x86, 0x65, 0x40, I2C0, 0x40, 0x02, 0x02, 0x0c, /* pixel clock */ I2C0, 0x40, 0x02, 0x0f, 0x00, I2C0, 0x40, 0x02, 0x13, 0x01, /* i2c end */ 0x10, 0x00, 0x01, 0x01, 0x10, 0x8f, 0x0c, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, I2C0, 0x40, 0x02, 0x05, 0x0f, /* exposure */ I2C0, 0x40, 0x02, 0x13, 0x01, /* i2c end */ I2C0, 0x40, 0x07, 0x09, 0x0b, 0x0f, 0x05, 0x05, 0x0f, 0x00, /* gains */ I2C0, 0x40, 0x03, 0x12, 0x04, 0x01, 0x10, 0x11, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x17, 0x3f, 0x69, 0x7b, 0x8c, 0x9a, 0xa7, 0xb3, 0xbf, 0xc9, 0xd3, 0xdd, 0xe6, 0xef, 0xf7, 0xf9, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x13, 0x13, 0x10, 0x03, 0x01, 0x06, 0x10, 0x41, 0x11, 0x00, 0x17, 0x3f, 0x69, 0x7b, 0x8c, 0x9a, 0xa7, 0xb3, 0xbf, 0xc9, 0xd3, 0xdd, 0xe6, 0xef, 0xf7, 0xf9, 0x10, 0x0b, 0x01, 0x11, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x14, 0x04, 0x06, 0x01, 0x03, 0x04, 0x05, 0x01, 0x61, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* nw802 - Conceptronic Video Pro */ static const u8 cvideopro_start[] = { 0x04, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x54, 0x96, 0x98, 0xf9, 0x02, 0x18, 0x00, 0x4c, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x0b, 0x00, 0x1b, 0x00, 0xc8, 0x00, 0xf4, 0x05, 0xb4, 0x00, 0xcc, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa2, 0x00, 0xc6, 0x00, 0x60, 0x00, 0xc6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0xae, 0x00, 0xd2, 0x00, 0xae, 0x00, 0xd2, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa8, 0x00, 0xc0, 0x00, 0x66, 0x00, 0xc0, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x00, 0x5d, 0x00, 0xc7, 0x00, 0x7e, 0x00, 0x30, 0x00, 0x80, 0x1f, 0x98, 0x43, 0x3f, 0x0d, 0x88, 0x20, 0x80, 0x3f, 0x47, 0xaf, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x0c, 0x00, 0x1c, 0x00, 0x94, 0x00, 0x10, 0x06, 0x24, 0x00, 0x4a, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 
0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0xff, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa0, 0x02, 0x80, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x8c, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x3f, 0x06, 0xf2, 0x8f, 0xf0, 0x40, 0x10, 0x1a, 0x01, 0x03, 0x10, 0x00, 0x01, 0xac, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x3b, 0x01, 0x10, 0x11, 0x08, 0x61, 0x00, 0xe0, 0x00, 0x49, 0x00, 0xa8, 0x00, 0x10, 0x1f, 0x06, 0x01, 0x20, 0x02, 0xe8, 0x03, 0x00, 0x10, 0x1d, 0x02, 0x40, 0x06, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x0f, 0x46, 0x62, 0x76, 0x86, 0x94, 0xa0, 0xab, 0xb6, 0xbf, 0xc8, 0xcf, 0xd7, 0xdc, 0xdc, 0xdc, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x12, 0x12, 0x10, 0x03, 0x01, 0x0c, 0x10, 0x41, 0x11, 0x00, 0x0f, 0x46, 0x62, 0x76, 0x86, 0x94, 0xa0, 0xab, 0xb6, 0xbf, 0xc8, 0xcf, 0xd7, 0xdc, 0xdc, 0xdc, 0x10, 0x0b, 0x01, 0x09, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x2f, 0x04, 0x06, 0x01, 0x03, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* nw802 - D-link dru-350c cam */ static const u8 dlink_start[] = { 0x04, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x10, 0x00, 0x00, 0x92, 0x03, 0x10, 0x00, 0x4d, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0xce, 0x00, 0xf4, 0x05, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xb4, 0x6f, 0x3f, 0x0f, 0x88, 0x20, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x01, 0x00, 0x16, 0x00, 0x94, 0x00, 0x10, 0x06, 0x10, 0x00, 0x36, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x21, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa1, 0x02, 0x80, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0xc0, 0x00, 0x14, 0x02, 0x00, 0x01, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x01, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x10, 0x02, 0xf2, 0x8f, 0x78, 0x40, 0x10, 0x1a, 0x01, 0x00, 0x10, 0x00, 0x01, 0xad, 0x00, 0x00, 0x01, 0x08, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x51, 0x00, 0xf0, 0x00, 0x3d, 0x00, 0xb4, 0x00, 0x10, 0x1d, 0x08, 0x40, 0x06, 0x01, 0x20, 0x02, 0xe8, 0x03, 0x00, 0x10, 0x0e, 0x01, 0x20, 0x10, 0x41, 0x11, 0x00, 0x07, 0x1e, 0x38, 0x4d, 0x60, 0x70, 0x7f, 0x8e, 0x9b, 0xa8, 0xb4, 0xbf, 0xca, 0xd5, 0xdf, 0xea, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x11, 0x11, 0x10, 0x03, 0x01, 0x10, 0x10, 0x41, 0x11, 0x00, 0x07, 0x1e, 0x38, 0x4d, 0x60, 0x70, 0x7f, 0x8e, 0x9b, 0xa8, 0xb4, 0xbf, 0xca, 0xd5, 0xdf, 0xea, 0x10, 0x0b, 0x01, 0x19, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x1e, 0x04, 0x06, 0x01, 0x03, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* 06a5:d001 - nw801 - Sony * Plustek Opticam 500U or ProLink DS3303u (Hitachi HD49322BF) */ /*fixme: 320x240 only*/ static const u8 ds3303_start[] = { 0x05, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x16, 0x00, 0x00, 0xf9, 0x02, 0x11, 0x00, 0x0e, 0x01, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0xce, 0x00, 0xf4, 0x05, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x22, 0xb4, 0x6f, 0x3f, 0x0f, 0x88, 0x20, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa9, 0xa8, 0x1f, 0x00, 0x0d, 0x02, 0x07, 0x00, 0x01, 0x00, 0x19, 0x00, 0xf2, 0x00, 0x18, 0x06, 0x10, 0x06, 0x10, 0x00, 0x36, 0x00, 0x02, 0x00, 0x12, 0x03, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0x50, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x05, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0xff, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x2f, 
0x02, 0x80, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x1f, 0x10, 0x08, 0x0a, 0x0a, 0x51, 0x00, 0xf1, 0x00, 0x3c, 0x00, 0xb4, 0x00, 0x01, 0x15, 0xfd, 0x07, 0x3d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c, 0x04, 0x01, 0x20, 0x02, 0x00, 0x03, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0x10, 0x40, 0x40, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x80, 0x00, 0x2d, 0x46, 0x58, 0x67, 0x74, 0x7f, 0x88, 0x94, 0x9d, 0xa6, 0xae, 0xb5, 0xbd, 0xc4, 0xcb, 0xd1, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x10, 0x80, 0x22, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x02, 0x0a, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x3f, 0x00, 0xf2, 0x8f, 0x81, 0x40, 0x10, 0x1a, 0x01, 0x15, 0x10, 0x00, 0x01, 0x2f, 0x10, 0x8c, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x61, 0x00, 0xe0, 0x00, 0x49, 0x00, 0xa8, 0x00, 0x10, 0x26, 0x06, 0x01, 0x20, 0x02, 0xe8, 0x03, 0x00, 0x10, 0x24, 0x02, 0x40, 0x06, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x48, 0x11, 0x00, 0x15, 0x40, 0x67, 0x84, 0x9d, 0xb2, 0xc6, 0xd6, 0xe7, 0xf6, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x16, 0x16, 0x10, 0x03, 0x01, 0x0c, 0x10, 0x48, 0x11, 0x00, 0x15, 0x40, 0x67, 0x84, 0x9d, 0xb2, 0xc6, 0xd6, 0xe7, 0xf6, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0x10, 0x0b, 0x01, 0x26, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x1c, 0x05, 0x06, 0x01, 0x03, 0x05, 0x04, 0x01, 0x00, 0, 0, 0 }; /* 06a5:d001 - nw802 - Panasonic * GP-KR651US (Philips TDA8786) */ static const u8 kr651_start_1[] = { 0x04, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x44, 0x96, 0x98, 0xf9, 0x02, 0x18, 0x00, 0x48, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x0b, 0x00, 0x1b, 0x00, 0xc8, 0x00, 0xf4, 0x05, 0xb4, 0x00, 0xcc, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa2, 0x00, 0xc6, 0x00, 0x60, 0x00, 0xc6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0xae, 0x00, 0xd2, 0x00, 0xae, 0x00, 0xd2, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa8, 0x00, 0xc0, 0x00, 0x66, 0x00, 0xc0, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x00, 0x5d, 0x00, 0xc7, 0x00, 0x7e, 0x00, 0x30, 0x00, 0x80, 0x1f, 0x18, 0x43, 0x3f, 0x0d, 0x88, 0x20, 0x80, 0x3f, 0x47, 0xaf, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x0c, 0x00, 0x1c, 0x00, 0x94, 0x00, 0x10, 0x06, 0x24, 0x00, 0x4a, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x02, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x21, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa0, 0x02, 0x80, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0, 0, 0 }; static const u8 kr651_start_qvga[] = { 0x02, 0x00, 0x11, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x10, 0x02, 0xf2, 0x8f, 0x78, 0x40, 0x10, 0x1a, 0x01, 0x03, 0x10, 0x00, 0x01, 0xac, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x29, 0x00, 0x18, 0x01, 0x1f, 0x00, 0xd2, 0x00, 0x10, 0x1d, 0x06, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x10, 0x1d, 0x02, 0x28, 0x01, 0, 0, 0 }; static const u8 kr651_start_vga[] = { 0x02, 0x00, 0x11, 0x78, 0xa0, 0x8c, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x30, 0x03, 0x01, 0x82, 0x82, 0x98, 0x80, 0x10, 0x1a, 0x01, 0x03, 0x10, 0x00, 0x01, 0xa0, 0x10, 0x85, 0x08, 0x00, 0x00, 0x7f, 0x02, 0x00, 0x00, 0xdf, 0x01, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x51, 0x00, 0x30, 0x02, 0x3d, 0x00, 0xa4, 0x01, 0x10, 0x1d, 0x06, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x10, 0x1d, 0x02, 0x68, 0x00, }; static const u8 kr651_start_2[] = { 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x11, 0x3c, 0x5c, 0x74, 0x88, 0x99, 0xa8, 0xb7, 0xc4, 0xd0, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x0c, 0x0c, 0x10, 0x03, 0x01, 0x0c, 0x10, 0x41, 0x11, 0x00, 0x11, 0x3c, 0x5c, 0x74, 0x88, 0x99, 0xa8, 0xb7, 0xc4, 0xd0, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0x10, 0x0b, 0x01, 0x10, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x2d, 0x04, 0x06, 0x01, 0x03, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* nw802 - iRez Kritter cam */ static const u8 kritter_start[] = { 0x04, 0x06, 0x01, 0x06, 0x00, 0x00, 0x40, 0x44, 0x96, 0x98, 0x94, 0x03, 0x18, 0x00, 0x48, 0x0f, 0x1e, 0x00, 0x0c, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x0b, 0x00, 0x1b, 0x00, 0x0a, 0x01, 0x28, 0x07, 0xb4, 0x00, 0xcc, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa2, 0x00, 0xc6, 0x00, 0x60, 0x00, 0xc6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0xae, 0x00, 0xd2, 0x00, 0xae, 0x00, 0xd2, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa8, 0x00, 0xc0, 0x00, 0x66, 0x00, 0xc0, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x00, 0x5d, 0x00, 0x0e, 0x00, 0x7e, 0x00, 0x30, 0x00, 0x80, 0x1f, 0x18, 0x43, 0x3f, 0x0d, 0x88, 0x20, 0x80, 0x3f, 0x47, 0xaf, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0b, 0x02, 0x0c, 0x00, 0x1c, 0x00, 0x94, 0x00, 0x10, 0x06, 0x24, 0x00, 0x4a, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x02, 0x00, 0x00, 
0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0xff, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa0, 0x02, 0x80, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x8c, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x3f, 0x06, 0xf2, 0x8f, 0xf0, 0x40, 0x10, 0x1a, 0x01, 0x03, 0x10, 0x00, 0x01, 0xaf, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x3b, 0x01, 0x10, 0x11, 0x08, 0x61, 0x00, 0xe0, 0x00, 0x49, 0x00, 0xa8, 0x00, 0x10, 0x1d, 0x06, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x10, 0x1d, 0x02, 0x00, 0x00, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x0d, 0x36, 0x4e, 0x60, 0x6f, 0x7b, 0x86, 0x90, 0x98, 0xa1, 0xa9, 0xb1, 0xb7, 0xbe, 0xc4, 0xcb, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x0d, 0x0d, 0x10, 0x03, 0x01, 0x02, 0x10, 0x41, 0x11, 0x00, 0x0d, 0x36, 0x4e, 0x60, 0x6f, 0x7b, 0x86, 0x90, 0x98, 0xa1, 0xa9, 0xb1, 0xb7, 0xbe, 0xc4, 0xcb, 0x10, 0x0b, 0x01, 0x17, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x1e, 0x04, 0x06, 0x01, 0x03, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* nw802 - Mustek Wcam 300 mini */ static const u8 mustek_start[] = { 0x04, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x10, 0x00, 0x00, 0x92, 0x03, 0x10, 0x00, 0x4d, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0xce, 0x00, 0xf4, 0x05, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xb4, 0x6f, 0x3f, 0x0f, 0x88, 0x20, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x01, 0x00, 0x16, 0x00, 0x94, 0x00, 0x10, 0x06, 0xfc, 0x05, 0x0c, 0x06, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x21, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa1, 0x02, 0x80, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0xc0, 0x00, 0x14, 0x02, 0x00, 0x01, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x01, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x10, 0x02, 0xf2, 0x8f, 0x78, 0x40, 0x10, 0x1a, 0x01, 0x00, 0x10, 0x00, 0x01, 0xad, 0x00, 0x00, 0x01, 0x08, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1d, 0x08, 0x00, 0x20, 0x00, 0x20, 0x00, 0x20, 0x00, 0x20, 0x10, 0x0e, 0x01, 0x0f, 0x10, 0x41, 0x11, 0x00, 0x0f, 0x29, 0x4a, 0x64, 0x7a, 0x8c, 0x9e, 0xad, 0xba, 0xc7, 0xd3, 0xde, 0xe8, 0xf1, 0xf9, 0xff, 0x10, 0x0f, 0x02, 0x11, 0x11, 0x10, 0x03, 0x01, 0x0c, 0x10, 0x41, 0x11, 0x00, 0x0f, 0x29, 0x4a, 0x64, 0x7a, 0x8c, 0x9e, 0xad, 0xba, 0xc7, 0xd3, 0xde, 0xe8, 0xf1, 0xf9, 0xff, 0x10, 0x0b, 0x01, 0x1c, 0x10, 0x0d, 0x01, 0x1a, 0x10, 0x0c, 0x01, 0x34, 0x04, 0x05, 0x01, 0x61, 0x04, 0x04, 0x01, 0x40, 0x04, 0x06, 0x01, 0x03, 0, 0, 0 }; /* nw802 - Scope USB Microscope M2 (ProScope) (Hitachi HD49322BF) */ static const u8 proscope_init[] = { 0x04, 0x05, 0x01, 0x21, 0x04, 0x04, 0x01, 0x01, 0, 0, 0 }; static const u8 proscope_start_1[] = { 0x04, 0x06, 0x01, 0x04, 0x00, 0x00, 0x40, 0x10, 0x01, 0x00, 0xf9, 0x02, 0x10, 0x00, 0x04, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x08, 0x00, 0x17, 0x00, 0xce, 0x00, 0xf4, 0x05, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0xce, 0x00, 0xf8, 0x03, 0x3e, 0x00, 0x86, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xf6, 0x03, 0x34, 0x04, 0xf6, 0x03, 0x34, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xe8, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xb4, 0x6f, 0x1f, 0x0f, 0x08, 0x20, 0xa8, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x01, 0x00, 0x19, 0x00, 0x94, 0x00, 0x10, 0x06, 0x10, 0x00, 0x36, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x21, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x10, 0x00, 0x40, 0xad, 0x02, 0x80, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x1f, 0x10, 0x08, 0x0a, 0x0a, 0x51, 0x00, 0xf1, 0x00, 0x3c, 0x00, 0xb4, 0x00, 0x49, 0x13, 0x00, 0x00, 0x8c, 0x04, 0x01, 0x20, 0x02, 0x00, 0x03, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x2d, 0x46, 0x58, 0x67, 0x74, 0x7f, 0x88, 0x94, 0x9d, 0xa6, 0xae, 0xb5, 0xbd, 0xc4, 0xcb, 0xd1, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x09, 0x05, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0, 0, 0 }; static const u8 proscope_start_qvga[] = { 0x02, 0x00, 0x11, 0x3c, 0x50, 0x9e, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x10, 0x02, 0xf2, 0x8f, 0x78, 0x40, 0x10, 0x1a, 0x01, 0x06, 0x00, 0x03, 0x02, 0xf9, 0x02, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x11, 0x08, 0x00, 0x00, 0x3f, 0x01, 0x00, 0x00, 0xef, 0x00, 0x10, 0x1d, 0x08, 0xc0, 0x0d, 0x01, 0x20, 0x02, 0xe8, 0x03, 0x00, 0x10, 0x0e, 0x01, 0x10, 0, 0, 0 }; static const u8 proscope_start_vga[] = { 0x00, 0x03, 0x02, 0xf9, 0x02, 0x10, 0x85, 0x08, 0x00, 0x00, 0x7f, 0x02, 0x00, 0x00, 0xdf, 0x01, 0x02, 0x00, 0x11, 0x78, 0xa0, 0x8c, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x16, 0x00, 0x00, 0x82, 0x84, 0x00, 0x80, 0x10, 0x1a, 0x01, 0x06, 0x10, 0x00, 0x01, 0xa1, 0x10, 0x1b, 0x02, 0x00, 0x00, 0x10, 0x1d, 0x08, 0xc0, 0x0d, 0x01, 0x20, 0x02, 0xe8, 0x03, 0x00, 0x10, 0x11, 0x08, 0x00, 0x00, 0x7f, 0x02, 0x00, 0x00, 0xdf, 0x01, 0x10, 0x0e, 0x01, 0x10, 0x10, 0x41, 0x11, 0x00, 0x10, 0x51, 0x6e, 0x83, 0x93, 0xa1, 0xae, 0xb9, 0xc3, 0xcc, 0xd4, 0xdd, 0xe4, 0xeb, 0xf2, 0xf9, 0x10, 0x03, 0x01, 0x00, 0, 0, 0 }; static const u8 proscope_start_2[] = { 0x10, 0x0f, 0x02, 0x0c, 0x0c, 0x10, 0x03, 0x01, 0x0c, 0x10, 0x41, 0x11, 0x00, 0x10, 0x51, 0x6e, 0x83, 0x93, 0xa1, 0xae, 0xb9, 0xc3, 0xcc, 0xd4, 0xdd, 0xe4, 0xeb, 0xf2, 0xf9, 0x10, 0x0b, 0x01, 0x0b, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x1b, 0x04, 0x06, 0x01, 0x03, 0x04, 0x05, 0x01, 0x21, 0x04, 0x04, 0x01, 0x00, 0, 0, 0 }; /* nw800 - hv7121b? 
(seems pas106) - Divio Chicony TwinkleCam */ static const u8 twinkle_start[] = { 0x04, 0x06, 0x01, 0x44, 0x04, 0x06, 0x01, 0x00, 0x00, 0x00, 0x40, 0x14, 0x83, 0x00, 0xba, 0x01, 0x10, 0x00, 0x4f, 0xef, 0x00, 0x00, 0x60, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x06, 0x00, 0xfc, 0x01, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x3e, 0x00, 0x86, 0x00, 0x01, 0x00, 0x01, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x56, 0x00, 0x9e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x6e, 0x00, 0xb6, 0x00, 0x6e, 0x00, 0x78, 0x04, 0x6e, 0x00, 0xb6, 0x00, 0x01, 0x00, 0x01, 0x00, 0xca, 0x03, 0x46, 0x04, 0xca, 0x03, 0x46, 0x04, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0xf0, 0x00, 0x3e, 0x00, 0xaa, 0x00, 0x88, 0x00, 0x2e, 0x00, 0x80, 0x1f, 0xb8, 0x48, 0x0f, 0x04, 0x88, 0x14, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x01, 0x00, 0x03, 0x00, 0x24, 0x01, 0x01, 0x00, 0x16, 0x00, 0x04, 0x00, 0x4b, 0x00, 0x76, 0x00, 0x86, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0x61, 0x00, 0x05, 0x00, 0x06, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0x80, 0x02, 0x20, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x10, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x00, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1d, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, 0x00, 0x00, 0x10, 0x85, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, 0x04, 0x04, 0x01, 0x10, 0x04, 0x04, 0x01, 0x00, 0x04, 0x05, 0x01, 0x61, 0x04, 0x04, 0x01, 0x01, I2C0, 0x40, 0x0c, 0x02, 0x0c, 0x12, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, I2C0, 0x40, 0x02, 0x11, 0x06, I2C0, 0x40, 0x02, 0x14, 0x00, I2C0, 0x40, 0x02, 0x13, 0x01, /* i2c end */ I2C0, 0x40, 0x02, 0x07, 0x01, 0x02, 0x00, 0x11, 0x48, 0x58, 0x9e, 0x48, 0x58, 0x00, 0x00, 0x00, 0x00, 0x84, 0x36, 0x05, 0x01, 0xf2, 0x86, 0x65, 0x40, I2C0, 0x40, 0x02, 0x02, 0x0c, I2C0, 0x40, 0x02, 0x13, 0x01, 0x10, 0x00, 0x01, 0x01, 0x10, 0x8f, 0x0c, 0x62, 0x01, 0x24, 0x01, 0x62, 0x01, 0x24, 0x01, 0x20, 0x01, 0x60, 0x01, I2C0, 0x40, 0x02, 0x05, 0x0f, I2C0, 0x40, 0x02, 0x13, 0x01, I2C0, 0x40, 0x08, 0x08, 0x04, 0x0b, 0x01, 0x01, 0x02, 0x00, 0x17, I2C0, 0x40, 
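/* (the I2C0-tagged records in this table go through i2c_w() rather than reg_w(): the byte following I2C0 - 0x40 here - is the sensor's i2c address, the next one the payload length) */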
0x03, 0x12, 0x00, 0x01, 0x10, 0x11, 0x08, 0x00, 0x00, 0x5f, 0x01, 0x00, 0x00, 0x1f, 0x01, I2C0, 0x40, 0x02, 0x12, 0x00, I2C0, 0x40, 0x02, 0x0e, 0x00, I2C0, 0x40, 0x02, 0x11, 0x06, 0x10, 0x41, 0x11, 0x00, 0x17, 0x3f, 0x69, 0x7b, 0x8c, 0x9a, 0xa7, 0xb3, 0xbf, 0xc9, 0xd3, 0xdd, 0xe6, 0xef, 0xf7, 0xf9, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x0c, 0x0c, 0x10, 0x03, 0x01, 0x06, 0x10, 0x41, 0x11, 0x00, 0x17, 0x3f, 0x69, 0x7b, 0x8c, 0x9a, 0xa7, 0xb3, 0xbf, 0xc9, 0xd3, 0xdd, 0xe6, 0xef, 0xf7, 0xf9, 0x10, 0x0b, 0x01, 0x19, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x0d, 0x04, 0x06, 0x01, 0x03, 0x04, 0x05, 0x01, 0x61, 0x04, 0x04, 0x01, 0x41, 0, 0, 0 }; /* nw802 dvc-v6 */ static const u8 dvcv6_start[] = { 0x04, 0x06, 0x01, 0x06, 0x00, 0x00, 0x40, 0x54, 0x96, 0x98, 0xf9, 0x02, 0x18, 0x00, 0x4c, 0x0f, 0x1f, 0x00, 0x0d, 0x02, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x0b, 0x00, 0x1b, 0x00, 0xc8, 0x00, 0xf4, 0x05, 0xb4, 0x00, 0xcc, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa2, 0x00, 0xc6, 0x00, 0x60, 0x00, 0xc6, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x40, 0x40, 0x00, 0xae, 0x00, 0xd2, 0x00, 0xae, 0x00, 0xd2, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0xa8, 0x00, 0xc0, 0x00, 0x66, 0x00, 0xc0, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x0a, 0x00, 0x54, 0x00, 0x10, 0x00, 0x36, 0x00, 0xd2, 0x00, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x00, 0x5d, 0x00, 0xc7, 0x00, 0x7e, 0x00, 0x30, 0x00, 0x80, 0x1f, 0x98, 0x43, 0x3f, 0x0d, 0x88, 0x20, 0x80, 0x3f, 0x47, 0xaf, 0x00, 0x00, 0xa8, 0x08, 0x00, 0x11, 0x00, 0x0c, 0x02, 0x0c, 0x00, 0x1c, 0x00, 0x94, 0x00, 0x10, 0x06, 0x24, 0x00, 0x4a, 0x00, 0x02, 0x00, 0x12, 0x78, 0xa0, 0x9e, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x18, 0x0b, 0x06, 0x62, 0x82, 0xa0, 0x40, 0x20, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x04, 0x00, 0x07, 0x01, 0x10, 0x00, 0x00, 0x00, 0xff, 0x00, 0x06, 0x00, 0x02, 0x09, 0x99, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x40, 0xa0, 0x02, 0x80, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x08, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x13, 0x00, 0x00, 0xe0, 0x00, 0x0c, 0x00, 0x52, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x10, 0x08, 0x03, 0x00, 0x00, 0x00, 0x00, 0x20, 0x10, 0x06, 0xf7, 0xee, 0x1c, 0x1c, 0xe9, 0xfc, 0x10, 0x80, 0x10, 0x40, 0x40, 0x80, 0x00, 0x05, 0x35, 0x5e, 0x78, 0x8b, 0x99, 0xa4, 0xae, 0xb5, 0xbc, 0xc1, 0xc6, 0xc9, 0xcc, 0xcf, 0xd0, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x11, 0x22, 0x32, 0x43, 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb3, 0x10, 0x80, 0x1b, 0xc3, 0xd2, 0xe2, 0xf1, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x82, 0x02, 0xe4, 0x01, 0x40, 0x01, 0xf0, 0x00, 0x40, 0x01, 0xf0, 0x00, 0x00, 0x03, 0x02, 0x94, 0x03, 0x00, 0x1d, 0x04, 0x0a, 0x01, 0x28, 0x07, 0x00, 0x7b, 0x02, 0xe0, 0x00, 0x10, 0x8d, 0x01, 0x00, 0x00, 0x09, 0x04, 0x1e, 0x00, 0x0c, 0x02, 0x00, 0x91, 0x02, 0x0b, 0x02, 0x10, 0x00, 0x01, 0xaf, 0x02, 0x00, 0x11, 0x3c, 0x50, 0x8f, 0x3c, 0x50, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3f, 0x3f, 0x06, 0xf2, 0x8f, 0xf0, 0x40, 0x10, 0x1a, 0x01, 0x02, 0x10, 0x00, 0x01, 0xaf, 0x10, 0x85, 0x08, 0x00, 0x00, 0x3f, 0x01, 
0x00, 0x00, 0xef, 0x00, 0x10, 0x1b, 0x02, 0x07, 0x01, 0x10, 0x11, 0x08, 0x61, 0x00, 0xe0, 0x00, 0x49, 0x00, 0xa8, 0x00, 0x10, 0x1f, 0x06, 0x01, 0x20, 0x02, 0xe8, 0x03, 0x00, 0x10, 0x1d, 0x02, 0x40, 0x06, 0x10, 0x0e, 0x01, 0x08, 0x10, 0x41, 0x11, 0x00, 0x0f, 0x54, 0x6f, 0x82, 0x91, 0x9f, 0xaa, 0xb4, 0xbd, 0xc5, 0xcd, 0xd5, 0xdb, 0xdc, 0xdc, 0xdc, 0x10, 0x03, 0x01, 0x00, 0x10, 0x0f, 0x02, 0x12, 0x12, 0x10, 0x03, 0x01, 0x11, 0x10, 0x41, 0x11, 0x00, 0x0f, 0x54, 0x6f, 0x82, 0x91, 0x9f, 0xaa, 0xb4, 0xbd, 0xc5, 0xcd, 0xd5, 0xdb, 0xdc, 0xdc, 0xdc, 0x10, 0x0b, 0x01, 0x16, 0x10, 0x0d, 0x01, 0x10, 0x10, 0x0c, 0x01, 0x1a, 0x04, 0x06, 0x01, 0x03, 0x04, 0x04, 0x01, 0x00, }; static const u8 *webcam_start[] = { [Generic800] = nw800_start, [SpaceCam] = spacecam_start, [SpaceCam2] = spacecam2_start, [Cvideopro] = cvideopro_start, [Dlink350c] = dlink_start, [DS3303u] = ds3303_start, [Kr651us] = kr651_start_1, [Kritter] = kritter_start, [Mustek300] = mustek_start, [Proscope] = proscope_start_1, [Twinkle] = twinkle_start, [DvcV6] = dvcv6_start, [P35u] = nw801_start_1, [Generic802] = nw802_start, }; /* -- write a register -- */ static void reg_w(struct gspca_dev *gspca_dev, u16 index, const u8 *data, int len) { struct usb_device *dev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return; if (len == 1) gspca_dbg(gspca_dev, D_USBO, "SET 00 0000 %04x %02x\n", index, *data); else gspca_dbg(gspca_dev, D_USBO, "SET 00 0000 %04x %02x %02x ...\n", index, *data, data[1]); memcpy(gspca_dev->usb_buf, data, len); ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x00, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x00, /* value */ index, gspca_dev->usb_buf, len, 500); if (ret < 0) { pr_err("reg_w err %d\n", ret); gspca_dev->usb_err = ret; } } /* -- read registers in usb_buf -- */ static void reg_r(struct gspca_dev *gspca_dev, u16 index, int len) { struct usb_device *dev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x00, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x00, index, gspca_dev->usb_buf, len, 500); if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; /* * Make sure the buffer is zeroed to avoid uninitialized * values. 
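* Callers such as setautogain(), do_autogain() and sd_config() parse usb_buf right after the read without checking usb_err first, so stale bytes from an earlier transfer must not leak through.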
*/ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); return; } if (len == 1) gspca_dbg(gspca_dev, D_USBI, "GET 00 0000 %04x %02x\n", index, gspca_dev->usb_buf[0]); else gspca_dbg(gspca_dev, D_USBI, "GET 00 0000 %04x %02x %02x ..\n", index, gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]); } /* -- write to a sensor register through the bridge's i2c interface -- */ static void i2c_w(struct gspca_dev *gspca_dev, u8 i2c_addr, const u8 *data, int len) { u8 val[2]; int i; /* load the payload at 0x0600 - first without its leading byte, then in full */ reg_w(gspca_dev, 0x0600, data + 1, len - 1); reg_w(gspca_dev, 0x0600, data, len); /* transfer length and target i2c address */ val[0] = len; val[1] = i2c_addr; reg_w(gspca_dev, 0x0502, val, 2); /* start the transfer */ val[0] = 0x01; reg_w(gspca_dev, 0x0501, val, 1); /* poll the busy flag at 0x0505 (up to 5 times, 4 ms apart) */ for (i = 5; --i >= 0; ) { msleep(4); reg_r(gspca_dev, 0x0505, 1); if (gspca_dev->usb_err < 0) return; if (gspca_dev->usb_buf[0] == 0) return; } gspca_dev->usb_err = -ETIME; } /* -- send a command stream: 2-byte big-endian register address, 1-byte payload length, then the payload; a zero length ends the stream, and an address whose first byte is I2C0 routes the record through i2c_w() -- */ static void reg_w_buf(struct gspca_dev *gspca_dev, const u8 *cmd) { u16 reg; int len; for (;;) { reg = *cmd++ << 8; reg += *cmd++; len = *cmd++; if (len == 0) break; if (cmd[-3] != I2C0) reg_w(gspca_dev, reg, cmd, len); else i2c_w(gspca_dev, reg, cmd, len); cmd += len; } } /* reverse the bit order of an 8-bit value */ static int swap_bits(int v) { int r, i; r = 0; for (i = 0; i < 8; i++) { r <<= 1; if (v & 1) r++; v >>= 1; } return r; } static void setgain(struct gspca_dev *gspca_dev, u8 val) { struct sd *sd = (struct sd *) gspca_dev; u8 v[2]; switch (sd->webcam) { case P35u: reg_w(gspca_dev, 0x1026, &val, 1); break; case Kr651us: /* 0 - 253 */ val = swap_bits(val); v[0] = val << 3; v[1] = val >> 5; reg_w(gspca_dev, 0x101d, v, 2); /* SIF reg0/1 (AGC) */ break; } } static void setexposure(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 v[2]; switch (sd->webcam) { case P35u: v[0] = ((9 - val) << 3) | 0x01; reg_w(gspca_dev, 0x1019, v, 1); break; case Cvideopro: case DvcV6: case Kritter: case Kr651us: v[0] = val; v[1] = val >> 8; reg_w(gspca_dev, 0x101b, v, 2); break; } } static void setautogain(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; int w, h; if (!val) { sd->ag_cnt = -1; return; } sd->ag_cnt = AG_CNT_START; reg_r(gspca_dev, 0x1004, 1); if (gspca_dev->usb_buf[0] & 0x04) { /* if AE_FULL_FRM */ sd->ae_res = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height; } else { /* get the AE window size */ reg_r(gspca_dev, 0x1011, 8); w = (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0] - (gspca_dev->usb_buf[3] << 8) - gspca_dev->usb_buf[2]; h = (gspca_dev->usb_buf[5] << 8) + gspca_dev->usb_buf[4] - (gspca_dev->usb_buf[7] << 8) - gspca_dev->usb_buf[6]; sd->ae_res = h * w; if (sd->ae_res == 0) sd->ae_res = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height; } } static int nw802_test_reg(struct gspca_dev *gspca_dev, u16 index, u8 value) { /* write the value */ reg_w(gspca_dev, index, &value, 1); /* read it */ reg_r(gspca_dev, index, 1); return gspca_dev->usb_buf[0] == value; } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; if ((unsigned) webcam >= NWEBCAMS) webcam = 0; sd->webcam = webcam; gspca_dev->cam.needs_full_bandwidth = 1; sd->ag_cnt = -1; /* * Autodetect sequence inspired by some log. * We try to detect which registers exist or not. * If 0x0500 does not exist => NW802 * If it does, test 0x109b. If it doesn't exist, * then it's a NW801.
Else, a NW800 * If an et31x110 (nw800 and 06a5:d800) * get the sensor ID */ if (!nw802_test_reg(gspca_dev, 0x0500, 0x55)) { sd->bridge = BRIDGE_NW802; if (sd->webcam == Generic800) sd->webcam = Generic802; } else if (!nw802_test_reg(gspca_dev, 0x109b, 0xaa)) { sd->bridge = BRIDGE_NW801; if (sd->webcam == Generic800) sd->webcam = P35u; } else if (id->idVendor == 0x06a5 && id->idProduct == 0xd800) { reg_r(gspca_dev, 0x0403, 1); /* GPIO */ gspca_dbg(gspca_dev, D_PROBE, "et31x110 sensor type %02x\n", gspca_dev->usb_buf[0]); switch (gspca_dev->usb_buf[0] >> 1) { case 0x00: /* ?? */ if (sd->webcam == Generic800) sd->webcam = SpaceCam; break; case 0x01: /* Hynix? */ if (sd->webcam == Generic800) sd->webcam = Twinkle; break; case 0x0a: /* Pixart */ if (sd->webcam == Generic800) sd->webcam = SpaceCam2; break; } } if (webcam_chip[sd->webcam] != sd->bridge) { pr_err("Bad webcam type %d for NW80%d\n", sd->webcam, sd->bridge); gspca_dev->usb_err = -ENODEV; return gspca_dev->usb_err; } gspca_dbg(gspca_dev, D_PROBE, "Bridge nw80%d - type: %d\n", sd->bridge, sd->webcam); if (sd->bridge == BRIDGE_NW800) { switch (sd->webcam) { case DS3303u: gspca_dev->cam.cam_mode = cif_mode; /* qvga */ break; default: gspca_dev->cam.cam_mode = &cif_mode[1]; /* cif */ break; } gspca_dev->cam.nmodes = 1; } else { gspca_dev->cam.cam_mode = vga_mode; switch (sd->webcam) { case Kr651us: case Proscope: case P35u: gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode); break; default: gspca_dev->cam.nmodes = 1; /* qvga only */ break; } } return gspca_dev->usb_err; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->bridge) { case BRIDGE_NW800: switch (sd->webcam) { case SpaceCam: reg_w_buf(gspca_dev, spacecam_init); break; default: reg_w_buf(gspca_dev, nw800_init); break; } break; default: switch (sd->webcam) { case Mustek300: case P35u: case Proscope: reg_w_buf(gspca_dev, proscope_init); break; } break; } return gspca_dev->usb_err; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; const u8 *cmd; cmd = webcam_start[sd->webcam]; reg_w_buf(gspca_dev, cmd); switch (sd->webcam) { case P35u: if (gspca_dev->pixfmt.width == 320) reg_w_buf(gspca_dev, nw801_start_qvga); else reg_w_buf(gspca_dev, nw801_start_vga); reg_w_buf(gspca_dev, nw801_start_2); break; case Kr651us: if (gspca_dev->pixfmt.width == 320) reg_w_buf(gspca_dev, kr651_start_qvga); else reg_w_buf(gspca_dev, kr651_start_vga); reg_w_buf(gspca_dev, kr651_start_2); break; case Proscope: if (gspca_dev->pixfmt.width == 320) reg_w_buf(gspca_dev, proscope_start_qvga); else reg_w_buf(gspca_dev, proscope_start_vga); reg_w_buf(gspca_dev, proscope_start_2); break; } sd->exp_too_high_cnt = 0; sd->exp_too_low_cnt = 0; return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 value; /* 'go' off */ if (sd->bridge != BRIDGE_NW801) { value = 0x02; reg_w(gspca_dev, 0x0406, &value, 1); } /* LED off */ switch (sd->webcam) { case Cvideopro: case Kr651us: case DvcV6: case Kritter: value = 0xff; break; case Dlink350c: value = 0x21; break; case SpaceCam: case SpaceCam2: case Proscope: case Twinkle: value = 0x01; break; default: return; } reg_w(gspca_dev, 0x0404, &value, 1); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* isoc packet length */ { /* * frame header = '00 00 hh ww ss xx ff ff' * with: * - 'hh': height / 4 * - 
'ww': width / 4 * - 'ss': frame sequence number c0..dd */ if (data[0] == 0x00 && data[1] == 0x00 && data[6] == 0xff && data[7] == 0xff) { gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, FIRST_PACKET, data + 8, len - 8); } else { gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } } static void do_autogain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int luma; if (sd->ag_cnt < 0) return; if (--sd->ag_cnt >= 0) return; sd->ag_cnt = AG_CNT_START; /* get the average luma */ reg_r(gspca_dev, sd->bridge == BRIDGE_NW801 ? 0x080d : 0x080c, 4); luma = (gspca_dev->usb_buf[3] << 24) + (gspca_dev->usb_buf[2] << 16) + (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0]; luma /= sd->ae_res; switch (sd->webcam) { case P35u: gspca_coarse_grained_expo_autogain(gspca_dev, luma, 100, 5); break; default: gspca_expo_autogain(gspca_dev, luma, 100, 5, 230, 0); break; } } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { /* autogain/gain/exposure control cluster */ case V4L2_CID_AUTOGAIN: if (ctrl->is_new) setautogain(gspca_dev, ctrl->val); if (!ctrl->val) { if (gspca_dev->gain->is_new) setgain(gspca_dev, gspca_dev->gain->val); if (gspca_dev->exposure->is_new) setexposure(gspca_dev, gspca_dev->exposure->val); } break; /* Some webcams only have exposure, so handle that separately from the autogain/gain/exposure cluster in the previous case. */ case V4L2_CID_EXPOSURE: setexposure(gspca_dev, gspca_dev->exposure->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 3); switch (sd->webcam) { case P35u: gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); /* For the P35u, which uses the coarse expo autogain function, choose a default gain near the minimum to avoid a large settings jump on the first auto adjustment */ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 127, 1, 127 / 5 * 2); gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 0, 9, 1, 9); break; case Kr651us: gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 253, 1, 128); fallthrough; case Cvideopro: case DvcV6: case Kritter: gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 0, 315, 1, 150); break; default: break; } if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } if (gspca_dev->autogain) v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false); return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = do_autogain, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x046d, 0xd001)}, {USB_DEVICE(0x0502, 0xd001)}, {USB_DEVICE(0x052b, 0xd001)}, {USB_DEVICE(0x055f, 0xd001)}, {USB_DEVICE(0x06a5, 0x0000)}, {USB_DEVICE(0x06a5, 0xd001)}, {USB_DEVICE(0x06a5, 0xd800)}, {USB_DEVICE(0x06be, 0xd001)},
{USB_DEVICE(0x0728, 0xd001)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); module_param(webcam, int, 0644); MODULE_PARM_DESC(webcam, "Webcam type\n" "0: generic\n" "1: Trust 120 SpaceCam\n" "2: other Trust 120 SpaceCam\n" "3: Conceptronic Video Pro\n" "4: D-link dru-350c\n" "5: Plustek Opticam 500U\n" "6: Panasonic GP-KR651US\n" "7: iRez Kritter\n" "8: Mustek Wcam 300 mini\n" "9: Scalar USB Microscope M2 (Proscope)\n" "10: Divio Chicony TwinkleCam\n" "11: DVC-V6\n"); |
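/*
 * Usage note (assuming the module is built as gspca_nw80x, as in the
 * mainline gspca Makefile): the webcam type can be forced when the
 * autodetection above keeps a generic entry, e.g. for a Panasonic
 * GP-KR651US:
 *
 *	modprobe gspca_nw80x webcam=6
 *
 * The same value can be passed on the kernel command line as
 * gspca_nw80x.webcam=6.
 */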
/* * Copyright (c) 2014 Samsung Electronics Co., Ltd * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <linux/debugfs.h> #include <linux/err.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/mutex.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_debugfs.h> #include <drm/drm_edid.h> #include <drm/drm_encoder.h> #include <drm/drm_file.h> #include <drm/drm_of.h> #include <drm/drm_print.h> #include "drm_crtc_internal.h" /** * DOC: overview * * &struct drm_bridge represents a device that hangs on to an encoder. These are * handy when a regular &drm_encoder entity isn't enough to represent the entire * encoder chain. * * A bridge is always attached to a single &drm_encoder at a time, but can be * either connected to it directly, or through a chain of bridges:: * * [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B * * Here, the output of the encoder feeds to bridge A, and that further feeds into * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear: * Chaining multiple bridges to the output of a bridge, or the same bridge to * the output of different bridges, is not supported. * * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes, * CRTCs, encoders or connectors and hence are not visible to userspace. They * just provide additional hooks to get the desired output at the end of the * encoder chain. */ /** * DOC: display driver integration * * Display drivers are responsible for linking encoders with the first bridge * in the chains. This is done by acquiring the appropriate bridge with * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the * encoder with a call to drm_bridge_attach(). * * Bridges are responsible for linking themselves with the next bridge in the * chain, if any. This is done the same way as for encoders, with the call to * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation. * * Once these links are created, the bridges can participate along with encoder * functions to perform mode validation and fixup (through * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode * setting (through drm_bridge_chain_mode_set()), enable (through * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable()) * and disable (through drm_atomic_bridge_chain_disable() and * drm_atomic_bridge_chain_post_disable()). Those functions call the * corresponding operations provided in &drm_bridge_funcs in sequence for all * bridges in the chain. * * For display drivers that use the atomic helpers * drm_atomic_helper_check_modeset(), * drm_atomic_helper_commit_modeset_enables() and * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled * commit check and commit tail handlers, or through the higher-level * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and * requires no intervention from the driver. For other drivers, the relevant * DRM bridge chain functions shall be called manually. * * Bridges also participate in implementing the &drm_connector at the end of * the bridge chain.
Display drivers may use the drm_bridge_connector_init() * helper to create the &drm_connector, or implement it manually on top of the * connector-related operations exposed by the bridge (see the overview * documentation of bridge operations for more details). */ /** * DOC: special care dsi * * The interaction between the bridges and other frameworks involved in * the probing of the upstream driver and the bridge driver can be * challenging. Indeed, there are multiple cases that need to be * considered: * * - The upstream driver doesn't use the component framework and isn't a * MIPI-DSI host. In this case, the bridge driver will probe at some * point and the upstream driver should try to probe again by returning * EPROBE_DEFER as long as the bridge driver hasn't probed. * * - The upstream driver doesn't use the component framework, but is a * MIPI-DSI host. The bridge device is controlled through MIPI-DCS * commands. In this case, the bridge device is a child of the * display device and when it probes it is assured that the display * device (and MIPI-DSI host) is present. The upstream driver will be * assured that the bridge driver is connected between the * &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations. * Therefore, it must run mipi_dsi_host_register() in its probe * function, and then run drm_bridge_attach() in its * &mipi_dsi_host_ops.attach hook. * * - The upstream driver uses the component framework and is a MIPI-DSI * host. The bridge device is controlled through MIPI-DCS * commands. This is the same situation as above, and the driver can run * mipi_dsi_host_register() in either its probe or bind hooks. * * - The upstream driver uses the component framework and is a MIPI-DSI * host. The bridge device uses a separate bus (such as I2C) to be * controlled. In this case, there's no correlation between the probe * of the bridge and upstream drivers, so care must be taken to avoid * an endless EPROBE_DEFER loop, with each driver waiting for the * other to probe. * * The ideal pattern to cover the last item (and all the others in the * MIPI-DSI host driver case) is to split the operations like this: * * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its * probe hook. It will make sure that the MIPI-DSI host sticks around, * and that the driver's bind can be called. * * - In its probe hook, the bridge driver must try to find its MIPI-DSI * host, register as a MIPI-DSI device and attach the MIPI-DSI device * to its host. The bridge driver is now functional. * * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can * now add its component. Its bind hook will now be called and since * the bridge driver is attached and registered, we can now look for * and attach it. * * At this point, we're now certain that both the upstream driver and * the bridge driver are functional and we can't have a deadlock-like * situation when probing. */ /** * DOC: dsi bridge operations * * DSI host interfaces are expected to be implemented as bridges rather than * encoders; however, there are a few aspects of their operation that need to * be defined in order to provide a consistent interface. * * A DSI host should keep the PHY powered down until the pre_enable operation is * called. All lanes are in an undefined idle state up to this point, and it * must not be assumed that they are in LP-11. * pre_enable should initialise the PHY, set the data lanes to LP-11, and the * clock lane to either LP-11 or HS depending on the mode_flag * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
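 *
 * As an illustration only (hypothetical my_dsi_* names, not taken from a
 * real driver, and assuming the host stores the peripheral's mode flags),
 * a legacy pre_enable hook following these rules could look like::
 *
 *	static void my_dsi_host_pre_enable(struct drm_bridge *bridge)
 *	{
 *		struct my_dsi_host *dsi = bridge_to_my_dsi_host(bridge);
 *
 *		my_dsi_phy_power_on(dsi);
 *		my_dsi_data_lanes_to_lp11(dsi);
 *		if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
 *			my_dsi_clock_lane_to_lp11(dsi);
 *		else
 *			my_dsi_clock_lane_to_hs(dsi);
 *	}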
* * Ordinarily the downstream bridge DSI peripheral pre_enable will have been * called before the DSI host. If the DSI peripheral requires LP-11 and/or * the clock lane to be in HS mode prior to pre_enable, then it can set the * &pre_enable_prev_first flag to request the pre_enable (and * post_disable) order to be altered to enable the DSI host first. * * Either the CRTC being enabled, or the DSI host enable operation should switch * the host to actively transmitting video on the data lanes. * * The reverse also applies. The DSI host disable operation or stopping the CRTC * should stop transmitting video, and the data lanes should return to the LP-11 * state. The DSI host &post_disable operation should disable the PHY. * If the &pre_enable_prev_first flag is set, then the DSI peripheral's * bridge &post_disable will be called before the DSI host's post_disable. * * Whilst it is valid to call &host_transfer prior to pre_enable or after * post_disable, the exact state of the lanes is undefined at this point. The * DSI host should initialise the interface, transmit the data, and then disable * the interface again. * * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If * implemented, it therefore needs to be handled entirely within the DSI Host * driver. */ static DEFINE_MUTEX(bridge_lock); static LIST_HEAD(bridge_list); static void __drm_bridge_free(struct kref *kref) { struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount); kfree(bridge->container); } /** * drm_bridge_get - Acquire a bridge reference * @bridge: DRM bridge * * This function increments the bridge's refcount. * * Returns: * Pointer to @bridge. */ struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge) { if (bridge) kref_get(&bridge->refcount); return bridge; } EXPORT_SYMBOL(drm_bridge_get); /** * drm_bridge_put - Release a bridge reference * @bridge: DRM bridge * * This function decrements the bridge's reference count and frees the * object if the reference count drops to zero. */ void drm_bridge_put(struct drm_bridge *bridge) { if (bridge) kref_put(&bridge->refcount, __drm_bridge_free); } EXPORT_SYMBOL(drm_bridge_put); /** * drm_bridge_put_void - wrapper to drm_bridge_put() taking a void pointer * * @data: pointer to @struct drm_bridge, cast to a void pointer * * Wrapper of drm_bridge_put() to be used when a function taking a void * pointer is needed, for example as a devm action. */ static void drm_bridge_put_void(void *data) { struct drm_bridge *bridge = (struct drm_bridge *)data; drm_bridge_put(bridge); } void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset, const struct drm_bridge_funcs *funcs) { void *container; struct drm_bridge *bridge; int err; if (!funcs) { dev_warn(dev, "Missing funcs pointer\n"); return ERR_PTR(-EINVAL); } container = kzalloc(size, GFP_KERNEL); if (!container) return ERR_PTR(-ENOMEM); bridge = container + offset; bridge->container = container; bridge->funcs = funcs; kref_init(&bridge->refcount); err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge); if (err) return ERR_PTR(err); return container; } EXPORT_SYMBOL(__devm_drm_bridge_alloc); /** * drm_bridge_add - add the given bridge to the global bridge list * * @bridge: bridge control structure * * The bridge to be added must have been allocated by * devm_drm_bridge_alloc(). 
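 *
 * A typical probe-time sequence in a bridge driver (a sketch with
 * hypothetical my_bridge names, using the devm_drm_bridge_alloc() wrapper
 * around __devm_drm_bridge_alloc())::
 *
 *	mb = devm_drm_bridge_alloc(dev, struct my_bridge, bridge,
 *				   &my_bridge_funcs);
 *	if (IS_ERR(mb))
 *		return PTR_ERR(mb);
 *
 *	mb->bridge.of_node = dev->of_node;
 *	drm_bridge_add(&mb->bridge);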
*/ void drm_bridge_add(struct drm_bridge *bridge) { mutex_init(&bridge->hpd_mutex); if (bridge->ops & DRM_BRIDGE_OP_HDMI) bridge->ycbcr_420_allowed = !!(bridge->supported_formats & BIT(HDMI_COLORSPACE_YUV420)); mutex_lock(&bridge_lock); list_add_tail(&bridge->list, &bridge_list); mutex_unlock(&bridge_lock); } EXPORT_SYMBOL(drm_bridge_add); static void drm_bridge_remove_void(void *bridge) { drm_bridge_remove(bridge); } /** * devm_drm_bridge_add - devm managed version of drm_bridge_add() * * @dev: device to tie the bridge lifetime to * @bridge: bridge control structure * * This is the managed version of drm_bridge_add() which automatically * calls drm_bridge_remove() when @dev is unbound. * * Return: 0 if no error or negative error code. */ int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge) { drm_bridge_add(bridge); return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge); } EXPORT_SYMBOL(devm_drm_bridge_add); /** * drm_bridge_remove - remove the given bridge from the global bridge list * * @bridge: bridge control structure */ void drm_bridge_remove(struct drm_bridge *bridge) { mutex_lock(&bridge_lock); list_del_init(&bridge->list); mutex_unlock(&bridge_lock); mutex_destroy(&bridge->hpd_mutex); } EXPORT_SYMBOL(drm_bridge_remove); static struct drm_private_state * drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj) { struct drm_bridge *bridge = drm_priv_to_bridge(obj); struct drm_bridge_state *state; state = bridge->funcs->atomic_duplicate_state(bridge); return state ? &state->base : NULL; } static void drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj, struct drm_private_state *s) { struct drm_bridge_state *state = drm_priv_to_bridge_state(s); struct drm_bridge *bridge = drm_priv_to_bridge(obj); bridge->funcs->atomic_destroy_state(bridge, state); } static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = { .atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state, .atomic_destroy_state = drm_bridge_atomic_destroy_priv_state, }; static bool drm_bridge_is_atomic(struct drm_bridge *bridge) { return bridge->funcs->atomic_reset != NULL; } /** * drm_bridge_attach - attach the bridge to an encoder's chain * * @encoder: DRM encoder * @bridge: bridge to attach * @previous: previous bridge in the chain (optional) * @flags: DRM_BRIDGE_ATTACH_* flags * * Called by a kms driver to link the bridge to an encoder's chain. The previous * argument specifies the previous bridge in the chain. If NULL, the bridge is * linked directly at the encoder's output. Otherwise it is linked at the * previous bridge's output. * * If non-NULL the previous bridge must be already attached by a call to this * function. * * Note that bridges attached to encoders are auto-detached during encoder * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally * *not* be balanced with a drm_bridge_detach() in driver code. 
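 *
 * As a sketch (hypothetical variable names), a display driver attaching the
 * first bridge of a chain acquired with devm_drm_of_get_bridge() would do::
 *
 *	bridge = devm_drm_of_get_bridge(dev, node, 0, 0);
 *	if (IS_ERR(bridge))
 *		return PTR_ERR(bridge);
 *
 *	ret = drm_bridge_attach(encoder, bridge, NULL,
 *				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 *	if (ret)
 *		return ret;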
* * RETURNS: * Zero on success, error code on failure */ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, struct drm_bridge *previous, enum drm_bridge_attach_flags flags) { int ret; if (!encoder || !bridge) return -EINVAL; if (previous && (!previous->dev || previous->encoder != encoder)) return -EINVAL; if (bridge->dev) return -EBUSY; bridge->dev = encoder->dev; bridge->encoder = encoder; if (previous) list_add(&bridge->chain_node, &previous->chain_node); else list_add(&bridge->chain_node, &encoder->bridge_chain); if (bridge->funcs->attach) { ret = bridge->funcs->attach(bridge, encoder, flags); if (ret < 0) goto err_reset_bridge; } if (drm_bridge_is_atomic(bridge)) { struct drm_bridge_state *state; state = bridge->funcs->atomic_reset(bridge); if (IS_ERR(state)) { ret = PTR_ERR(state); goto err_detach_bridge; } drm_atomic_private_obj_init(bridge->dev, &bridge->base, &state->base, &drm_bridge_priv_state_funcs); } return 0; err_detach_bridge: if (bridge->funcs->detach) bridge->funcs->detach(bridge); err_reset_bridge: bridge->dev = NULL; bridge->encoder = NULL; list_del(&bridge->chain_node); if (ret != -EPROBE_DEFER) DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n", bridge->of_node, encoder->name, ret); else dev_err_probe(encoder->dev->dev, -EPROBE_DEFER, "failed to attach bridge %pOF to encoder %s\n", bridge->of_node, encoder->name); return ret; } EXPORT_SYMBOL(drm_bridge_attach); void drm_bridge_detach(struct drm_bridge *bridge) { if (WARN_ON(!bridge)) return; if (WARN_ON(!bridge->dev)) return; if (drm_bridge_is_atomic(bridge)) drm_atomic_private_obj_fini(&bridge->base); if (bridge->funcs->detach) bridge->funcs->detach(bridge); list_del(&bridge->chain_node); bridge->dev = NULL; } /** * DOC: bridge operations * * Bridge drivers expose operations through the &drm_bridge_funcs structure. * The DRM internals (atomic and CRTC helpers) use the helpers defined in * drm_bridge.c to call bridge operations. Those operations are divided into * three big categories to support different parts of the bridge usage. * * - The encoder-related operations support control of the bridges in the * chain, and are roughly counterparts to the &drm_encoder_helper_funcs * operations. They are used by the legacy CRTC and the atomic modeset * helpers to perform mode validation, fixup and setting, and enable and * disable the bridge automatically. * * The enable and disable operations are split in * &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable, * &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide * finer-grained control. * * Bridge drivers may implement the legacy version of those operations, or * the atomic version (prefixed with atomic\_), in which case they shall also * implement the atomic state bookkeeping operations * (&drm_bridge_funcs.atomic_duplicate_state, * &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset). * Mixing atomic and non-atomic versions of the operations is not supported. * * - The bus format negotiation operations * &drm_bridge_funcs.atomic_get_output_bus_fmts and * &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to * negotiate the formats transmitted between bridges in the chain when * multiple formats are supported. Negotiation for formats is performed * transparently for display drivers by the atomic modeset helpers. Only * atomic versions of those operations exist; bridge drivers that need to * implement them shall thus also implement the atomic version of the * encoder-related operations.
This feature is not supported by the legacy * CRTC helpers. * * - The connector-related operations support implementing a &drm_connector * based on a chain of bridges. DRM bridges traditionally create a * &drm_connector for bridges meant to be used at the end of the chain. This * puts additional burden on bridge drivers, especially for bridges that may * be used in the middle of a chain or at the end of it. Furthermore, it * requires all operations of the &drm_connector to be handled by a single * bridge, which doesn't always match the hardware architecture. * * To simplify bridge drivers and make the connector implementation more * flexible, a new model allows bridges to unconditionally skip creation of * &drm_connector and instead expose &drm_bridge_funcs operations to support * an externally-implemented &drm_connector. Those operations are * &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes, * &drm_bridge_funcs.edid_read, &drm_bridge_funcs.hpd_notify, * &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When * implemented, display drivers shall create a &drm_connector instance for * each chain of bridges, and implement those connector instances based on * the bridge connector operations. * * Bridge drivers shall implement the connector-related operations for all * the features that the bridge hardware supports. For instance, if a bridge * supports reading EDID, the &drm_bridge_funcs.edid_read operation shall be * implemented. This however doesn't mean that the DDC lines are wired to the * bridge on a particular platform, as they could also be connected to an I2C * controller of the SoC. Support for the connector-related operations on the * running platform is reported through the &drm_bridge.ops flags. Bridge * drivers shall detect which operations they can support on the platform * (usually this information is provided by ACPI or DT), and set the * &drm_bridge.ops flags for all supported operations. A flag shall only be * set if the corresponding &drm_bridge_funcs operation is implemented, but * an implemented operation doesn't necessarily imply that the corresponding * flag will be set. Display drivers shall use the &drm_bridge.ops flags to * decide which bridge to delegate a connector operation to. This mechanism * allows providing a single static const &drm_bridge_funcs instance in * bridge drivers, improving security by storing function pointers in * read-only memory. * * In order to ease transition, bridge drivers may support both the old and * new models by making connector creation optional and implementing the * connector-related bridge operations. Connector creation is then controlled * by the flags argument to the drm_bridge_attach() function. Display drivers * that support the new model and create connectors themselves shall set the * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip * connector creation. For intermediate bridges in the chain, the flag shall * be passed to the drm_bridge_attach() call for the downstream bridge. * Bridge drivers that implement the new model only shall return an error * from their &drm_bridge_funcs.attach handler when the * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers * should use the new model, and convert the bridge drivers they use if * needed, in order to gradually transition to the new model. */ /** * drm_bridge_chain_mode_valid - validate the mode against all bridges in the * encoder chain.
* @bridge: bridge control structure * @info: display info against which the mode shall be validated * @mode: desired mode to be validated * * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder * chain, starting from the first bridge to the last. If at least one bridge * does not accept the mode the function returns the error code. * * Note: the bridge passed should be the one closest to the encoder. * * RETURNS: * MODE_OK on success, drm_mode_status Enum error code on failure */ enum drm_mode_status drm_bridge_chain_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct drm_encoder *encoder; if (!bridge) return MODE_OK; encoder = bridge->encoder; list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { enum drm_mode_status ret; if (!bridge->funcs->mode_valid) continue; ret = bridge->funcs->mode_valid(bridge, info, mode); if (ret != MODE_OK) return ret; } return MODE_OK; } EXPORT_SYMBOL(drm_bridge_chain_mode_valid); /** * drm_bridge_chain_mode_set - set proposed mode for all bridges in the * encoder chain * @bridge: bridge control structure * @mode: desired mode to be set for the encoder chain * @adjusted_mode: updated mode that works for this encoder chain * * Calls &drm_bridge_funcs.mode_set op for all the bridges in the * encoder chain, starting from the first bridge to the last. * * Note: the bridge passed should be the one closest to the encoder */ void drm_bridge_chain_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { struct drm_encoder *encoder; if (!bridge) return; encoder = bridge->encoder; list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { if (bridge->funcs->mode_set) bridge->funcs->mode_set(bridge, mode, adjusted_mode); } } EXPORT_SYMBOL(drm_bridge_chain_mode_set); /** * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain * @bridge: bridge control structure * @state: atomic state being committed * * Calls &drm_bridge_funcs.atomic_disable (falls back on * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain, * starting from the last bridge to the first. These are called before calling * &drm_encoder_helper_funcs.atomic_disable * * Note: the bridge passed should be the one closest to the encoder */ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge, struct drm_atomic_state *state) { struct drm_encoder *encoder; struct drm_bridge *iter; if (!bridge) return; encoder = bridge->encoder; list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) { if (iter->funcs->atomic_disable) { iter->funcs->atomic_disable(iter, state); } else if (iter->funcs->disable) { iter->funcs->disable(iter); } if (iter == bridge) break; } } EXPORT_SYMBOL(drm_atomic_bridge_chain_disable); static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge, struct drm_atomic_state *state) { if (state && bridge->funcs->atomic_post_disable) bridge->funcs->atomic_post_disable(bridge, state); else if (bridge->funcs->post_disable) bridge->funcs->post_disable(bridge); } /** * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges * in the encoder chain * @bridge: bridge control structure * @state: atomic state being committed * * Calls &drm_bridge_funcs.atomic_post_disable (falls back on * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain, * starting from the first bridge to the last. 
These are called after completing * &drm_encoder_helper_funcs.atomic_disable * * If a bridge sets @pre_enable_prev_first, then the @post_disable for that * bridge will be called before the previous one to reverse the @pre_enable * calling direction. * * Example: * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E * * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the * resulting @post_disable order would be: * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C. * * Note: the bridge passed should be the one closest to the encoder */ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge, struct drm_atomic_state *state) { struct drm_encoder *encoder; struct drm_bridge *next, *limit; if (!bridge) return; encoder = bridge->encoder; list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { limit = NULL; if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) { next = list_next_entry(bridge, chain_node); if (next->pre_enable_prev_first) { /* next bridge had requested that prev * was enabled first, so disabled last */ limit = next; /* Find the next bridge that has NOT requested * prev to be enabled first / disabled last */ list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) { if (!next->pre_enable_prev_first) { next = list_prev_entry(next, chain_node); limit = next; break; } if (list_is_last(&next->chain_node, &encoder->bridge_chain)) { limit = next; break; } } /* Call these bridges in reverse order */ list_for_each_entry_from_reverse(next, &encoder->bridge_chain, chain_node) { if (next == bridge) break; drm_atomic_bridge_call_post_disable(next, state); } } } drm_atomic_bridge_call_post_disable(bridge, state); if (limit) /* Jump all bridges that we have already post_disabled */ bridge = limit; } } EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable); static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge, struct drm_atomic_state *state) { if (state && bridge->funcs->atomic_pre_enable) bridge->funcs->atomic_pre_enable(bridge, state); else if (bridge->funcs->pre_enable) bridge->funcs->pre_enable(bridge); } /** * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in * the encoder chain * @bridge: bridge control structure * @state: atomic state being committed * * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain, * starting from the last bridge to the first. These are called before calling * &drm_encoder_helper_funcs.atomic_enable * * If a bridge sets @pre_enable_prev_first, then the pre_enable for the * prev bridge will be called before pre_enable of this bridge. * * Example: * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E * * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the * resulting @pre_enable order would be: * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B.
* * Note: the bridge passed should be the one closest to the encoder */ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge, struct drm_atomic_state *state) { struct drm_encoder *encoder; struct drm_bridge *iter, *next, *limit; if (!bridge) return; encoder = bridge->encoder; list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) { if (iter->pre_enable_prev_first) { next = iter; limit = bridge; list_for_each_entry_from_reverse(next, &encoder->bridge_chain, chain_node) { if (next == bridge) break; if (!next->pre_enable_prev_first) { /* Found first bridge that does NOT * request prev to be enabled first */ limit = next; break; } } list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) { /* Call requested prev bridge pre_enable * in order. */ if (next == iter) /* At the first bridge to request prev * bridges called first. */ break; drm_atomic_bridge_call_pre_enable(next, state); } } drm_atomic_bridge_call_pre_enable(iter, state); if (iter->pre_enable_prev_first) /* Jump all bridges that we have already pre_enabled */ iter = limit; if (iter == bridge) break; } } EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable); /** * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain * @bridge: bridge control structure * @state: atomic state being committed * * Calls &drm_bridge_funcs.atomic_enable (falls back on * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain, * starting from the first bridge to the last. These are called after completing * &drm_encoder_helper_funcs.atomic_enable * * Note: the bridge passed should be the one closest to the encoder */ void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge, struct drm_atomic_state *state) { struct drm_encoder *encoder; if (!bridge) return; encoder = bridge->encoder; list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { if (bridge->funcs->atomic_enable) { bridge->funcs->atomic_enable(bridge, state); } else if (bridge->funcs->enable) { bridge->funcs->enable(bridge); } } } EXPORT_SYMBOL(drm_atomic_bridge_chain_enable); static int drm_atomic_bridge_check(struct drm_bridge *bridge, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { if (bridge->funcs->atomic_check) { struct drm_bridge_state *bridge_state; int ret; bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, bridge); if (WARN_ON(!bridge_state)) return -EINVAL; ret = bridge->funcs->atomic_check(bridge, bridge_state, crtc_state, conn_state); if (ret) return ret; } else if (bridge->funcs->mode_fixup) { if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode, &crtc_state->adjusted_mode)) return -EINVAL; } return 0; } static int select_bus_fmt_recursive(struct drm_bridge *first_bridge, struct drm_bridge *cur_bridge, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 out_bus_fmt) { unsigned int i, num_in_bus_fmts = 0; struct drm_bridge_state *cur_state; struct drm_bridge *prev_bridge; u32 *in_bus_fmts; int ret; prev_bridge = drm_bridge_get_prev_bridge(cur_bridge); cur_state = drm_atomic_get_new_bridge_state(crtc_state->state, cur_bridge); /* * If bus format negotiation is not supported by this bridge, let's * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and * hope that it can handle this situation gracefully (by providing * appropriate default values). 
*/ if (!cur_bridge->funcs->atomic_get_input_bus_fmts) { if (cur_bridge != first_bridge) { ret = select_bus_fmt_recursive(first_bridge, prev_bridge, crtc_state, conn_state, MEDIA_BUS_FMT_FIXED); if (ret) return ret; } /* * Driver does not implement the atomic state hooks, but that's * fine, as long as it does not access the bridge state. */ if (cur_state) { cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED; cur_state->output_bus_cfg.format = out_bus_fmt; } return 0; } /* * If the driver implements ->atomic_get_input_bus_fmts() it * should also implement the atomic state hooks. */ if (WARN_ON(!cur_state)) return -EINVAL; in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge, cur_state, crtc_state, conn_state, out_bus_fmt, &num_in_bus_fmts); if (!num_in_bus_fmts) return -ENOTSUPP; else if (!in_bus_fmts) return -ENOMEM; if (first_bridge == cur_bridge) { cur_state->input_bus_cfg.format = in_bus_fmts[0]; cur_state->output_bus_cfg.format = out_bus_fmt; kfree(in_bus_fmts); return 0; } for (i = 0; i < num_in_bus_fmts; i++) { ret = select_bus_fmt_recursive(first_bridge, prev_bridge, crtc_state, conn_state, in_bus_fmts[i]); if (ret != -ENOTSUPP) break; } if (!ret) { cur_state->input_bus_cfg.format = in_bus_fmts[i]; cur_state->output_bus_cfg.format = out_bus_fmt; } kfree(in_bus_fmts); return ret; } /* * This function is called by &drm_atomic_bridge_chain_check() just before * calling &drm_bridge_funcs.atomic_check() on all elements of the chain. * It performs bus format negotiation between bridge elements. The negotiation * happens in reverse order, starting from the last element in the chain up to * @bridge. * * Negotiation starts by retrieving supported output bus formats on the last * bridge element and testing them one by one. The test is recursive, meaning * that for each tested output format, the whole chain will be walked backward, * and each element will have to choose an input bus format that can be * transcoded to the requested output format. When a bridge element does not * support transcoding into a specific output format -ENOTSUPP is returned and * the next bridge element will have to try a different format. If none of the * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail. * * This implementation is relying on * &drm_bridge_funcs.atomic_get_output_bus_fmts() and * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported * input/output formats. * * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts() * tries a single format: &drm_connector.display_info.bus_formats[0] if * available, MEDIA_BUS_FMT_FIXED otherwise. * * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented, * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the * bridge element that lacks this hook and asks the previous element in the * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what * to do in that case (fail if they want to enforce bus format negotiation, or * provide a reasonable default if they need to support pipelines where not * all elements support bus format negotiation). 
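 *
 * For illustration (a sketch, not taken from a real driver), a bridge that
 * only accepts MEDIA_BUS_FMT_RGB888_1X24 on its input, whatever the output
 * format, could implement the input format hook as:
 *
 *	static u32 *
 *	my_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
 *				     struct drm_bridge_state *bridge_state,
 *				     struct drm_crtc_state *crtc_state,
 *				     struct drm_connector_state *conn_state,
 *				     u32 output_fmt,
 *				     unsigned int *num_input_fmts)
 *	{
 *		u32 *input_fmts;
 *
 *		input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
 *		if (!input_fmts) {
 *			*num_input_fmts = 0;
 *			return NULL;
 *		}
 *
 *		input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
 *		*num_input_fmts = 1;
 *		return input_fmts;
 *	}
 *
 * The caller takes ownership of the returned array and kfree()s it once a
 * format has been selected.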
*/ static int drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_connector *conn = conn_state->connector; struct drm_encoder *encoder = bridge->encoder; struct drm_bridge_state *last_bridge_state; unsigned int i, num_out_bus_fmts = 0; struct drm_bridge *last_bridge; u32 *out_bus_fmts; int ret = 0; last_bridge = list_last_entry(&encoder->bridge_chain, struct drm_bridge, chain_node); last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, last_bridge); if (last_bridge->funcs->atomic_get_output_bus_fmts) { const struct drm_bridge_funcs *funcs = last_bridge->funcs; /* * If the driver implements ->atomic_get_output_bus_fmts() it * should also implement the atomic state hooks. */ if (WARN_ON(!last_bridge_state)) return -EINVAL; out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge, last_bridge_state, crtc_state, conn_state, &num_out_bus_fmts); if (!num_out_bus_fmts) return -ENOTSUPP; else if (!out_bus_fmts) return -ENOMEM; } else { num_out_bus_fmts = 1; out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL); if (!out_bus_fmts) return -ENOMEM; if (conn->display_info.num_bus_formats && conn->display_info.bus_formats) out_bus_fmts[0] = conn->display_info.bus_formats[0]; else out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED; } for (i = 0; i < num_out_bus_fmts; i++) { ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state, conn_state, out_bus_fmts[i]); if (ret != -ENOTSUPP) break; } kfree(out_bus_fmts); return ret; } static void drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge, struct drm_connector *conn, struct drm_atomic_state *state) { struct drm_bridge_state *bridge_state, *next_bridge_state; struct drm_bridge *next_bridge; u32 output_flags = 0; bridge_state = drm_atomic_get_new_bridge_state(state, bridge); /* No bridge state attached to this bridge => nothing to propagate. */ if (!bridge_state) return; next_bridge = drm_bridge_get_next_bridge(bridge); /* * Let's try to apply the most common case here, that is, propagate * display_info flags for the last bridge, and propagate the input * flags of the next bridge element to the output end of the current * bridge when the bridge is not the last one. * There are exceptions to this rule, like when signal inversion is * happening at the board level, but that's something drivers can deal * with from their &drm_bridge_funcs.atomic_check() implementation by * simply overriding the flags value we've set here. */ if (!next_bridge) { output_flags = conn->display_info.bus_flags; } else { next_bridge_state = drm_atomic_get_new_bridge_state(state, next_bridge); /* * No bridge state attached to the next bridge, just leave the * flags to 0. */ if (next_bridge_state) output_flags = next_bridge_state->input_bus_cfg.flags; } bridge_state->output_bus_cfg.flags = output_flags; /* * Propagate the output flags to the input end of the bridge. Again, it's * not necessarily what all bridges want, but that's what most of them * do, and by doing that by default we avoid forcing drivers to * duplicate the "dummy propagation" logic. 
*/ bridge_state->input_bus_cfg.flags = output_flags; } /** * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain * @bridge: bridge control structure * @crtc_state: new CRTC state * @conn_state: new connector state * * First trigger a bus format negotiation before calling * &drm_bridge_funcs.atomic_check() (falls back on * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain, * starting from the last bridge to the first. These are called before calling * &drm_encoder_helper_funcs.atomic_check() * * RETURNS: * 0 on success, a negative error code on failure */ int drm_atomic_bridge_chain_check(struct drm_bridge *bridge, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_connector *conn = conn_state->connector; struct drm_encoder *encoder; struct drm_bridge *iter; int ret; if (!bridge) return 0; ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state, conn_state); if (ret) return ret; encoder = bridge->encoder; list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) { int ret; /* * Bus flags are propagated by default. If a bridge needs to * tweak the input bus flags for any reason, it should happen * in its &drm_bridge_funcs.atomic_check() implementation such * that preceding bridges in the chain can propagate the new * bus flags. */ drm_atomic_bridge_propagate_bus_flags(iter, conn, crtc_state->state); ret = drm_atomic_bridge_check(iter, crtc_state, conn_state); if (ret) return ret; if (iter == bridge) break; } return 0; } EXPORT_SYMBOL(drm_atomic_bridge_chain_check); /** * drm_bridge_detect - check if anything is attached to the bridge output * @bridge: bridge control structure * * If the bridge supports output detection, as reported by the * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the * bridge and return the connection status. Otherwise return * connector_status_unknown. * * RETURNS: * The detection status on success, or connector_status_unknown if the bridge * doesn't support output detection. */ enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge) { if (!(bridge->ops & DRM_BRIDGE_OP_DETECT)) return connector_status_unknown; return bridge->funcs->detect(bridge); } EXPORT_SYMBOL_GPL(drm_bridge_detect); /** * drm_bridge_get_modes - fill all modes currently valid for the sink into the * @connector * @bridge: bridge control structure * @connector: the connector to fill with modes * * If the bridge supports output modes retrieval, as reported by the * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to * fill the connector with all valid modes and return the number of modes * added. Otherwise return 0. * * RETURNS: * The number of modes added to the connector. */ int drm_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector) { if (!(bridge->ops & DRM_BRIDGE_OP_MODES)) return 0; return bridge->funcs->get_modes(bridge, connector); } EXPORT_SYMBOL_GPL(drm_bridge_get_modes); /** * drm_bridge_edid_read - read the EDID data of the connected display * @bridge: bridge control structure * @connector: the connector to read EDID for * * If the bridge supports output EDID retrieval, as reported by the * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get * the EDID and return it. Otherwise return NULL. * * RETURNS: * The retrieved EDID on success, or NULL otherwise. 
*/ const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector) { if (!(bridge->ops & DRM_BRIDGE_OP_EDID)) return NULL; return bridge->funcs->edid_read(bridge, connector); } EXPORT_SYMBOL_GPL(drm_bridge_edid_read); /** * drm_bridge_hpd_enable - enable hot plug detection for the bridge * @bridge: bridge control structure * @cb: hot-plug detection callback * @data: data to be passed to the hot-plug detection callback * * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb * and @data as hot plug notification callback. From now on the @cb will be * called with @data when an output status change is detected by the bridge, * until hot plug notification gets disabled with drm_bridge_hpd_disable(). * * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in * bridge->ops. This function shall not be called when the flag is not set. * * Only one hot plug detection callback can be registered at a time, it is an * error to call this function when hot plug detection is already enabled for * the bridge. */ void drm_bridge_hpd_enable(struct drm_bridge *bridge, void (*cb)(void *data, enum drm_connector_status status), void *data) { if (!(bridge->ops & DRM_BRIDGE_OP_HPD)) return; mutex_lock(&bridge->hpd_mutex); if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n")) goto unlock; bridge->hpd_cb = cb; bridge->hpd_data = data; if (bridge->funcs->hpd_enable) bridge->funcs->hpd_enable(bridge); unlock: mutex_unlock(&bridge->hpd_mutex); } EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable); /** * drm_bridge_hpd_disable - disable hot plug detection for the bridge * @bridge: bridge control structure * * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot * plug detection callback previously registered with drm_bridge_hpd_enable(). * Once this function returns the callback will not be called by the bridge * when an output status change occurs. * * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in * bridge->ops. This function shall not be called when the flag is not set. */ void drm_bridge_hpd_disable(struct drm_bridge *bridge) { if (!(bridge->ops & DRM_BRIDGE_OP_HPD)) return; mutex_lock(&bridge->hpd_mutex); if (bridge->funcs->hpd_disable) bridge->funcs->hpd_disable(bridge); bridge->hpd_cb = NULL; bridge->hpd_data = NULL; mutex_unlock(&bridge->hpd_mutex); } EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable); /** * drm_bridge_hpd_notify - notify hot plug detection events * @bridge: bridge control structure * @status: output connection status * * Bridge drivers shall call this function to report hot plug events when they * detect a change in the output status, when hot plug detection has been * enabled by drm_bridge_hpd_enable(). * * This function shall be called in a context that can sleep. 
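 *
 * For instance (a sketch with hypothetical my_bridge names), the threaded
 * interrupt handler of a bridge with a hot plug detect pin could report
 * events as::
 *
 *	static irqreturn_t my_bridge_hpd_irq_thread(int irq, void *arg)
 *	{
 *		struct my_bridge *mb = arg;
 *		enum drm_connector_status status;
 *
 *		status = my_bridge_hpd_asserted(mb) ?
 *			 connector_status_connected :
 *			 connector_status_disconnected;
 *
 *		drm_bridge_hpd_notify(&mb->bridge, status);
 *		return IRQ_HANDLED;
 *	}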
*/ void drm_bridge_hpd_notify(struct drm_bridge *bridge, enum drm_connector_status status) { mutex_lock(&bridge->hpd_mutex); if (bridge->hpd_cb) bridge->hpd_cb(bridge->hpd_data, status); mutex_unlock(&bridge->hpd_mutex); } EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify); #ifdef CONFIG_OF /** * of_drm_find_bridge - find the bridge corresponding to the device node in * the global bridge list * * @np: device node * * RETURNS: * drm_bridge control struct on success, NULL on failure */ struct drm_bridge *of_drm_find_bridge(struct device_node *np) { struct drm_bridge *bridge; mutex_lock(&bridge_lock); list_for_each_entry(bridge, &bridge_list, list) { if (bridge->of_node == np) { mutex_unlock(&bridge_lock); return bridge; } } mutex_unlock(&bridge_lock); return NULL; } EXPORT_SYMBOL(of_drm_find_bridge); #endif static void drm_bridge_debugfs_show_bridge(struct drm_printer *p, struct drm_bridge *bridge, unsigned int idx) { drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs); drm_printf(p, "\ttype: [%d] %s\n", bridge->type, drm_get_connector_type_name(bridge->type)); if (bridge->of_node) drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node); drm_printf(p, "\tops: [0x%x]", bridge->ops); if (bridge->ops & DRM_BRIDGE_OP_DETECT) drm_puts(p, " detect"); if (bridge->ops & DRM_BRIDGE_OP_EDID) drm_puts(p, " edid"); if (bridge->ops & DRM_BRIDGE_OP_HPD) drm_puts(p, " hpd"); if (bridge->ops & DRM_BRIDGE_OP_MODES) drm_puts(p, " modes"); if (bridge->ops & DRM_BRIDGE_OP_HDMI) drm_puts(p, " hdmi"); drm_puts(p, "\n"); } static int allbridges_show(struct seq_file *m, void *data) { struct drm_printer p = drm_seq_file_printer(m); struct drm_bridge *bridge; unsigned int idx = 0; mutex_lock(&bridge_lock); list_for_each_entry(bridge, &bridge_list, list) drm_bridge_debugfs_show_bridge(&p, bridge, idx++); mutex_unlock(&bridge_lock); return 0; } DEFINE_SHOW_ATTRIBUTE(allbridges); static int encoder_bridges_show(struct seq_file *m, void *data) { struct drm_encoder *encoder = m->private; struct drm_printer p = drm_seq_file_printer(m); struct drm_bridge *bridge; unsigned int idx = 0; drm_for_each_bridge_in_chain(encoder, bridge) drm_bridge_debugfs_show_bridge(&p, bridge, idx++); return 0; } DEFINE_SHOW_ATTRIBUTE(encoder_bridges); void drm_bridge_debugfs_params(struct dentry *root) { debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops); } void drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder) { /* bridges list */ debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops); } MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>"); MODULE_DESCRIPTION("DRM bridge infrastructure"); MODULE_LICENSE("GPL and additional rights"); |
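/*
 * Usage note: of_drm_find_bridge() is normally reached through helpers such
 * as drm_of_find_panel_or_bridge() or devm_drm_of_get_bridge(), but a driver
 * holding the bridge's device node can also do the lookup directly. A sketch
 * with hypothetical names:
 *
 *	bridge = of_drm_find_bridge(remote_node);
 *	if (!bridge)
 *		return -EPROBE_DEFER;	// bridge driver not bound yet
 *	return drm_bridge_attach(encoder, bridge, NULL, 0);
 */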
// SPDX-License-Identifier: GPL-2.0 /* * drivers/usb/core/generic.c - generic driver for USB devices (not interfaces) * * (C) Copyright 2005 Greg Kroah-Hartman <gregkh@suse.de> * * based on drivers/usb/usb.c which had the following copyrights: * (C) Copyright Linus Torvalds 1999 * (C) Copyright Johannes Erdfelt 1999-2001 * (C) Copyright Andreas Gal 1999 * (C) Copyright Gregory P. Smith 1999 * (C) Copyright Deti Fliegl 1999 (new USB architecture) * (C) Copyright Randy Dunlap 2000 * (C) Copyright David Brownell 2000-2004 * (C) Copyright Yggdrasil Computing, Inc. 2000 * (usb_device_id matching changes by Adam J. Richter) * (C) Copyright Greg Kroah-Hartman 2002-2003 * * Released under the GPLv2 only. */ #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/string_choices.h> #include <uapi/linux/usb/audio.h> #include "usb.h" static int is_rndis(struct usb_interface_descriptor *desc) { return desc->bInterfaceClass == USB_CLASS_COMM && desc->bInterfaceSubClass == 2 && desc->bInterfaceProtocol == 0xff; } static int is_activesync(struct usb_interface_descriptor *desc) { return desc->bInterfaceClass == USB_CLASS_MISC && desc->bInterfaceSubClass == 1 && desc->bInterfaceProtocol == 1; } static bool is_audio(struct usb_interface_descriptor *desc) { return desc->bInterfaceClass == USB_CLASS_AUDIO; } static bool is_uac3_config(struct usb_interface_descriptor *desc) { return desc->bInterfaceProtocol == UAC_VERSION_3; } int usb_choose_configuration(struct usb_device *udev) { int i; int num_configs; int insufficient_power = 0; struct usb_host_config *c, *best; struct usb_device_driver *udriver; /* * If a USB device (not an interface) doesn't have a driver then the * kernel has no business trying to select or install a configuration * for it.
*/ if (!udev->dev.driver) return -1; udriver = to_usb_device_driver(udev->dev.driver); if (usb_device_is_owned(udev)) return 0; if (udriver->choose_configuration) { i = udriver->choose_configuration(udev); if (i >= 0) return i; } best = NULL; c = udev->config; num_configs = udev->descriptor.bNumConfigurations; for (i = 0; i < num_configs; (i++, c++)) { struct usb_interface_descriptor *desc = NULL; /* It's possible that a config has no interfaces! */ if (c->desc.bNumInterfaces > 0) desc = &c->intf_cache[0]->altsetting->desc; /* * HP's USB bus-powered keyboard has only one configuration * and it claims to be self-powered; other devices may have * similar errors in their descriptors. If the next test * were allowed to execute, such configurations would always * be rejected and the devices would not work as expected. * In the meantime, we run the risk of selecting a config * that requires external power at a time when that power * isn't available. It seems to be the lesser of two evils. * * Bugzilla #6448 reports a device that appears to crash * when it receives a GET_DEVICE_STATUS request! We don't * have any other way to tell whether a device is self-powered, * but since we don't use that information anywhere but here, * the call has been removed. * * Maybe the GET_DEVICE_STATUS call and the test below can * be reinstated when device firmwares become more reliable. * Don't hold your breath. */ #if 0 /* Rule out self-powered configs for a bus-powered device */ if (bus_powered && (c->desc.bmAttributes & USB_CONFIG_ATT_SELFPOWER)) continue; #endif /* * The next test may not be as effective as it should be. * Some hubs have errors in their descriptor, claiming * to be self-powered when they are really bus-powered. * We will overestimate the amount of current such hubs * make available for each port. * * This is a fairly benign sort of failure. It won't * cause us to reject configurations that we should have * accepted. */ /* Rule out configs that draw too much bus current */ if (usb_get_max_power(udev, c) > udev->bus_mA) { insufficient_power++; continue; } /* * Select first configuration as default for audio so that * devices that don't comply with UAC3 protocol are supported. * But, still iterate through other configurations and * select UAC3 compliant config if present. */ if (desc && is_audio(desc)) { /* Always prefer the first found UAC3 config */ if (is_uac3_config(desc)) { best = c; break; } /* If there is no UAC3 config, prefer the first config */ else if (i == 0) best = c; /* Unconditional continue, because the rest of the code * in the loop is irrelevant for audio devices, and * because it can reassign best, which for audio devices * we don't want. */ continue; } /* When the first config's first interface is one of Microsoft's * pet nonstandard Ethernet-over-USB protocols, ignore it unless * this kernel has enabled the necessary host side driver. * But: Don't ignore it if it's the only config. */ if (i == 0 && num_configs > 1 && desc && (is_rndis(desc) || is_activesync(desc))) { #if !defined(CONFIG_USB_NET_RNDIS_HOST) && !defined(CONFIG_USB_NET_RNDIS_HOST_MODULE) continue; #else best = c; #endif } /* From the remaining configs, choose the first one whose * first interface is for a non-vendor-specific class. * Reason: Linux is more likely to have a class driver * than a vendor-specific driver. 
*/ else if (udev->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC && (desc && desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC)) { best = c; break; } /* If all the remaining configs are vendor-specific, * choose the first one. */ else if (!best) best = c; } if (insufficient_power > 0) dev_info(&udev->dev, "rejected %d configuration%s " "due to insufficient available bus power\n", insufficient_power, str_plural(insufficient_power)); if (best) { i = best->desc.bConfigurationValue; dev_dbg(&udev->dev, "configuration #%d chosen from %d choice%s\n", i, num_configs, str_plural(num_configs)); } else { i = -1; dev_warn(&udev->dev, "no configuration chosen from %d choice%s\n", num_configs, str_plural(num_configs)); } return i; } EXPORT_SYMBOL_GPL(usb_choose_configuration); static int __check_for_non_generic_match(struct device_driver *drv, void *data) { struct usb_device *udev = data; struct usb_device_driver *udrv; if (!is_usb_device_driver(drv)) return 0; udrv = to_usb_device_driver(drv); if (udrv == &usb_generic_driver) return 0; return usb_driver_applicable(udev, udrv); } static bool usb_generic_driver_match(struct usb_device *udev) { if (udev->use_generic_driver) return true; /* * If any other driver wants the device, leave the device to this other * driver. */ if (bus_for_each_drv(&usb_bus_type, NULL, udev, __check_for_non_generic_match)) return false; return true; } int usb_generic_driver_probe(struct usb_device *udev) { int err, c; /* Choose and set the configuration. This registers the interfaces * with the driver core and lets interface drivers bind to them. */ if (udev->authorized == 0) dev_err(&udev->dev, "Device is not authorized for usage\n"); else { c = usb_choose_configuration(udev); if (c >= 0) { err = usb_set_configuration(udev, c); if (err && err != -ENODEV) { dev_err(&udev->dev, "can't set config #%d, error %d\n", c, err); /* This need not be fatal. The user can try to * set other configurations. */ } } } /* USB device state == configured ... usable */ usb_notify_add_device(udev); return 0; } void usb_generic_driver_disconnect(struct usb_device *udev) { usb_notify_remove_device(udev); /* if this is only an unbind, not a physical disconnect, then * unconfigure the device */ if (udev->actconfig) usb_set_configuration(udev, -1); } #ifdef CONFIG_PM int usb_generic_driver_suspend(struct usb_device *udev, pm_message_t msg) { int rc; /* Normal USB devices suspend through their upstream port. * Root hubs don't have upstream ports to suspend, * so we have to shut down their downstream HC-to-USB * interfaces manually by doing a bus (or "global") suspend. */ if (!udev->parent) rc = hcd_bus_suspend(udev, msg); /* * Non-root USB2 devices don't need to do anything for FREEZE * or PRETHAW. USB3 devices don't support global suspend and * need to be selectively suspended. */ else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW) && (udev->speed < USB_SPEED_SUPER)) rc = 0; else rc = usb_port_suspend(udev, msg); if (rc == 0) usbfs_notify_suspend(udev); return rc; } int usb_generic_driver_resume(struct usb_device *udev, pm_message_t msg) { int rc; /* Normal USB devices resume/reset through their upstream port. * Root hubs don't have upstream ports to resume or reset, * so we have to start up their downstream HC-to-USB * interfaces manually by doing a bus (or "global") resume.
*/ if (!udev->parent) rc = hcd_bus_resume(udev, msg); else rc = usb_port_resume(udev, msg); if (rc == 0) usbfs_notify_resume(udev); return rc; } #endif /* CONFIG_PM */ struct usb_device_driver usb_generic_driver = { .name = "usb", .match = usb_generic_driver_match, .probe = usb_generic_driver_probe, .disconnect = usb_generic_driver_disconnect, #ifdef CONFIG_PM .suspend = usb_generic_driver_suspend, .resume = usb_generic_driver_resume, #endif .supports_autosuspend = 1, };
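/*
 * Illustrative sketch (not part of the original file; all names below are
 * hypothetical): a USB device driver can steer usb_choose_configuration()
 * through the choose_configuration callback consulted near the top of that
 * function. The callback returns a bConfigurationValue, or a negative value
 * to fall back to the heuristics above.
 *
 *	static int mydrv_choose_configuration(struct usb_device *udev)
 *	{
 *		// Pick configuration value 2 on multi-config devices,
 *		// otherwise defer to the generic heuristics.
 *		return udev->descriptor.bNumConfigurations > 1 ? 2 : -1;
 *	}
 *
 *	static struct usb_device_driver mydrv_driver = {
 *		.name = "mydrv",
 *		.choose_configuration = mydrv_choose_configuration,
 *	};
 */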
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for multitouch panels * * Copyright (c) 2010-2012 Stephane Chatty <chatty@enac.fr> * Copyright (c) 2010-2013 Benjamin Tissoires <benjamin.tissoires@gmail.com> * Copyright (c) 2010-2012 Ecole Nationale de l'Aviation Civile, France * Copyright (c) 2012-2013 Red Hat, Inc * * This code is partly based on hid-egalax.c: * * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr> * Copyright (c) 2010 Henrik Rydberg <rydberg@euromail.se> * Copyright (c) 2010 Canonical, Ltd. * * This code is partly based on hid-3m-pct.c: * * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr> * Copyright (c) 2010 Henrik Rydberg <rydberg@euromail.se> * Copyright (c) 2010 Canonical, Ltd. */ /* * This driver is regularly tested thanks to the test suite in hid-tools[1]. * Please run these regression tests before patching this module so that * your patch won't break existing known devices. * * [1] https://gitlab.freedesktop.org/libevdev/hid-tools */ #include <linux/bits.h> #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input/mt.h> #include <linux/jiffies.h> #include <linux/string.h> #include <linux/timer.h> MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>"); MODULE_DESCRIPTION("HID multitouch panels"); MODULE_LICENSE("GPL"); #include "hid-ids.h" /* quirks to control the device */ #define MT_QUIRK_NOT_SEEN_MEANS_UP BIT(0) #define MT_QUIRK_SLOT_IS_CONTACTID BIT(1) #define MT_QUIRK_CYPRESS BIT(2) #define MT_QUIRK_SLOT_IS_CONTACTNUMBER BIT(3) #define MT_QUIRK_ALWAYS_VALID BIT(4) #define MT_QUIRK_VALID_IS_INRANGE BIT(5) #define MT_QUIRK_VALID_IS_CONFIDENCE BIT(6) #define MT_QUIRK_CONFIDENCE BIT(7) #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE BIT(8) #define MT_QUIRK_NO_AREA BIT(9) #define MT_QUIRK_IGNORE_DUPLICATES BIT(10) #define MT_QUIRK_HOVERING BIT(11) #define MT_QUIRK_CONTACT_CNT_ACCURATE BIT(12) #define MT_QUIRK_FORCE_GET_FEATURE BIT(13) #define MT_QUIRK_FIX_CONST_CONTACT_ID BIT(14) #define MT_QUIRK_TOUCH_SIZE_SCALING BIT(15) #define MT_QUIRK_STICKY_FINGERS BIT(16) #define MT_QUIRK_ASUS_CUSTOM_UP BIT(17) #define MT_QUIRK_WIN8_PTP_BUTTONS BIT(18) #define MT_QUIRK_SEPARATE_APP_REPORT BIT(19) #define MT_QUIRK_FORCE_MULTI_INPUT BIT(20) #define MT_QUIRK_DISABLE_WAKEUP BIT(21) #define MT_QUIRK_ORIENTATION_INVERT BIT(22) #define MT_INPUTMODE_TOUCHSCREEN 0x02 #define MT_INPUTMODE_TOUCHPAD 0x03 #define MT_BUTTONTYPE_CLICKPAD 0 enum latency_mode { HID_LATENCY_NORMAL = 0, HID_LATENCY_HIGH = 1, }; enum report_mode { TOUCHPAD_REPORT_NONE = 0, TOUCHPAD_REPORT_BUTTONS = BIT(0), TOUCHPAD_REPORT_CONTACTS = BIT(1), TOUCHPAD_REPORT_ALL = TOUCHPAD_REPORT_BUTTONS | TOUCHPAD_REPORT_CONTACTS, }; #define MT_IO_FLAGS_RUNNING 0 #define MT_IO_FLAGS_ACTIVE_SLOTS 1 #define MT_IO_FLAGS_PENDING_SLOTS 2 static const bool mtrue = true; /* default for true */ static const bool mfalse; /* default for false */ static const __s32 mzero; /* default for 0 */ #define DEFAULT_TRUE ((void *)&mtrue) #define DEFAULT_FALSE ((void *)&mfalse) #define DEFAULT_ZERO ((void *)&mzero) struct mt_usages { struct list_head list; __s32 *x, *y, *cx, *cy, *p, *w, *h, *a; __s32 *contactid; /* the device ContactID assigned to this slot */ bool *tip_state; /* is the touch valid?
*/ bool *inrange_state; /* is the finger in proximity of the sensor? */ bool *confidence_state; /* is the touch made by a finger? */ }; struct mt_application { struct list_head list; unsigned int application; unsigned int report_id; struct list_head mt_usages; /* mt usages list */ __s32 quirks; __s32 *scantime; /* scantime reported */ __s32 scantime_logical_max; /* max value for raw scantime */ __s32 *raw_cc; /* contact count in the report */ int left_button_state; /* left button state */ unsigned int mt_flags; /* flags to pass to input-mt */ unsigned long *pending_palm_slots; /* slots where we reported palm * and need to release */ __u8 num_received; /* how many contacts we received */ __u8 num_expected; /* expected last contact index */ __u8 buttons_count; /* number of physical buttons per touchpad */ __u8 touches_by_report; /* how many touches are present in one report: * 1 means we should use a serial protocol * > 1 means hybrid (multitouch) protocol */ unsigned long jiffies; /* the frame's jiffies */ int timestamp; /* the timestamp to be sent */ int prev_scantime; /* scantime reported previously */ bool have_contact_count; }; struct mt_class { __s32 name; /* MT_CLS */ __s32 quirks; __s32 sn_move; /* Signal/noise ratio for move events */ __s32 sn_width; /* Signal/noise ratio for width events */ __s32 sn_height; /* Signal/noise ratio for height events */ __s32 sn_pressure; /* Signal/noise ratio for pressure events */ __u8 maxcontacts; bool is_indirect; /* true for touchpads */ bool export_all_inputs; /* do not ignore mouse, keyboards, etc... */ }; struct mt_report_data { struct list_head list; struct hid_report *report; struct mt_application *application; bool is_mt_collection; }; struct mt_device { struct mt_class mtclass; /* our mt device class */ struct timer_list release_timer; /* to release sticky fingers */ struct hid_device *hdev; /* hid_device we're attached to */ unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */ __u8 inputmode_value; /* InputMode HID feature value */ __u8 maxcontacts; bool is_buttonpad; /* is this device a button pad? 
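* Single-button clickpads additionally get INPUT_PROP_BUTTONPAD set in * mt_touch_input_configured().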
*/ bool serial_maybe; /* need to check for serial protocol */ struct list_head applications; struct list_head reports; }; static void mt_post_parse_default_settings(struct mt_device *td, struct mt_application *app); static void mt_post_parse(struct mt_device *td, struct mt_application *app); /* classes of device behavior */ #define MT_CLS_DEFAULT 0x0001 #define MT_CLS_SERIAL 0x0002 #define MT_CLS_CONFIDENCE 0x0003 #define MT_CLS_CONFIDENCE_CONTACT_ID 0x0004 #define MT_CLS_CONFIDENCE_MINUS_ONE 0x0005 #define MT_CLS_DUAL_INRANGE_CONTACTID 0x0006 #define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0007 /* reserved 0x0008 */ #define MT_CLS_INRANGE_CONTACTNUMBER 0x0009 #define MT_CLS_NSMU 0x000a /* reserved 0x0010 */ /* reserved 0x0011 */ #define MT_CLS_WIN_8 0x0012 #define MT_CLS_EXPORT_ALL_INPUTS 0x0013 /* reserved 0x0014 */ #define MT_CLS_WIN_8_FORCE_MULTI_INPUT 0x0015 #define MT_CLS_WIN_8_DISABLE_WAKEUP 0x0016 #define MT_CLS_WIN_8_NO_STICKY_FINGERS 0x0017 #define MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU 0x0018 /* vendor specific classes */ #define MT_CLS_3M 0x0101 /* reserved 0x0102 */ #define MT_CLS_EGALAX 0x0103 #define MT_CLS_EGALAX_SERIAL 0x0104 #define MT_CLS_TOPSEED 0x0105 #define MT_CLS_PANASONIC 0x0106 #define MT_CLS_FLATFROG 0x0107 #define MT_CLS_GENERALTOUCH_TWOFINGERS 0x0108 #define MT_CLS_GENERALTOUCH_PWT_TENFINGERS 0x0109 #define MT_CLS_LG 0x010a #define MT_CLS_ASUS 0x010b #define MT_CLS_VTL 0x0110 #define MT_CLS_GOOGLE 0x0111 #define MT_CLS_RAZER_BLADE_STEALTH 0x0112 #define MT_CLS_SMART_TECH 0x0113 #define MT_CLS_SIS 0x0457 #define MT_DEFAULT_MAXCONTACT 10 #define MT_MAX_MAXCONTACT 250 /* * Resync device and local timestamps after that many microseconds without * receiving data. */ #define MAX_TIMESTAMP_INTERVAL 1000000 #define MT_USB_DEVICE(v, p) HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH, v, p) #define MT_BT_DEVICE(v, p) HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH, v, p) /* * these device-dependent functions determine what slot corresponds * to a valid contact that was just read. 
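* For example, with MT_QUIRK_SLOT_IS_CONTACTID a device reporting contact * IDs 5 and 9 uses slots 5 and 9 directly, whereas the default * input_mt_get_slot_by_key() path would hand out slots 0 and 1.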
*/ static int cypress_compute_slot(struct mt_application *application, struct mt_usages *slot) { if (*slot->contactid != 0 || application->num_received == 0) return *slot->contactid; else return -1; } static const struct mt_class mt_classes[] = { { .name = MT_CLS_DEFAULT, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_CONTACT_CNT_ACCURATE }, { .name = MT_CLS_NSMU, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP }, { .name = MT_CLS_SERIAL, .quirks = MT_QUIRK_ALWAYS_VALID}, { .name = MT_CLS_CONFIDENCE, .quirks = MT_QUIRK_VALID_IS_CONFIDENCE }, { .name = MT_CLS_CONFIDENCE_CONTACT_ID, .quirks = MT_QUIRK_VALID_IS_CONFIDENCE | MT_QUIRK_SLOT_IS_CONTACTID }, { .name = MT_CLS_CONFIDENCE_MINUS_ONE, .quirks = MT_QUIRK_VALID_IS_CONFIDENCE | MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE }, { .name = MT_CLS_DUAL_INRANGE_CONTACTID, .quirks = MT_QUIRK_VALID_IS_INRANGE | MT_QUIRK_SLOT_IS_CONTACTID, .maxcontacts = 2 }, { .name = MT_CLS_DUAL_INRANGE_CONTACTNUMBER, .quirks = MT_QUIRK_VALID_IS_INRANGE | MT_QUIRK_SLOT_IS_CONTACTNUMBER, .maxcontacts = 2 }, { .name = MT_CLS_INRANGE_CONTACTNUMBER, .quirks = MT_QUIRK_VALID_IS_INRANGE | MT_QUIRK_SLOT_IS_CONTACTNUMBER }, { .name = MT_CLS_WIN_8, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_IGNORE_DUPLICATES | MT_QUIRK_HOVERING | MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_STICKY_FINGERS | MT_QUIRK_WIN8_PTP_BUTTONS, .export_all_inputs = true }, { .name = MT_CLS_EXPORT_ALL_INPUTS, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_CONTACT_CNT_ACCURATE, .export_all_inputs = true }, { .name = MT_CLS_WIN_8_FORCE_MULTI_INPUT, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_IGNORE_DUPLICATES | MT_QUIRK_HOVERING | MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_STICKY_FINGERS | MT_QUIRK_WIN8_PTP_BUTTONS | MT_QUIRK_FORCE_MULTI_INPUT, .export_all_inputs = true }, { .name = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, .quirks = MT_QUIRK_IGNORE_DUPLICATES | MT_QUIRK_HOVERING | MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_STICKY_FINGERS | MT_QUIRK_WIN8_PTP_BUTTONS | MT_QUIRK_FORCE_MULTI_INPUT | MT_QUIRK_NOT_SEEN_MEANS_UP, .export_all_inputs = true }, { .name = MT_CLS_WIN_8_DISABLE_WAKEUP, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_IGNORE_DUPLICATES | MT_QUIRK_HOVERING | MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_STICKY_FINGERS | MT_QUIRK_WIN8_PTP_BUTTONS | MT_QUIRK_DISABLE_WAKEUP, .export_all_inputs = true }, { .name = MT_CLS_WIN_8_NO_STICKY_FINGERS, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_IGNORE_DUPLICATES | MT_QUIRK_HOVERING | MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_WIN8_PTP_BUTTONS, .export_all_inputs = true }, /* * vendor specific classes */ { .name = MT_CLS_3M, .quirks = MT_QUIRK_VALID_IS_CONFIDENCE | MT_QUIRK_SLOT_IS_CONTACTID | MT_QUIRK_TOUCH_SIZE_SCALING, .sn_move = 2048, .sn_width = 128, .sn_height = 128, .maxcontacts = 60, }, { .name = MT_CLS_EGALAX, .quirks = MT_QUIRK_SLOT_IS_CONTACTID | MT_QUIRK_VALID_IS_INRANGE, .sn_move = 4096, .sn_pressure = 32, }, { .name = MT_CLS_EGALAX_SERIAL, .quirks = MT_QUIRK_SLOT_IS_CONTACTID | MT_QUIRK_ALWAYS_VALID, .sn_move = 4096, .sn_pressure = 32, }, { .name = MT_CLS_TOPSEED, .quirks = MT_QUIRK_ALWAYS_VALID, .is_indirect = true, .maxcontacts = 2, }, { .name = MT_CLS_PANASONIC, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP, .maxcontacts = 4 }, { .name = MT_CLS_GENERALTOUCH_TWOFINGERS, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | MT_QUIRK_VALID_IS_INRANGE | MT_QUIRK_SLOT_IS_CONTACTID, .maxcontacts = 2 }, { .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | MT_QUIRK_SLOT_IS_CONTACTID }, { .name = MT_CLS_FLATFROG, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | MT_QUIRK_NO_AREA, .sn_move = 
2048, .maxcontacts = 40, }, { .name = MT_CLS_LG, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_FIX_CONST_CONTACT_ID | MT_QUIRK_IGNORE_DUPLICATES | MT_QUIRK_HOVERING | MT_QUIRK_CONTACT_CNT_ACCURATE }, { .name = MT_CLS_ASUS, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_ASUS_CUSTOM_UP }, { .name = MT_CLS_VTL, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_FORCE_GET_FEATURE, }, { .name = MT_CLS_GOOGLE, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_SLOT_IS_CONTACTID | MT_QUIRK_HOVERING }, { .name = MT_CLS_RAZER_BLADE_STEALTH, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_IGNORE_DUPLICATES | MT_QUIRK_HOVERING | MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_WIN8_PTP_BUTTONS, }, { .name = MT_CLS_SMART_TECH, .quirks = MT_QUIRK_ALWAYS_VALID | MT_QUIRK_IGNORE_DUPLICATES | MT_QUIRK_CONTACT_CNT_ACCURATE | MT_QUIRK_SEPARATE_APP_REPORT, }, { .name = MT_CLS_SIS, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | MT_QUIRK_ALWAYS_VALID | MT_QUIRK_CONTACT_CNT_ACCURATE, }, { } }; static ssize_t mt_show_quirks(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct mt_device *td = hid_get_drvdata(hdev); return sprintf(buf, "%u\n", td->mtclass.quirks); } static ssize_t mt_set_quirks(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct mt_device *td = hid_get_drvdata(hdev); struct mt_application *application; unsigned long val; if (kstrtoul(buf, 0, &val)) return -EINVAL; td->mtclass.quirks = val; list_for_each_entry(application, &td->applications, list) { application->quirks = val; if (!application->have_contact_count) application->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE; } return count; } static DEVICE_ATTR(quirks, S_IWUSR | S_IRUGO, mt_show_quirks, mt_set_quirks); static struct attribute *sysfs_attrs[] = { &dev_attr_quirks.attr, NULL }; static const struct attribute_group mt_attribute_group = { .attrs = sysfs_attrs }; static void mt_get_feature(struct hid_device *hdev, struct hid_report *report) { int ret; u32 size = hid_report_len(report); u8 *buf; /* * Do not fetch the feature report if the device has been explicitly * marked as non-capable. 
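* Fetching the report here fills field->value[] so that callers such as * mt_feature_mapping() can read values like HID_DG_CONTACTMAX afterwards.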
*/ if (hdev->quirks & HID_QUIRK_NO_INIT_REPORTS) return; buf = hid_alloc_report_buf(report, GFP_KERNEL); if (!buf) return; ret = hid_hw_raw_request(hdev, report->id, buf, size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) { dev_warn(&hdev->dev, "failed to fetch feature %d\n", report->id); } else { ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, buf, size, 0); if (ret) dev_warn(&hdev->dev, "failed to report feature\n"); } kfree(buf); } static void mt_feature_mapping(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) { struct mt_device *td = hid_get_drvdata(hdev); switch (usage->hid) { case HID_DG_CONTACTMAX: mt_get_feature(hdev, field->report); td->maxcontacts = field->value[0]; if (!td->maxcontacts && field->logical_maximum <= MT_MAX_MAXCONTACT) td->maxcontacts = field->logical_maximum; if (td->mtclass.maxcontacts) /* check if the maxcontacts is given by the class */ td->maxcontacts = td->mtclass.maxcontacts; break; case HID_DG_BUTTONTYPE: if (usage->usage_index >= field->report_count) { dev_err(&hdev->dev, "HID_DG_BUTTONTYPE out of range\n"); break; } mt_get_feature(hdev, field->report); if (field->value[usage->usage_index] == MT_BUTTONTYPE_CLICKPAD) td->is_buttonpad = true; break; case 0xff0000c5: /* Retrieve the Win8 blob once to enable some devices */ if (usage->usage_index == 0) mt_get_feature(hdev, field->report); break; } } static void set_abs(struct input_dev *input, unsigned int code, struct hid_field *field, int snratio) { int fmin = field->logical_minimum; int fmax = field->logical_maximum; int fuzz = snratio ? (fmax - fmin) / snratio : 0; input_set_abs_params(input, code, fmin, fmax, fuzz, 0); input_abs_set_res(input, code, hidinput_calc_abs_res(field, code)); } static struct mt_usages *mt_allocate_usage(struct hid_device *hdev, struct mt_application *application) { struct mt_usages *usage; usage = devm_kzalloc(&hdev->dev, sizeof(*usage), GFP_KERNEL); if (!usage) return NULL; /* set some defaults so we do not need to check for null pointers */ usage->x = DEFAULT_ZERO; usage->y = DEFAULT_ZERO; usage->cx = DEFAULT_ZERO; usage->cy = DEFAULT_ZERO; usage->p = DEFAULT_ZERO; usage->w = DEFAULT_ZERO; usage->h = DEFAULT_ZERO; usage->a = DEFAULT_ZERO; usage->contactid = DEFAULT_ZERO; usage->tip_state = DEFAULT_FALSE; usage->inrange_state = DEFAULT_FALSE; usage->confidence_state = DEFAULT_TRUE; list_add_tail(&usage->list, &application->mt_usages); return usage; } static struct mt_application *mt_allocate_application(struct mt_device *td, struct hid_report *report) { unsigned int application = report->application; struct mt_application *mt_application; mt_application = devm_kzalloc(&td->hdev->dev, sizeof(*mt_application), GFP_KERNEL); if (!mt_application) return NULL; mt_application->application = application; INIT_LIST_HEAD(&mt_application->mt_usages); if (application == HID_DG_TOUCHSCREEN) mt_application->mt_flags |= INPUT_MT_DIRECT; /* * Model touchscreens providing buttons as touchpads. 
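* A HID_DG_TOUCHPAD application is registered as a pointer here, and * mt_set_modes() later writes MT_INPUTMODE_TOUCHPAD into the InputMode * feature.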
*/ if (application == HID_DG_TOUCHPAD) { mt_application->mt_flags |= INPUT_MT_POINTER; td->inputmode_value = MT_INPUTMODE_TOUCHPAD; } mt_application->scantime = DEFAULT_ZERO; mt_application->raw_cc = DEFAULT_ZERO; mt_application->quirks = td->mtclass.quirks; mt_application->report_id = report->id; list_add_tail(&mt_application->list, &td->applications); return mt_application; } static struct mt_application *mt_find_application(struct mt_device *td, struct hid_report *report) { unsigned int application = report->application; struct mt_application *tmp, *mt_application = NULL; list_for_each_entry(tmp, &td->applications, list) { if (application == tmp->application) { if (!(td->mtclass.quirks & MT_QUIRK_SEPARATE_APP_REPORT) || tmp->report_id == report->id) { mt_application = tmp; break; } } } if (!mt_application) mt_application = mt_allocate_application(td, report); return mt_application; } static struct mt_report_data *mt_allocate_report_data(struct mt_device *td, struct hid_report *report) { struct mt_report_data *rdata; struct hid_field *field; int r, n; rdata = devm_kzalloc(&td->hdev->dev, sizeof(*rdata), GFP_KERNEL); if (!rdata) return NULL; rdata->report = report; rdata->application = mt_find_application(td, report); if (!rdata->application) { devm_kfree(&td->hdev->dev, rdata); return NULL; } for (r = 0; r < report->maxfield; r++) { field = report->field[r]; if (!(HID_MAIN_ITEM_VARIABLE & field->flags)) continue; if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) { for (n = 0; n < field->report_count; n++) { if (field->usage[n].hid == HID_DG_CONTACTID) { rdata->is_mt_collection = true; break; } } } } list_add_tail(&rdata->list, &td->reports); return rdata; } static struct mt_report_data *mt_find_report_data(struct mt_device *td, struct hid_report *report) { struct mt_report_data *tmp, *rdata = NULL; list_for_each_entry(tmp, &td->reports, list) { if (report == tmp->report) { rdata = tmp; break; } } if (!rdata) rdata = mt_allocate_report_data(td, report); return rdata; } static void mt_store_field(struct hid_device *hdev, struct mt_application *application, __s32 *value, size_t offset) { struct mt_usages *usage; __s32 **target; if (list_empty(&application->mt_usages)) usage = mt_allocate_usage(hdev, application); else usage = list_last_entry(&application->mt_usages, struct mt_usages, list); if (!usage) return; target = (__s32 **)((char *)usage + offset); /* the value has already been filled, create a new slot */ if (*target != DEFAULT_TRUE && *target != DEFAULT_FALSE && *target != DEFAULT_ZERO) { if (usage->contactid == DEFAULT_ZERO || usage->x == DEFAULT_ZERO || usage->y == DEFAULT_ZERO) { hid_dbg(hdev, "ignoring duplicate usage on incomplete"); return; } usage = mt_allocate_usage(hdev, application); if (!usage) return; target = (__s32 **)((char *)usage + offset); } *target = value; } #define MT_STORE_FIELD(__name) \ mt_store_field(hdev, app, \ &field->value[usage->usage_index], \ offsetof(struct mt_usages, __name)) static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max, struct mt_application *app) { struct mt_device *td = hid_get_drvdata(hdev); struct mt_class *cls = &td->mtclass; int code; struct hid_usage *prev_usage = NULL; /* * Model touchscreens providing buttons as touchpads. 
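* The check below applies the same policy to a touchscreen application * that also exposes button usages.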
*/ if (field->application == HID_DG_TOUCHSCREEN && (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) { app->mt_flags |= INPUT_MT_POINTER; td->inputmode_value = MT_INPUTMODE_TOUCHPAD; } /* count the buttons on touchpads */ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) app->buttons_count++; if (usage->usage_index) prev_usage = &field->usage[usage->usage_index - 1]; switch (usage->hid & HID_USAGE_PAGE) { case HID_UP_GENDESK: switch (usage->hid) { case HID_GD_X: if (prev_usage && (prev_usage->hid == usage->hid)) { code = ABS_MT_TOOL_X; MT_STORE_FIELD(cx); } else { code = ABS_MT_POSITION_X; MT_STORE_FIELD(x); } set_abs(hi->input, code, field, cls->sn_move); /* * A system multi-axis that exports X and Y has a high * chance of being used directly on a surface */ if (field->application == HID_GD_SYSTEM_MULTIAXIS) { __set_bit(INPUT_PROP_DIRECT, hi->input->propbit); input_set_abs_params(hi->input, ABS_MT_TOOL_TYPE, MT_TOOL_DIAL, MT_TOOL_DIAL, 0, 0); } return 1; case HID_GD_Y: if (prev_usage && (prev_usage->hid == usage->hid)) { code = ABS_MT_TOOL_Y; MT_STORE_FIELD(cy); } else { code = ABS_MT_POSITION_Y; MT_STORE_FIELD(y); } set_abs(hi->input, code, field, cls->sn_move); return 1; } return 0; case HID_UP_DIGITIZER: switch (usage->hid) { case HID_DG_INRANGE: if (app->quirks & MT_QUIRK_HOVERING) { input_set_abs_params(hi->input, ABS_MT_DISTANCE, 0, 1, 0, 0); } MT_STORE_FIELD(inrange_state); return 1; case HID_DG_CONFIDENCE: if ((cls->name == MT_CLS_WIN_8 || cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT || cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU || cls->name == MT_CLS_WIN_8_DISABLE_WAKEUP) && (field->application == HID_DG_TOUCHPAD || field->application == HID_DG_TOUCHSCREEN)) app->quirks |= MT_QUIRK_CONFIDENCE; if (app->quirks & MT_QUIRK_CONFIDENCE) input_set_abs_params(hi->input, ABS_MT_TOOL_TYPE, MT_TOOL_FINGER, MT_TOOL_PALM, 0, 0); MT_STORE_FIELD(confidence_state); return 1; case HID_DG_TIPSWITCH: if (field->application != HID_GD_SYSTEM_MULTIAXIS) input_set_capability(hi->input, EV_KEY, BTN_TOUCH); MT_STORE_FIELD(tip_state); return 1; case HID_DG_CONTACTID: MT_STORE_FIELD(contactid); app->touches_by_report++; return 1; case HID_DG_WIDTH: if (!(app->quirks & MT_QUIRK_NO_AREA)) set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field, cls->sn_width); MT_STORE_FIELD(w); return 1; case HID_DG_HEIGHT: if (!(app->quirks & MT_QUIRK_NO_AREA)) { set_abs(hi->input, ABS_MT_TOUCH_MINOR, field, cls->sn_height); /* * Only set ABS_MT_ORIENTATION if it is not * already set by the HID_DG_AZIMUTH usage. */ if (!test_bit(ABS_MT_ORIENTATION, hi->input->absbit)) input_set_abs_params(hi->input, ABS_MT_ORIENTATION, 0, 1, 0, 0); } MT_STORE_FIELD(h); return 1; case HID_DG_TIPPRESSURE: set_abs(hi->input, ABS_MT_PRESSURE, field, cls->sn_pressure); MT_STORE_FIELD(p); return 1; case HID_DG_SCANTIME: input_set_capability(hi->input, EV_MSC, MSC_TIMESTAMP); app->scantime = &field->value[usage->usage_index]; app->scantime_logical_max = field->logical_maximum; return 1; case HID_DG_CONTACTCOUNT: app->have_contact_count = true; app->raw_cc = &field->value[usage->usage_index]; return 1; case HID_DG_AZIMUTH: /* * Azimuth has the range of [0, MAX) representing a full * revolution. Set ABS_MT_ORIENTATION to a quarter of * MAX according the definition of ABS_MT_ORIENTATION */ input_set_abs_params(hi->input, ABS_MT_ORIENTATION, -field->logical_maximum / 4, field->logical_maximum / 4, cls->sn_move ? 
field->logical_maximum / cls->sn_move : 0, 0); MT_STORE_FIELD(a); return 1; case HID_DG_CONTACTMAX: /* contact max is global to the report */ return -1; case HID_DG_TOUCH: /* Legacy devices use TIPSWITCH and not TOUCH. * Let's just ignore this field. */ return -1; } /* let hid-input decide for the others */ return 0; case HID_UP_BUTTON: code = BTN_MOUSE + ((usage->hid - 1) & HID_USAGE); /* * MS PTP spec says that external buttons left and right have * usages 2 and 3. */ if ((app->quirks & MT_QUIRK_WIN8_PTP_BUTTONS) && field->application == HID_DG_TOUCHPAD && (usage->hid & HID_USAGE) > 1) code--; if (field->application == HID_GD_SYSTEM_MULTIAXIS) code = BTN_0 + ((usage->hid - 1) & HID_USAGE); hid_map_usage(hi, usage, bit, max, EV_KEY, code); if (!*bit) return -1; input_set_capability(hi->input, EV_KEY, code); return 1; case 0xff000000: /* we do not want to map these: no input-oriented meaning */ return -1; } return 0; } static int mt_compute_slot(struct mt_device *td, struct mt_application *app, struct mt_usages *slot, struct input_dev *input) { __s32 quirks = app->quirks; if (quirks & MT_QUIRK_SLOT_IS_CONTACTID) return *slot->contactid; if (quirks & MT_QUIRK_CYPRESS) return cypress_compute_slot(app, slot); if (quirks & MT_QUIRK_SLOT_IS_CONTACTNUMBER) return app->num_received; if (quirks & MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE) return *slot->contactid - 1; return input_mt_get_slot_by_key(input, *slot->contactid); } static void mt_release_pending_palms(struct mt_device *td, struct mt_application *app, struct input_dev *input) { int slotnum; bool need_sync = false; for_each_set_bit(slotnum, app->pending_palm_slots, td->maxcontacts) { clear_bit(slotnum, app->pending_palm_slots); input_mt_slot(input, slotnum); input_mt_report_slot_inactive(input); need_sync = true; } if (need_sync) { input_mt_sync_frame(input); input_sync(input); } } /* * this function is called when a whole packet has been received and processed, * so that it can decide what to send to the input layer. */ static void mt_sync_frame(struct mt_device *td, struct mt_application *app, struct input_dev *input) { if (app->quirks & MT_QUIRK_WIN8_PTP_BUTTONS) input_event(input, EV_KEY, BTN_LEFT, app->left_button_state); input_mt_sync_frame(input); input_event(input, EV_MSC, MSC_TIMESTAMP, app->timestamp); input_sync(input); mt_release_pending_palms(td, app, input); app->num_received = 0; app->left_button_state = 0; if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags)) set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags); else clear_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags); clear_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags); } static int mt_compute_timestamp(struct mt_application *app, __s32 value) { long delta = value - app->prev_scantime; unsigned long jdelta = jiffies_to_usecs(jiffies - app->jiffies); app->jiffies = jiffies; if (delta < 0) delta += app->scantime_logical_max; /* HID_DG_SCANTIME is expressed in 100us, we want it in us. */ delta *= 100; if (jdelta > MAX_TIMESTAMP_INTERVAL) /* No data received for a while, resync the timestamp.
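* Otherwise a scantime delta of, say, 80 units advances the timestamp by * 8000 us, since HID_DG_SCANTIME ticks in 100 us units.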
*/ return 0; else return app->timestamp + delta; } static int mt_touch_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value) { /* the hidinput part is handled in mt_touch_report(); only hiddev remains here */ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) hid->hiddev_hid_event(hid, field, usage, value); return 1; } static int mt_process_slot(struct mt_device *td, struct input_dev *input, struct mt_application *app, struct mt_usages *slot) { struct input_mt *mt = input->mt; struct hid_device *hdev = td->hdev; __s32 quirks = app->quirks; bool valid = true; bool confidence_state = true; bool inrange_state = false; int active; int slotnum; int tool = MT_TOOL_FINGER; if (!slot) return -EINVAL; if ((quirks & MT_QUIRK_CONTACT_CNT_ACCURATE) && app->num_received >= app->num_expected) return -EAGAIN; if (!(quirks & MT_QUIRK_ALWAYS_VALID)) { if (quirks & MT_QUIRK_VALID_IS_INRANGE) valid = *slot->inrange_state; if (quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) valid = *slot->tip_state; if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE) valid = *slot->confidence_state; if (!valid) return 0; } slotnum = mt_compute_slot(td, app, slot, input); if (slotnum < 0 || slotnum >= td->maxcontacts) return 0; if ((quirks & MT_QUIRK_IGNORE_DUPLICATES) && mt) { struct input_mt_slot *i_slot = &mt->slots[slotnum]; if (input_mt_is_active(i_slot) && input_mt_is_used(mt, i_slot)) return -EAGAIN; } if (quirks & MT_QUIRK_CONFIDENCE) confidence_state = *slot->confidence_state; if (quirks & MT_QUIRK_HOVERING) inrange_state = *slot->inrange_state; active = *slot->tip_state || inrange_state; if (app->application == HID_GD_SYSTEM_MULTIAXIS) tool = MT_TOOL_DIAL; else if (unlikely(!confidence_state)) { tool = MT_TOOL_PALM; if (!active && mt && input_mt_is_active(&mt->slots[slotnum])) { /* * Non-confidence was reported for a * previously valid contact that is also no * longer valid. We can't simply report * lift-off as userspace will not be aware * of non-confidence, so we need to split * it into 2 events: active MT_TOOL_PALM * and a separate liftoff. */ active = true; set_bit(slotnum, app->pending_palm_slots); } } input_mt_slot(input, slotnum); input_mt_report_slot_state(input, tool, active); if (active) { /* this finger is in proximity of the sensor */ int wide = (*slot->w > *slot->h); int major = max(*slot->w, *slot->h); int minor = min(*slot->w, *slot->h); int orientation = wide; int max_azimuth; int azimuth; int x; int y; int cx; int cy; if (slot->a != DEFAULT_ZERO) { /* * Azimuth is counter-clockwise and ranges from [0, MAX) * (a full revolution). Convert it to clockwise ranging * [-MAX/2, MAX/2]. * * Note that ABS_MT_ORIENTATION requires us to report * within the limit of [-MAX/4, MAX/4], but the value can go * out of range to [-MAX/2, MAX/2] to report an upside-down * ellipse. */ azimuth = *slot->a; max_azimuth = input_abs_get_max(input, ABS_MT_ORIENTATION); if (azimuth > max_azimuth * 2) azimuth -= max_azimuth * 4; orientation = -azimuth; if (quirks & MT_QUIRK_ORIENTATION_INVERT) orientation = -orientation; } if (quirks & MT_QUIRK_TOUCH_SIZE_SCALING) { /* * divided by two to match visual scale of touch * for devices with this quirk */ major = major >> 1; minor = minor >> 1; } x = hdev->quirks & HID_QUIRK_X_INVERT ? input_abs_get_max(input, ABS_MT_POSITION_X) - *slot->x : *slot->x; y = hdev->quirks & HID_QUIRK_Y_INVERT ? input_abs_get_max(input, ABS_MT_POSITION_Y) - *slot->y : *slot->y; cx = hdev->quirks & HID_QUIRK_X_INVERT ?
input_abs_get_max(input, ABS_MT_POSITION_X) - *slot->cx : *slot->cx; cy = hdev->quirks & HID_QUIRK_Y_INVERT ? input_abs_get_max(input, ABS_MT_POSITION_Y) - *slot->cy : *slot->cy; input_event(input, EV_ABS, ABS_MT_POSITION_X, x); input_event(input, EV_ABS, ABS_MT_POSITION_Y, y); input_event(input, EV_ABS, ABS_MT_TOOL_X, cx); input_event(input, EV_ABS, ABS_MT_TOOL_Y, cy); input_event(input, EV_ABS, ABS_MT_DISTANCE, !*slot->tip_state); input_event(input, EV_ABS, ABS_MT_ORIENTATION, orientation); input_event(input, EV_ABS, ABS_MT_PRESSURE, *slot->p); input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major); input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor); set_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags); } return 0; } static void mt_process_mt_event(struct hid_device *hid, struct mt_application *app, struct hid_field *field, struct hid_usage *usage, __s32 value, bool first_packet) { __s32 quirks = app->quirks; struct input_dev *input = field->hidinput->input; if (!usage->type || !(hid->claimed & HID_CLAIMED_INPUT)) return; if (quirks & MT_QUIRK_WIN8_PTP_BUTTONS) { /* * For Win8 PTP touchpads we should only look at * non-finger/touch events in the first_packet of a * (possible) multi-packet frame. */ if (!first_packet) return; /* * For Win8 PTP touchpads we map both the clickpad click * and any "external" left buttons to BTN_LEFT. If a * device claims to have both, we need to report 1 for * BTN_LEFT if either is pressed, so we OR all values * together and report the result in mt_sync_frame(). */ if (usage->type == EV_KEY && usage->code == BTN_LEFT) { app->left_button_state |= value; return; } } input_event(input, usage->type, usage->code, value); } static void mt_touch_report(struct hid_device *hid, struct mt_report_data *rdata) { struct mt_device *td = hid_get_drvdata(hid); struct hid_report *report = rdata->report; struct mt_application *app = rdata->application; struct hid_field *field; struct input_dev *input; struct mt_usages *slot; bool first_packet; unsigned count; int r, n; int scantime = 0; int contact_count = -1; /* sticky fingers release in progress, abort */ if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags)) return; scantime = *app->scantime; app->timestamp = mt_compute_timestamp(app, scantime); if (app->raw_cc != DEFAULT_ZERO) contact_count = *app->raw_cc; /* * Includes multi-packet support where subsequent * packets are sent with zero contactcount. */ if (contact_count >= 0) { /* * For Win8 PTPs the first packet (td->num_received == 0) may * have a contactcount of 0 if there is only a button event. * We double check that this is not a continuation packet * of a possible multi-packet frame by checking that the * timestamp has changed.
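* For example, a five-finger frame on a device that fits three contacts * per report arrives as one packet with contactcount 5 followed by * continuation packets with contactcount 0 and an unchanged scantime.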
*/ if ((app->quirks & MT_QUIRK_WIN8_PTP_BUTTONS) && app->num_received == 0 && app->prev_scantime != scantime) app->num_expected = contact_count; /* A non-zero contact count always indicates a first packet */ else if (contact_count) app->num_expected = contact_count; } app->prev_scantime = scantime; first_packet = app->num_received == 0; input = report->field[0]->hidinput->input; list_for_each_entry(slot, &app->mt_usages, list) { if (!mt_process_slot(td, input, app, slot)) app->num_received++; } for (r = 0; r < report->maxfield; r++) { field = report->field[r]; count = field->report_count; if (!(HID_MAIN_ITEM_VARIABLE & field->flags)) continue; for (n = 0; n < count; n++) mt_process_mt_event(hid, app, field, &field->usage[n], field->value[n], first_packet); } if (app->num_received >= app->num_expected) mt_sync_frame(td, app, input); /* * The Windows 8 spec says two things: * - once a contact has been reported, it has to be reported in each * subsequent report * - the report rate when fingers are present has to be at least * the refresh rate of the screen, 60 or 120 Hz * * I interpret this as the specification forcing a report rate of * at least 60 Hz for a touchscreen to be certified. * That means that if we do not get a report within 16 ms, either * something went wrong or the touchscreen forgot to send * a release. Taking a reasonable margin allows us to rule out issues * with USB communication or the load of the machine. * * Given that Win 8 devices are forced to send a release, this will * only affect laggy machines and the ones that have a firmware * defect. */ if (app->quirks & MT_QUIRK_STICKY_FINGERS) { if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags)) mod_timer(&td->release_timer, jiffies + msecs_to_jiffies(100)); else timer_delete(&td->release_timer); } clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); } static int mt_touch_input_configured(struct hid_device *hdev, struct hid_input *hi, struct mt_application *app) { struct mt_device *td = hid_get_drvdata(hdev); struct mt_class *cls = &td->mtclass; struct input_dev *input = hi->input; int ret; if (!td->maxcontacts) td->maxcontacts = MT_DEFAULT_MAXCONTACT; mt_post_parse(td, app); if (td->serial_maybe) mt_post_parse_default_settings(td, app); if (cls->is_indirect) app->mt_flags |= INPUT_MT_POINTER; if (app->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) app->mt_flags |= INPUT_MT_DROP_UNUSED; /* check for clickpads */ if ((app->mt_flags & INPUT_MT_POINTER) && (app->buttons_count == 1)) td->is_buttonpad = true; if (td->is_buttonpad) __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); app->pending_palm_slots = devm_kcalloc(&hi->input->dev, BITS_TO_LONGS(td->maxcontacts), sizeof(long), GFP_KERNEL); if (!app->pending_palm_slots) return -ENOMEM; ret = input_mt_init_slots(input, td->maxcontacts, app->mt_flags); if (ret) return ret; app->mt_flags = 0; return 0; } #define mt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, \ max, EV_KEY, (c)) static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct mt_device *td = hid_get_drvdata(hdev); struct mt_application *application; struct mt_report_data *rdata; rdata = mt_find_report_data(td, field->report); if (!rdata) { hid_err(hdev, "failed to allocate data for report\n"); return 0; } application = rdata->application; /* * If mtclass.export_all_inputs is not set, only map fields from * TouchScreen or TouchPad collections.
We need to ignore fields * that belong to other collections such as Mouse that might have * the same GenericDesktop usages. */ if (!td->mtclass.export_all_inputs && field->application != HID_DG_TOUCHSCREEN && field->application != HID_DG_PEN && field->application != HID_DG_TOUCHPAD && field->application != HID_GD_KEYBOARD && field->application != HID_GD_SYSTEM_CONTROL && field->application != HID_CP_CONSUMER_CONTROL && field->application != HID_GD_WIRELESS_RADIO_CTLS && field->application != HID_GD_SYSTEM_MULTIAXIS && !(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS && application->quirks & MT_QUIRK_ASUS_CUSTOM_UP)) return -1; /* * Some Asus keyboard+touchpad devices have the hotkeys defined in the * touchpad report descriptor. We need to treat these as an array to * map usages to input keys. */ if (field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS && application->quirks & MT_QUIRK_ASUS_CUSTOM_UP && (usage->hid & HID_USAGE_PAGE) == HID_UP_CUSTOM) { set_bit(EV_REP, hi->input->evbit); if (field->flags & HID_MAIN_ITEM_VARIABLE) field->flags &= ~HID_MAIN_ITEM_VARIABLE; switch (usage->hid & HID_USAGE) { case 0x10: mt_map_key_clear(KEY_BRIGHTNESSDOWN); break; case 0x20: mt_map_key_clear(KEY_BRIGHTNESSUP); break; case 0x35: mt_map_key_clear(KEY_DISPLAY_OFF); break; case 0x6b: mt_map_key_clear(KEY_F21); break; case 0x6c: mt_map_key_clear(KEY_SLEEP); break; default: return -1; } return 1; } if (rdata->is_mt_collection) return mt_touch_input_mapping(hdev, hi, field, usage, bit, max, application); /* * some egalax touchscreens have "application == DG_TOUCHSCREEN" * for the stylus. Overwrite the hid_input application */ if (field->physical == HID_DG_STYLUS) hi->application = HID_DG_STYLUS; /* let hid-core decide for the others */ return 0; } static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct mt_device *td = hid_get_drvdata(hdev); struct mt_report_data *rdata; rdata = mt_find_report_data(td, field->report); if (rdata && rdata->is_mt_collection) { /* We own these mappings, tell hid-input to ignore them */ return -1; } /* let hid-core decide for the others */ return 0; } static int mt_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct mt_device *td = hid_get_drvdata(hid); struct mt_report_data *rdata; rdata = mt_find_report_data(td, field->report); if (rdata && rdata->is_mt_collection) return mt_touch_event(hid, field, usage, value); return 0; } static const __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *size) { if (hdev->vendor == I2C_VENDOR_ID_GOODIX && (hdev->product == I2C_DEVICE_ID_GOODIX_01E8 || hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) { if (rdesc[607] == 0x15) { rdesc[607] = 0x25; dev_info( &hdev->dev, "GT7868Q report descriptor fixup is applied.\n"); } else { dev_info( &hdev->dev, "The byte is not expected for fixing the report descriptor. It's possible that the touchpad firmware is not suitable for applying the fix. got: %x\n", rdesc[607]); } } return rdesc; } static void mt_report(struct hid_device *hid, struct hid_report *report) { struct mt_device *td = hid_get_drvdata(hid); struct hid_field *field = report->field[0]; struct mt_report_data *rdata; if (!(hid->claimed & HID_CLAIMED_INPUT)) return; rdata = mt_find_report_data(td, report); if (rdata && rdata->is_mt_collection) return mt_touch_report(hid, rdata); if (field && field->hidinput && field->hidinput->input) input_sync(field->hidinput->input); } static bool mt_need_to_apply_feature(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, enum latency_mode latency, enum report_mode report_mode, bool *inputmode_found) { struct mt_device *td = hid_get_drvdata(hdev); struct mt_class *cls = &td->mtclass; struct hid_report *report = field->report; unsigned int index = usage->usage_index; char *buf; u32 report_len; int max; switch (usage->hid) { case HID_DG_INPUTMODE: /* * Some Elan panels wrongly declare 2 input mode features, * and silently ignore the value we set in the second * field. Skip the second feature and hope for the best. */ if (*inputmode_found) return false; if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) { report_len = hid_report_len(report); buf = hid_alloc_report_buf(report, GFP_KERNEL); if (!buf) { hid_err(hdev, "failed to allocate buffer for report\n"); return false; } hid_hw_raw_request(hdev, report->id, buf, report_len, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); kfree(buf); } field->value[index] = td->inputmode_value; *inputmode_found = true; return true; case HID_DG_CONTACTMAX: if (cls->maxcontacts) { max = min_t(int, field->logical_maximum, cls->maxcontacts); if (field->value[index] != max) { field->value[index] = max; return true; } } break; case HID_DG_LATENCYMODE: field->value[index] = latency; return true; case HID_DG_SURFACESWITCH: field->value[index] = !!(report_mode & TOUCHPAD_REPORT_CONTACTS); return true; case HID_DG_BUTTONSWITCH: field->value[index] = !!(report_mode & TOUCHPAD_REPORT_BUTTONS); return true; } return false; /* no need to update the report */ } static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency, enum report_mode report_mode) { struct hid_report_enum *rep_enum; struct hid_report *rep; struct hid_usage *usage; int i, j; bool update_report; bool inputmode_found = false; rep_enum = &hdev->report_enum[HID_FEATURE_REPORT]; list_for_each_entry(rep, &rep_enum->report_list, list) { update_report = false; for (i = 0; i < rep->maxfield; i++) { /* Ignore if report count is out of bounds.
*/ if (rep->field[i]->report_count < 1) continue; for (j = 0; j < rep->field[i]->maxusage; j++) { usage = &rep->field[i]->usage[j]; if (mt_need_to_apply_feature(hdev, rep->field[i], usage, latency, report_mode, &inputmode_found)) update_report = true; } } if (update_report) hid_hw_request(hdev, rep, HID_REQ_SET_REPORT); } } static void mt_post_parse_default_settings(struct mt_device *td, struct mt_application *app) { __s32 quirks = app->quirks; /* unknown serial device needs special quirks */ if (list_is_singular(&app->mt_usages)) { quirks |= MT_QUIRK_ALWAYS_VALID; quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP; quirks &= ~MT_QUIRK_VALID_IS_INRANGE; quirks &= ~MT_QUIRK_VALID_IS_CONFIDENCE; quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE; } app->quirks = quirks; } static void mt_post_parse(struct mt_device *td, struct mt_application *app) { if (!app->have_contact_count) app->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE; } static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi) { struct mt_device *td = hid_get_drvdata(hdev); const char *suffix = NULL; struct mt_report_data *rdata; struct mt_application *mt_application = NULL; struct hid_report *report; int ret; list_for_each_entry(report, &hi->reports, hidinput_list) { rdata = mt_find_report_data(td, report); if (!rdata) { hid_err(hdev, "failed to allocate data for report\n"); return -ENOMEM; } mt_application = rdata->application; if (rdata->is_mt_collection) { ret = mt_touch_input_configured(hdev, hi, mt_application); if (ret) return ret; } } switch (hi->application) { case HID_GD_KEYBOARD: case HID_GD_KEYPAD: case HID_GD_MOUSE: case HID_DG_TOUCHPAD: case HID_GD_SYSTEM_CONTROL: case HID_CP_CONSUMER_CONTROL: case HID_GD_WIRELESS_RADIO_CTLS: case HID_GD_SYSTEM_MULTIAXIS: /* already handled by hid core */ break; case HID_DG_TOUCHSCREEN: /* we do not set suffix = "Touchscreen" */ hi->input->name = hdev->name; break; case HID_VD_ASUS_CUSTOM_MEDIA_KEYS: suffix = "Custom Media Keys"; break; case HID_DG_STYLUS: /* force BTN_STYLUS to allow tablet matching in udev */ __set_bit(BTN_STYLUS, hi->input->keybit); break; default: suffix = "UNKNOWN"; break; } if (suffix) { hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name, suffix); if (!hi->input->name) return -ENOMEM; } return 0; } static void mt_fix_const_field(struct hid_field *field, unsigned int usage) { if (field->usage[0].hid != usage || !(field->flags & HID_MAIN_ITEM_CONSTANT)) return; field->flags &= ~HID_MAIN_ITEM_CONSTANT; field->flags |= HID_MAIN_ITEM_VARIABLE; } static void mt_fix_const_fields(struct hid_device *hdev, unsigned int usage) { struct hid_report *report; int i; list_for_each_entry(report, &hdev->report_enum[HID_INPUT_REPORT].report_list, list) { if (!report->maxfield) continue; for (i = 0; i < report->maxfield; i++) if (report->field[i]->maxusage >= 1) mt_fix_const_field(report->field[i], usage); } } static void mt_release_contacts(struct hid_device *hid) { struct hid_input *hidinput; struct mt_application *application; struct mt_device *td = hid_get_drvdata(hid); list_for_each_entry(hidinput, &hid->inputs, list) { struct input_dev *input_dev = hidinput->input; struct input_mt *mt = input_dev->mt; int i; if (mt) { for (i = 0; i < mt->num_slots; i++) { input_mt_slot(input_dev, i); input_mt_report_slot_inactive(input_dev); } input_mt_sync_frame(input_dev); input_sync(input_dev); } } list_for_each_entry(application, &td->applications, list) { application->num_received = 0; } } static void mt_expired_timeout(struct timer_list *t) { struct mt_device *td = 
timer_container_of(td, t, release_timer); struct hid_device *hdev = td->hdev; /* * An input report came in just before we release the sticky fingers, * it will take care of the sticky fingers. */ if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags)) return; if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags)) mt_release_contacts(hdev); clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); } static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret, i; struct mt_device *td; const struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */ for (i = 0; mt_classes[i].name ; i++) { if (id->driver_data == mt_classes[i].name) { mtclass = &(mt_classes[i]); break; } } td = devm_kzalloc(&hdev->dev, sizeof(struct mt_device), GFP_KERNEL); if (!td) { dev_err(&hdev->dev, "cannot allocate multitouch data\n"); return -ENOMEM; } td->hdev = hdev; td->mtclass = *mtclass; td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN; hid_set_drvdata(hdev, td); INIT_LIST_HEAD(&td->applications); INIT_LIST_HEAD(&td->reports); if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID) td->serial_maybe = true; /* Orientation is inverted if the X or Y axes are * flipped, but normalized if both are inverted. */ if (hdev->quirks & (HID_QUIRK_X_INVERT | HID_QUIRK_Y_INVERT) && !((hdev->quirks & HID_QUIRK_X_INVERT) && (hdev->quirks & HID_QUIRK_Y_INVERT))) td->mtclass.quirks = MT_QUIRK_ORIENTATION_INVERT; /* This allows the driver to correctly support devices * that emit events over several HID messages. */ hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC; /* * This allows the driver to handle different input sensors * that emits events through different applications on the same HID * device. */ hdev->quirks |= HID_QUIRK_INPUT_PER_APP; if (id->group != HID_GROUP_MULTITOUCH_WIN_8) hdev->quirks |= HID_QUIRK_MULTI_INPUT; if (mtclass->quirks & MT_QUIRK_FORCE_MULTI_INPUT) { hdev->quirks &= ~HID_QUIRK_INPUT_PER_APP; hdev->quirks |= HID_QUIRK_MULTI_INPUT; } timer_setup(&td->release_timer, mt_expired_timeout, 0); ret = hid_parse(hdev); if (ret != 0) return ret; if (mtclass->quirks & MT_QUIRK_FIX_CONST_CONTACT_ID) mt_fix_const_fields(hdev, HID_DG_CONTACTID); if (hdev->vendor == USB_VENDOR_ID_SIS_TOUCH) hdev->quirks |= HID_QUIRK_NOGET; ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) return ret; ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group); if (ret) dev_warn(&hdev->dev, "Cannot allocate sysfs group for %s\n", hdev->name); mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL); return 0; } static int mt_suspend(struct hid_device *hdev, pm_message_t state) { struct mt_device *td = hid_get_drvdata(hdev); /* High latency is desirable for power savings during S3/S0ix */ if ((td->mtclass.quirks & MT_QUIRK_DISABLE_WAKEUP) || !hid_hw_may_wakeup(hdev)) mt_set_modes(hdev, HID_LATENCY_HIGH, TOUCHPAD_REPORT_NONE); else mt_set_modes(hdev, HID_LATENCY_HIGH, TOUCHPAD_REPORT_ALL); return 0; } static int mt_reset_resume(struct hid_device *hdev) { mt_release_contacts(hdev); mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL); return 0; } static int mt_resume(struct hid_device *hdev) { /* Some Elan legacy devices require SET_IDLE to be set on resume. * It should be safe to send it to other devices too. * Tested on 3M, Stantum, Cypress, Zytronic, eGalax, and Elan panels. 
*/ hid_hw_idle(hdev, 0, 0, HID_REQ_SET_IDLE); mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL); return 0; } static void mt_remove(struct hid_device *hdev) { struct mt_device *td = hid_get_drvdata(hdev); timer_delete_sync(&td->release_timer); sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group); hid_hw_stop(hdev); } static void mt_on_hid_hw_open(struct hid_device *hdev) { mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL); } static void mt_on_hid_hw_close(struct hid_device *hdev) { mt_set_modes(hdev, HID_LATENCY_HIGH, TOUCHPAD_REPORT_NONE); } /* * This list contains only: * - VID/PID of products not working with the default multitouch handling * - 2 generic rules. * So there is no point in adding here any device with MT_CLS_DEFAULT. */ static const struct hid_device_id mt_devices[] = { /* 3M panels */ { .driver_data = MT_CLS_3M, MT_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) }, { .driver_data = MT_CLS_3M, MT_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) }, { .driver_data = MT_CLS_3M, MT_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M3266) }, /* Anton devices */ { .driver_data = MT_CLS_EXPORT_ALL_INPUTS, MT_USB_DEVICE(USB_VENDOR_ID_ANTON, USB_DEVICE_ID_ANTON_TOUCH_PAD) }, /* Asus T101HA */ { .driver_data = MT_CLS_WIN_8_DISABLE_WAKEUP, HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) }, /* Asus T304UA */ { .driver_data = MT_CLS_ASUS, HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T304_KEYBOARD) }, /* Atmel panels */ { .driver_data = MT_CLS_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_ATMEL, USB_DEVICE_ID_ATMEL_MXT_DIGITIZER) }, /* Baanto multitouch devices */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_BAANTO, USB_DEVICE_ID_BAANTO_MT_190W2) }, /* Cando panels */ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER, MT_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER, MT_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) }, /* Chunghwa Telecom touch panels */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) }, /* CJTouch panels */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH, USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020) }, { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH, USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040) }, /* CVTouch panels */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) }, /* eGalax devices (SAW) */ { .driver_data = MT_CLS_EXPORT_ALL_INPUTS, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER) }, /* eGalax devices (resistive) */ { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) }, { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) }, /* eGalax devices (capacitive) */ { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7207) }, { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) }, { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7224) }, { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_722A) }, { .driver_data = MT_CLS_EGALAX_SERIAL, 
MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_725E) }, { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7262) }, { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) }, { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) }, { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72AA) }, { .driver_data = MT_CLS_EGALAX, HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4) }, { .driver_data = MT_CLS_EGALAX, HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72D0) }, { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) }, { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) }, { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349) }, { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7) }, { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) }, /* Elan devices */ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ELAN, 0x313a) }, { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ELAN, 0x3148) }, { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ELAN, 0x32ae) }, /* Elitegroup panel */ { .driver_data = MT_CLS_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP, USB_DEVICE_ID_ELITEGROUP_05D8) }, /* Flatfrog Panels */ { .driver_data = MT_CLS_FLATFROG, MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG, USB_DEVICE_ID_MULTITOUCH_3200) }, /* FocalTech Panels */ { .driver_data = MT_CLS_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH) }, /* GeneralTouch panel */ { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS, MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) }, { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) }, { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS, MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101) }, { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102) }, { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106) }, { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A) }, { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100) }, /* Gametel game controller */ { .driver_data = MT_CLS_NSMU, MT_BT_DEVICE(USB_VENDOR_ID_FRUCTEL, USB_DEVICE_ID_GAMETEL_MT_MODE) }, /* Goodix GT7868Q devices */ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX, 
I2C_DEVICE_ID_GOODIX_01E8) }, { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01E9) }, /* GoodTouch panels */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH, USB_DEVICE_ID_GOODTOUCH_000f) }, /* Hanvon panels */ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID, MT_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) }, /* HONOR GLO-GXXX panel */ { .driver_data = MT_CLS_VTL, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, 0x347d, 0x7853) }, /* HONOR MagicBook Art 14 touchpad */ { .driver_data = MT_CLS_VTL, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, 0x35cc, 0x0104) }, /* Ilitek dual touch panel */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) }, /* LG Melfas panel */ { .driver_data = MT_CLS_LG, HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, { .driver_data = MT_CLS_LG, HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC, USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) }, /* Lenovo X1 TAB Gen 2 */ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) }, /* Lenovo X1 TAB Gen 3 */ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) }, /* Lenovo X12 TAB Gen 1 */ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB) }, /* Lenovo X12 TAB Gen 2 */ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB2) }, /* Logitech devices */ { .driver_data = MT_CLS_NSMU, HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD) }, { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER) }, /* MosArt panels */ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, MT_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)}, { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, MT_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) }, { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, MT_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) }, /* Novatek Panel */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_PCT) }, /* Ntrig Panel */ { .driver_data = MT_CLS_NSMU, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_NTRIG, 0x1b05) }, /* Panasonic panels */ { .driver_data = MT_CLS_PANASONIC, MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC, USB_DEVICE_ID_PANABOARD_UBT780) }, { .driver_data = MT_CLS_PANASONIC, MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC, USB_DEVICE_ID_PANABOARD_UBT880) }, /* PixArt optical touch screen */ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER, MT_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) }, { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER, MT_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) }, { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER, MT_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) }, /* PixCir-based panels */ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID, MT_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) 
}, /* Quanta-based panels */ { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID, MT_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001) }, /* Razer touchpads */ { .driver_data = MT_CLS_RAZER_BLADE_STEALTH, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_SYNAPTICS, 0x8323) }, /* Smart Tech panels */ { .driver_data = MT_CLS_SMART_TECH, MT_USB_DEVICE(0x0b8c, 0x0092)}, /* Stantum panels */ { .driver_data = MT_CLS_CONFIDENCE, MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM, USB_DEVICE_ID_MTP_STM)}, /* Synaptics devices */ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_SYNAPTICS, 0xcd7e) }, { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_SYNAPTICS, 0xcddc) }, { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_SYNAPTICS, 0xce08) }, { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_SYNAPTICS, 0xce09) }, /* TopSeed panels */ { .driver_data = MT_CLS_TOPSEED, MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_PERIPAD_701) }, /* Touch International panels */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_TOUCH_INTL, USB_DEVICE_ID_TOUCH_INTL_MULTI_TOUCH) }, /* Unitec panels */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0709) }, { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) }, /* VTL panels */ { .driver_data = MT_CLS_VTL, MT_USB_DEVICE(USB_VENDOR_ID_VTL, USB_DEVICE_ID_VTL_MULTITOUCH_FF3F) }, /* Winbond Electronics Corp. */ { .driver_data = MT_CLS_WIN_8_NO_STICKY_FINGERS, HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_WINBOND, USB_DEVICE_ID_TSTP_MTOUCH) }, /* Wistron panels */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_WISTRON, USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH) }, /* XAT */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_XAT, USB_DEVICE_ID_XAT_CSR) }, /* Xiroku */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX) }, { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX) }, { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR) }, { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX1) }, { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX1) }, { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR1) }, { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX2) }, { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX2) }, { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR2) }, /* Google MT devices */ { .driver_data = MT_CLS_GOOGLE, HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) }, { .driver_data = MT_CLS_GOOGLE, HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WHISKERS) }, /* sis */ { .driver_data = MT_CLS_SIS, HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_SIS_TOUCH, HID_ANY_ID) }, /* Hantick */ { .driver_data = MT_CLS_NSMU, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288) }, /* Generic 
MT device */ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) }, /* Generic Win 8 certified MT device */ { .driver_data = MT_CLS_WIN_8, HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH_WIN_8, HID_ANY_ID, HID_ANY_ID) }, { } }; MODULE_DEVICE_TABLE(hid, mt_devices); static const struct hid_usage_id mt_grabbed_usages[] = { { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} }; static struct hid_driver mt_driver = { .name = "hid-multitouch", .id_table = mt_devices, .probe = mt_probe, .remove = mt_remove, .input_mapping = mt_input_mapping, .input_mapped = mt_input_mapped, .input_configured = mt_input_configured, .feature_mapping = mt_feature_mapping, .usage_table = mt_grabbed_usages, .event = mt_event, .report_fixup = mt_report_fixup, .report = mt_report, .suspend = pm_ptr(mt_suspend), .reset_resume = pm_ptr(mt_reset_resume), .resume = pm_ptr(mt_resume), .on_hid_hw_open = mt_on_hid_hw_open, .on_hid_hw_close = mt_on_hid_hw_close, }; module_hid_driver(mt_driver);
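/*
 * Usage sketch (illustrative only, not part of the driver above): the two
 * catch-all rows at the end of mt_devices mean most panels bind with
 * MT_CLS_DEFAULT or MT_CLS_WIN_8 and need no table change. A device that
 * does need a non-default class gets an explicit row ahead of the generic
 * rules. The vendor/product IDs below are placeholders, not a real device.
 */
static const struct hid_device_id mt_example_entry[] __maybe_unused = {
	{ .driver_data = MT_CLS_NSMU,		/* not-seen-means-up class */
		MT_USB_DEVICE(0x1234, 0x5678) },	/* placeholder VID/PID */
	{ }
};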
/* * Copyright (c) 2014 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/firmware.h> #include <net/rsi_91x.h> #include "rsi_mgmt.h" #include "rsi_common.h" #include "rsi_coex.h" #include "rsi_hal.h" #include "rsi_usb.h" u32 rsi_zone_enabled = /* INFO_ZONE | INIT_ZONE | MGMT_TX_ZONE | MGMT_RX_ZONE | DATA_TX_ZONE | DATA_RX_ZONE | FSM_ZONE | ISR_ZONE | */ ERR_ZONE | 0; EXPORT_SYMBOL_GPL(rsi_zone_enabled); #ifdef CONFIG_RSI_COEX static struct rsi_proto_ops g_proto_ops = { .coex_send_pkt = rsi_coex_send_pkt, .get_host_intf = rsi_get_host_intf, .set_bt_context = rsi_set_bt_context, }; #endif /** * rsi_dbg() - This function outputs informational messages. * @zone: Zone of interest for output message. * @fmt: printf-style format for output message. * * Return: none */ void rsi_dbg(u32 zone, const char *fmt, ...)
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (zone & rsi_zone_enabled) pr_info("%pV", &vaf); va_end(args); } EXPORT_SYMBOL_GPL(rsi_dbg); static char *opmode_str(int oper_mode) { switch (oper_mode) { case DEV_OPMODE_WIFI_ALONE: return "Wi-Fi alone"; case DEV_OPMODE_BT_ALONE: return "BT EDR alone"; case DEV_OPMODE_BT_LE_ALONE: return "BT LE alone"; case DEV_OPMODE_BT_DUAL: return "BT Dual"; case DEV_OPMODE_STA_BT: return "Wi-Fi STA + BT EDR"; case DEV_OPMODE_STA_BT_LE: return "Wi-Fi STA + BT LE"; case DEV_OPMODE_STA_BT_DUAL: return "Wi-Fi STA + BT DUAL"; case DEV_OPMODE_AP_BT: return "Wi-Fi AP + BT EDR"; case DEV_OPMODE_AP_BT_DUAL: return "Wi-Fi AP + BT DUAL"; } return "Unknown"; } void rsi_print_version(struct rsi_common *common) { rsi_dbg(ERR_ZONE, "================================================\n"); rsi_dbg(ERR_ZONE, "================ RSI Version Info ==============\n"); rsi_dbg(ERR_ZONE, "================================================\n"); rsi_dbg(ERR_ZONE, "FW Version\t: %d.%d.%d\n", common->lmac_ver.major, common->lmac_ver.minor, common->lmac_ver.release_num); rsi_dbg(ERR_ZONE, "Operating mode\t: %d [%s]\n", common->oper_mode, opmode_str(common->oper_mode)); rsi_dbg(ERR_ZONE, "Firmware file\t: %s\n", common->priv->fw_file_name); rsi_dbg(ERR_ZONE, "================================================\n"); } /** * rsi_prepare_skb() - This function prepares the skb. * @common: Pointer to the driver private structure. * @buffer: Pointer to the packet data. * @pkt_len: Length of the packet. * @extended_desc: Extended descriptor. * * Return: Allocated skb on success, NULL on failure. */ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common, u8 *buffer, u32 pkt_len, u8 extended_desc) { struct sk_buff *skb = NULL; u8 payload_offset; if (WARN(!pkt_len, "%s: Dummy pkt received", __func__)) return NULL; if (pkt_len > (RSI_RCV_BUFFER_LEN * 4)) { rsi_dbg(ERR_ZONE, "%s: Pkt size > max rx buf size %d\n", __func__, pkt_len); pkt_len = RSI_RCV_BUFFER_LEN * 4; } pkt_len -= extended_desc; skb = dev_alloc_skb(pkt_len + FRAME_DESC_SZ); if (skb == NULL) return NULL; payload_offset = (extended_desc + FRAME_DESC_SZ); skb_put(skb, pkt_len); memcpy((skb->data), (buffer + payload_offset), skb->len); return skb; } /** * rsi_read_pkt() - This function reads frames from the card. * @common: Pointer to the driver private structure. * @rx_pkt: Received pkt. * @rcv_pkt_len: Received pkt length. In case of USB it is 0. * * Return: 0 on success, negative error code on failure.
*/ int rsi_read_pkt(struct rsi_common *common, u8 *rx_pkt, s32 rcv_pkt_len) { u8 *frame_desc = NULL, extended_desc = 0; u32 index, length = 0, queueno = 0; u16 actual_length = 0, offset; struct sk_buff *skb = NULL; #ifdef CONFIG_RSI_COEX u8 bt_pkt_type; #endif index = 0; do { frame_desc = &rx_pkt[index]; actual_length = *(u16 *)&frame_desc[0]; offset = *(u16 *)&frame_desc[2]; if (!rcv_pkt_len && offset > RSI_MAX_RX_USB_PKT_SIZE - FRAME_DESC_SZ) goto fail; queueno = rsi_get_queueno(frame_desc, offset); length = rsi_get_length(frame_desc, offset); /* Extended descriptor is valid for WLAN queues only */ if (queueno == RSI_WIFI_DATA_Q || queueno == RSI_WIFI_MGMT_Q) extended_desc = rsi_get_extended_desc(frame_desc, offset); switch (queueno) { case RSI_COEX_Q: #ifdef CONFIG_RSI_COEX if (common->coex_mode > 1) rsi_coex_recv_pkt(common, frame_desc + offset); else #endif rsi_mgmt_pkt_recv(common, (frame_desc + offset)); break; case RSI_WIFI_DATA_Q: skb = rsi_prepare_skb(common, (frame_desc + offset), length, extended_desc); if (skb == NULL) goto fail; rsi_indicate_pkt_to_os(common, skb); break; case RSI_WIFI_MGMT_Q: rsi_mgmt_pkt_recv(common, (frame_desc + offset)); break; #ifdef CONFIG_RSI_COEX case RSI_BT_MGMT_Q: case RSI_BT_DATA_Q: #define BT_RX_PKT_TYPE_OFST 14 #define BT_CARD_READY_IND 0x89 bt_pkt_type = frame_desc[offset + BT_RX_PKT_TYPE_OFST]; if (bt_pkt_type == BT_CARD_READY_IND) { rsi_dbg(INFO_ZONE, "BT Card ready recvd\n"); if (common->fsm_state == FSM_MAC_INIT_DONE) rsi_attach_bt(common); else common->bt_defer_attach = true; } else { if (common->bt_adapter) rsi_bt_ops.recv_pkt(common->bt_adapter, frame_desc + offset); } break; #endif default: rsi_dbg(ERR_ZONE, "%s: pkt from invalid queue: %d\n", __func__, queueno); goto fail; } index += actual_length; rcv_pkt_len -= actual_length; } while (rcv_pkt_len > 0); return 0; fail: return -EINVAL; } EXPORT_SYMBOL_GPL(rsi_read_pkt); /** * rsi_tx_scheduler_thread() - This function is a kernel thread to send the * packets to the device. * @common: Pointer to the driver private structure. * * Return: None. */ static void rsi_tx_scheduler_thread(struct rsi_common *common) { struct rsi_hw *adapter = common->priv; u32 timeout = EVENT_WAIT_FOREVER; do { if (adapter->determine_event_timeout) timeout = adapter->determine_event_timeout(adapter); rsi_wait_event(&common->tx_thread.event, timeout); rsi_reset_event(&common->tx_thread.event); if (common->init_done) rsi_core_qos_processor(common); } while (atomic_read(&common->tx_thread.thread_done) == 0); kthread_complete_and_exit(&common->tx_thread.completion, 0); } #ifdef CONFIG_RSI_COEX enum rsi_host_intf rsi_get_host_intf(void *priv) { struct rsi_common *common = priv; return common->priv->rsi_host_intf; } void rsi_set_bt_context(void *priv, void *bt_context) { struct rsi_common *common = priv; common->bt_adapter = bt_context; } #endif void rsi_attach_bt(struct rsi_common *common) { #ifdef CONFIG_RSI_COEX if (rsi_bt_ops.attach(common, &g_proto_ops)) rsi_dbg(ERR_ZONE, "Failed to attach BT module\n"); #endif } /** * rsi_91x_init() - This function initializes os interface operations. * @oper_mode: One of DEV_OPMODE_*. * * Return: Pointer to the adapter structure on success, NULL on failure . 
*/ struct rsi_hw *rsi_91x_init(u16 oper_mode) { struct rsi_hw *adapter = NULL; struct rsi_common *common = NULL; u8 ii = 0; adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); if (!adapter) return NULL; adapter->priv = kzalloc(sizeof(*common), GFP_KERNEL); if (adapter->priv == NULL) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of memory\n", __func__); kfree(adapter); return NULL; } else { common = adapter->priv; common->priv = adapter; } for (ii = 0; ii < NUM_SOFT_QUEUES; ii++) skb_queue_head_init(&common->tx_queue[ii]); rsi_init_event(&common->tx_thread.event); mutex_init(&common->mutex); mutex_init(&common->tx_lock); mutex_init(&common->rx_lock); mutex_init(&common->tx_bus_mutex); if (rsi_create_kthread(common, &common->tx_thread, rsi_tx_scheduler_thread, "Tx-Thread")) { rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__); goto err; } rsi_default_ps_params(adapter); init_bgscan_params(common); spin_lock_init(&adapter->ps_lock); timer_setup(&common->roc_timer, rsi_roc_timeout, 0); init_completion(&common->wlan_init_completion); adapter->device_model = RSI_DEV_9113; common->oper_mode = oper_mode; /* Determine coex mode */ switch (common->oper_mode) { case DEV_OPMODE_STA_BT_DUAL: case DEV_OPMODE_STA_BT: case DEV_OPMODE_STA_BT_LE: case DEV_OPMODE_BT_ALONE: case DEV_OPMODE_BT_LE_ALONE: case DEV_OPMODE_BT_DUAL: common->coex_mode = 2; break; case DEV_OPMODE_AP_BT_DUAL: case DEV_OPMODE_AP_BT: common->coex_mode = 4; break; case DEV_OPMODE_WIFI_ALONE: common->coex_mode = 1; break; default: common->oper_mode = 1; common->coex_mode = 1; } rsi_dbg(INFO_ZONE, "%s: oper_mode = %d, coex_mode = %d\n", __func__, common->oper_mode, common->coex_mode); adapter->device_model = RSI_DEV_9113; #ifdef CONFIG_RSI_COEX if (common->coex_mode > 1) { if (rsi_coex_attach(common)) { rsi_dbg(ERR_ZONE, "Failed to init coex module\n"); rsi_kill_thread(&common->tx_thread); goto err; } } #endif common->init_done = true; return adapter; err: kfree(common); kfree(adapter); return NULL; } EXPORT_SYMBOL_GPL(rsi_91x_init); /** * rsi_91x_deinit() - This function de-intializes os intf operations. * @adapter: Pointer to the adapter structure. * * Return: None. */ void rsi_91x_deinit(struct rsi_hw *adapter) { struct rsi_common *common = adapter->priv; u8 ii; rsi_dbg(INFO_ZONE, "%s: Performing deinit os ops\n", __func__); rsi_kill_thread(&common->tx_thread); for (ii = 0; ii < NUM_SOFT_QUEUES; ii++) skb_queue_purge(&common->tx_queue[ii]); #ifdef CONFIG_RSI_COEX if (common->coex_mode > 1) { if (common->bt_adapter) { rsi_bt_ops.detach(common->bt_adapter); common->bt_adapter = NULL; } rsi_coex_detach(common); } #endif common->init_done = false; kfree(common); kfree(adapter->rsi_dev); kfree(adapter); } EXPORT_SYMBOL_GPL(rsi_91x_deinit); /** * rsi_91x_hal_module_init() - This function is invoked when the module is * loaded into the kernel. * It registers the client driver. * @void: Void. * * Return: 0 on success, -1 on failure. */ static int rsi_91x_hal_module_init(void) { rsi_dbg(INIT_ZONE, "%s: Module init called\n", __func__); return 0; } /** * rsi_91x_hal_module_exit() - This function is called at the time of * removing/unloading the module. * It unregisters the client driver. * @void: Void. * * Return: None. 
*/ static void rsi_91x_hal_module_exit(void) { rsi_dbg(INIT_ZONE, "%s: Module exit called\n", __func__); } module_init(rsi_91x_hal_module_init); module_exit(rsi_91x_hal_module_exit); MODULE_AUTHOR("Redpine Signals Inc"); MODULE_DESCRIPTION("Station driver for RSI 91x devices"); MODULE_VERSION("0.1"); MODULE_LICENSE("Dual BSD/GPL");
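/*
 * Illustrative sketch, not part of the driver above: rsi_zone_enabled is
 * exported and every rsi_dbg() call is masked against it, so a debug patch
 * or helper module could widen the log mask at runtime without rebuilding.
 * The function name here is hypothetical.
 */
static void __maybe_unused rsi_example_trace_mgmt(void)
{
	/* additionally log management RX and state-machine transitions */
	rsi_zone_enabled |= MGMT_RX_ZONE | FSM_ZONE;
	rsi_dbg(ERR_ZONE, "%s: zone mask now 0x%x\n", __func__,
		rsi_zone_enabled);
}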
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2023 Thomas Weißschuh <linux@weissschuh.net> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/completion.h> #include <linux/device.h> #include <linux/hwmon.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/types.h> #include <linux/usb.h> #define DRIVER_NAME "powerz" #define POWERZ_EP_CMD_OUT 0x01 #define POWERZ_EP_DATA_IN 0x81 struct powerz_sensor_data { u8 _unknown_1[8]; __le32 V_bus; __le32 I_bus; __le32 V_bus_avg; __le32 I_bus_avg; u8 _unknown_2[8]; u8 temp[2]; __le16 V_cc1; __le16 V_cc2; __le16 V_dp; __le16 V_dm; __le16 V_dd; u8 _unknown_3[4]; } __packed; struct powerz_priv { char transfer_buffer[64]; /* first member to satisfy DMA alignment */ struct mutex mutex; struct completion completion; struct urb *urb; int status; }; static const struct hwmon_channel_info *const powerz_info[] = { HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL | HWMON_I_AVERAGE, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL), HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_LABEL | HWMON_C_AVERAGE), HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL), NULL }; static int powerz_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, const char **str) { if (type == hwmon_curr && attr == hwmon_curr_label) { *str = "IBUS"; } else if (type == hwmon_in && attr == hwmon_in_label) { if (channel == 0) *str = "VBUS"; else if (channel == 1) *str = "VCC1"; else if (channel == 2) *str = "VCC2"; else if (channel == 3) *str = "VDP"; else if (channel == 4) *str = "VDM"; else if (channel == 5) *str = "VDD"; else return -EOPNOTSUPP; } else if (type == hwmon_temp && attr == hwmon_temp_label) { *str = "TEMP"; } else { return -EOPNOTSUPP; } return 0; } static void powerz_usb_data_complete(struct urb *urb) { struct powerz_priv *priv = urb->context; complete(&priv->completion); } static void powerz_usb_cmd_complete(struct urb *urb) { struct powerz_priv *priv = urb->context; usb_fill_bulk_urb(urb, urb->dev, usb_rcvbulkpipe(urb->dev, POWERZ_EP_DATA_IN), priv->transfer_buffer, sizeof(priv->transfer_buffer), powerz_usb_data_complete, priv); priv->status = usb_submit_urb(urb, GFP_ATOMIC); if (priv->status) complete(&priv->completion); } static int powerz_read_data(struct usb_device *udev, struct powerz_priv *priv) { int ret; priv->status = -ETIMEDOUT; reinit_completion(&priv->completion); priv->transfer_buffer[0] = 0x0c;
priv->transfer_buffer[1] = 0x00; priv->transfer_buffer[2] = 0x02; priv->transfer_buffer[3] = 0x00; usb_fill_bulk_urb(priv->urb, udev, usb_sndbulkpipe(udev, POWERZ_EP_CMD_OUT), priv->transfer_buffer, 4, powerz_usb_cmd_complete, priv); ret = usb_submit_urb(priv->urb, GFP_KERNEL); if (ret) return ret; if (!wait_for_completion_interruptible_timeout (&priv->completion, msecs_to_jiffies(5))) { usb_kill_urb(priv->urb); return -EIO; } if (priv->urb->actual_length < sizeof(struct powerz_sensor_data)) return -EIO; return priv->status; } static int powerz_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { struct usb_interface *intf = to_usb_interface(dev->parent); struct usb_device *udev = interface_to_usbdev(intf); struct powerz_priv *priv = usb_get_intfdata(intf); struct powerz_sensor_data *data; int ret; if (!priv) return -EIO; /* disconnected */ mutex_lock(&priv->mutex); ret = powerz_read_data(udev, priv); if (ret) goto out; data = (struct powerz_sensor_data *)priv->transfer_buffer; if (type == hwmon_curr) { if (attr == hwmon_curr_input) *val = ((s32)le32_to_cpu(data->I_bus)) / 1000; else if (attr == hwmon_curr_average) *val = ((s32)le32_to_cpu(data->I_bus_avg)) / 1000; else ret = -EOPNOTSUPP; } else if (type == hwmon_in) { if (attr == hwmon_in_input) { if (channel == 0) *val = le32_to_cpu(data->V_bus) / 1000; else if (channel == 1) *val = le16_to_cpu(data->V_cc1) / 10; else if (channel == 2) *val = le16_to_cpu(data->V_cc2) / 10; else if (channel == 3) *val = le16_to_cpu(data->V_dp) / 10; else if (channel == 4) *val = le16_to_cpu(data->V_dm) / 10; else if (channel == 5) *val = le16_to_cpu(data->V_dd) / 10; else ret = -EOPNOTSUPP; } else if (attr == hwmon_in_average && channel == 0) { *val = le32_to_cpu(data->V_bus_avg) / 1000; } else { ret = -EOPNOTSUPP; } } else if (type == hwmon_temp && attr == hwmon_temp_input) { *val = data->temp[1] * 2000 + data->temp[0] * 1000 / 128; } else { ret = -EOPNOTSUPP; } out: mutex_unlock(&priv->mutex); return ret; } static const struct hwmon_ops powerz_hwmon_ops = { .visible = 0444, .read = powerz_read, .read_string = powerz_read_string, }; static const struct hwmon_chip_info powerz_chip_info = { .ops = &powerz_hwmon_ops, .info = powerz_info, }; static int powerz_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct powerz_priv *priv; struct device *hwmon_dev; struct device *parent; parent = &intf->dev; priv = devm_kzalloc(parent, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->urb = usb_alloc_urb(0, GFP_KERNEL); if (!priv->urb) return -ENOMEM; mutex_init(&priv->mutex); init_completion(&priv->completion); hwmon_dev = devm_hwmon_device_register_with_info(parent, DRIVER_NAME, priv, &powerz_chip_info, NULL); if (IS_ERR(hwmon_dev)) { usb_free_urb(priv->urb); return PTR_ERR(hwmon_dev); } usb_set_intfdata(intf, priv); return 0; } static void powerz_disconnect(struct usb_interface *intf) { struct powerz_priv *priv = usb_get_intfdata(intf); mutex_lock(&priv->mutex); usb_kill_urb(priv->urb); usb_free_urb(priv->urb); mutex_unlock(&priv->mutex); } static const struct usb_device_id powerz_id_table[] = { { USB_DEVICE_INTERFACE_NUMBER(0x5FC9, 0x0061, 0x00) }, /* ChargerLAB POWER-Z KM002C */ { USB_DEVICE_INTERFACE_NUMBER(0x5FC9, 0x0063, 0x00) }, /* ChargerLAB POWER-Z KM003C */ { } }; MODULE_DEVICE_TABLE(usb, powerz_id_table); static struct usb_driver powerz_driver = { .name = DRIVER_NAME, .id_table = powerz_id_table, .probe = powerz_probe, .disconnect = powerz_disconnect, }; 
module_usb_driver(powerz_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Thomas Weißschuh <linux@weissschuh.net>"); MODULE_DESCRIPTION("ChargerLAB POWER-Z USB-C tester");
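/*
 * Worked example of the temperature conversion in powerz_read() above,
 * which computes temp[1] * 2000 + temp[0] * 1000 / 128 millidegrees C.
 * This implies temp[1] counts 2 °C steps and temp[0] counts 1/128 °C
 * steps; that interpretation is inferred from the conversion itself,
 * not from vendor documentation. For temp[1] = 12, temp[0] = 64:
 *   12 * 2000       = 24000 m°C
 *   64 * 1000 / 128 =   500 m°C
 *   total           = 24500 m°C = 24.5 °C
 */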
// SPDX-License-Identifier: GPL-2.0-or-later /* Linux driver for Philips webcam Various miscellaneous functions and tables. (C) 1999-2003 Nemosoft Unv. (C) 2004-2006 Luc Saillard (luc@saillard.org) NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx driver and thus may have bugs that are not present in the original version. Please send bug reports and support requests to <luc@saillard.org>. The decompression routines have been implemented by reverse-engineering the Nemosoft binary pwcx module. Caveat emptor. */ #include "pwc.h" const int pwc_image_sizes[PSZ_MAX][2] = { { 128, 96 }, /* sqcif */ { 160, 120 }, /* qsif */ { 176, 144 }, /* qcif */ { 320, 240 }, /* sif */ { 352, 288 }, /* cif */ { 640, 480 }, /* vga */ }; /* x,y -> PSZ_ */ int pwc_get_size(struct pwc_device *pdev, int width, int height) { int i; /* Find the largest size supported by the camera that fits into the requested size. */ for (i = PSZ_MAX - 1; i >= 0; i--) { if (!(pdev->image_mask & (1 << i))) continue; if (pwc_image_sizes[i][0] <= width && pwc_image_sizes[i][1] <= height) return i; } /* No mode found, return the smallest mode we have */ for (i = 0; i < PSZ_MAX; i++) { if (pdev->image_mask & (1 << i)) return i; } /* Never reached; there always is at least one supported mode */ return 0; } /* initialize variables depending on type and decompressor */ void pwc_construct(struct pwc_device *pdev) { if (DEVICE_USE_CODEC1(pdev->type)) { pdev->image_mask = 1 << PSZ_SQCIF | 1 << PSZ_QCIF | 1 << PSZ_CIF; pdev->vcinterface = 2; pdev->vendpoint = 4; pdev->frame_header_size = 0; pdev->frame_trailer_size = 0; } else if (DEVICE_USE_CODEC3(pdev->type)) { pdev->image_mask = 1 << PSZ_QSIF | 1 << PSZ_SIF | 1 << PSZ_VGA; pdev->vcinterface = 3; pdev->vendpoint = 5; pdev->frame_header_size = TOUCAM_HEADER_SIZE; pdev->frame_trailer_size = TOUCAM_TRAILER_SIZE; } else /* if (DEVICE_USE_CODEC2(pdev->type)) */ { pdev->image_mask = 1 << PSZ_SQCIF | 1 << PSZ_QSIF | 1 << PSZ_QCIF | 1 << PSZ_SIF | 1 << PSZ_CIF | 1 << PSZ_VGA; pdev->vcinterface = 3; pdev->vendpoint = 4; pdev->frame_header_size = 0; pdev->frame_trailer_size = 0; } }
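/*
 * Walkthrough of pwc_get_size() above for a Codec3 camera, whose
 * image_mask is QSIF|SIF|VGA per pwc_construct():
 *   pwc_get_size(pdev, 400, 300): VGA (640x480) does not fit, so the
 *   downward scan returns PSZ_SIF (320x240), the largest fitting mode;
 *   pwc_get_size(pdev, 100, 100): no mode fits, so the fallback loop
 *   returns the smallest supported mode, PSZ_QSIF (160x120).
 */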
// SPDX-License-Identifier: GPL-2.0-or-later #include <linux/device.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/stddef.h> struct mdiobus_devres { struct mii_bus *mii; }; static void devm_mdiobus_free(struct device *dev, void *this) { struct mdiobus_devres *dr = this; mdiobus_free(dr->mii); } /** * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size() * @dev: Device to allocate mii_bus for * @sizeof_priv: Space to allocate for private structure * * Managed mdiobus_alloc_size. mii_bus allocated with this function is * automatically freed on driver detach. * * RETURNS: * Pointer to allocated mii_bus on success, NULL on out-of-memory error. */ struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv) { struct mdiobus_devres *dr; dr = devres_alloc(devm_mdiobus_free, sizeof(*dr), GFP_KERNEL); if (!dr) return NULL; dr->mii = mdiobus_alloc_size(sizeof_priv); if (!dr->mii) { devres_free(dr); return NULL; } devres_add(dev, dr); return dr->mii; } EXPORT_SYMBOL(devm_mdiobus_alloc_size); static void devm_mdiobus_unregister(struct device *dev, void *this) { struct mdiobus_devres *dr = this; mdiobus_unregister(dr->mii); } static int mdiobus_devres_match(struct device *dev, void *this, void *match_data) { struct mdiobus_devres *res = this; struct mii_bus *mii = match_data; return mii == res->mii; } /** * __devm_mdiobus_register - Resource-managed variant of mdiobus_register() * @dev: Device to register mii_bus for * @bus: MII bus structure to register * @owner: Owning module * * Returns 0 on success, negative error number on failure. */ int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus, struct module *owner) { struct mdiobus_devres *dr; int ret; if (WARN_ON(!devres_find(dev, devm_mdiobus_free, mdiobus_devres_match, bus))) return -EINVAL; dr = devres_alloc(devm_mdiobus_unregister, sizeof(*dr), GFP_KERNEL); if (!dr) return -ENOMEM; ret = __mdiobus_register(bus, owner); if (ret) { devres_free(dr); return ret; } dr->mii = bus; devres_add(dev, dr); return 0; } EXPORT_SYMBOL(__devm_mdiobus_register); #if IS_ENABLED(CONFIG_OF_MDIO) /** * __devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register() * @dev: Device to register mii_bus for * @mdio: MII bus structure to register * @np: Device node to parse * @owner: Owning module */ int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, struct device_node *np, struct module *owner) { struct mdiobus_devres *dr; int ret; if (WARN_ON(!devres_find(dev, devm_mdiobus_free, mdiobus_devres_match, mdio))) return -EINVAL; dr = devres_alloc(devm_mdiobus_unregister, sizeof(*dr), GFP_KERNEL); if (!dr) return -ENOMEM; ret = __of_mdiobus_register(mdio, np, owner); if (ret) { devres_free(dr); return ret; } dr->mii = mdio; devres_add(dev, dr); return 0; } EXPORT_SYMBOL(__devm_of_mdiobus_register); #endif /* CONFIG_OF_MDIO */ MODULE_DESCRIPTION("Network MDIO bus devres helpers"); MODULE_LICENSE("GPL");
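/*
 * Usage sketch (illustrative, not part of this file): an MDIO controller
 * driver typically pairs the helpers above in probe so that both freeing
 * and unregistering ride on devres. Callers normally use the
 * devm_mdiobus_register() wrapper, which passes THIS_MODULE to
 * __devm_mdiobus_register(). The example_* names are placeholders, and
 * <linux/platform_device.h> is assumed to be included.
 */
struct example_priv { int dummy; };	/* placeholder private data */

static int example_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	return 0;	/* stub: real drivers talk to the hardware here */
}

static int example_mdio_write(struct mii_bus *bus, int addr, int regnum,
			      u16 val)
{
	return 0;	/* stub */
}

static int example_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;

	/* freed automatically on driver detach */
	bus = devm_mdiobus_alloc_size(&pdev->dev,
				      sizeof(struct example_priv));
	if (!bus)
		return -ENOMEM;

	bus->name = "example-mdio";
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));
	bus->read = example_mdio_read;
	bus->write = example_mdio_write;
	bus->parent = &pdev->dev;

	/* unregistered automatically, before the bus is freed, on detach */
	return devm_mdiobus_register(&pdev->dev, bus);
}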
// SPDX-License-Identifier: GPL-2.0-or-later /* * Force feedback support for Holtek On Line Grip based gamepads * * These include at least a Brazilian "Clone Joypad Super Power Fire" * which uses vendor ID 0x1241 and identifies as "HOLTEK On Line Grip". * * Copyright (c) 2011 Anssi Hannula <anssi.hannula@iki.fi> */ /* */ #include <linux/hid.h> #include <linux/input.h> #include <linux/module.h> #include <linux/slab.h> #include "hid-ids.h" #ifdef CONFIG_HOLTEK_FF /* * These commands and parameters are currently known: * * byte 0: command id: * 01 set effect parameters * 02 play specified effect * 03 stop specified effect * 04 stop all effects * 06 stop all effects * (the difference between 04 and 06 isn't known; win driver * sends 06,04 on application init, and 06 otherwise) * * Commands 01 and 02 need to be sent as pairs, i.e. you need to send 01 * before each 02. * * The rest of the bytes are parameters. Command 01 takes all of them, and * commands 02,03 take only the effect id.
* * byte 1: * bits 0-3: effect id: * 1: very strong rumble * 2: periodic rumble, short intervals * 3: very strong rumble * 4: periodic rumble, long intervals * 5: weak periodic rumble, long intervals * 6: weak periodic rumble, short intervals * 7: periodic rumble, short intervals * 8: strong periodic rumble, short intervals * 9: very strong rumble * a: causes an error * b: very strong periodic rumble, very short intervals * c-f: nothing * bit 6: right (weak) motor enabled * bit 7: left (strong) motor enabled * * bytes 2-3: time in milliseconds, big-endian * bytes 5-6: unknown (win driver seems to use at least 10e0 with effect 1 * and 0014 with effect 6) * byte 7: * bits 0-3: effect magnitude */ #define HOLTEKFF_MSG_LENGTH 7 static const u8 start_effect_1[] = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 stop_all4[] = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 stop_all6[] = { 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; struct holtekff_device { struct hid_field *field; }; static void holtekff_send(struct holtekff_device *holtekff, struct hid_device *hid, const u8 data[HOLTEKFF_MSG_LENGTH]) { int i; for (i = 0; i < HOLTEKFF_MSG_LENGTH; i++) { holtekff->field->value[i] = data[i]; } dbg_hid("sending %7ph\n", data); hid_hw_request(hid, holtekff->field->report, HID_REQ_SET_REPORT); } static int holtekff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct holtekff_device *holtekff = data; int left, right; /* effect type 1, length 65535 msec */ u8 buf[HOLTEKFF_MSG_LENGTH] = { 0x01, 0x01, 0xff, 0xff, 0x10, 0xe0, 0x00 }; left = effect->u.rumble.strong_magnitude; right = effect->u.rumble.weak_magnitude; dbg_hid("called with 0x%04x 0x%04x\n", left, right); if (!left && !right) { holtekff_send(holtekff, hid, stop_all6); return 0; } if (left) buf[1] |= 0x80; if (right) buf[1] |= 0x40; /* The device takes a single magnitude, so we just sum them up. 
*/ buf[6] = min(0xf, (left >> 12) + (right >> 12)); holtekff_send(holtekff, hid, buf); holtekff_send(holtekff, hid, start_effect_1); return 0; } static int holtekff_init(struct hid_device *hid) { struct holtekff_device *holtekff; struct hid_report *report; struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct input_dev *dev; int error; if (list_empty(&hid->inputs)) { hid_err(hid, "no inputs found\n"); return -ENODEV; } hidinput = list_entry(hid->inputs.next, struct hid_input, list); dev = hidinput->input; if (list_empty(report_list)) { hid_err(hid, "no output report found\n"); return -ENODEV; } report = list_entry(report_list->next, struct hid_report, list); if (report->maxfield < 1 || report->field[0]->report_count != 7) { hid_err(hid, "unexpected output report layout\n"); return -ENODEV; } holtekff = kzalloc(sizeof(*holtekff), GFP_KERNEL); if (!holtekff) return -ENOMEM; set_bit(FF_RUMBLE, dev->ffbit); holtekff->field = report->field[0]; /* initialize the same way as win driver does */ holtekff_send(holtekff, hid, stop_all4); holtekff_send(holtekff, hid, stop_all6); error = input_ff_create_memless(dev, holtekff, holtekff_play); if (error) { kfree(holtekff); return error; } hid_info(hid, "Force feedback for Holtek On Line Grip based devices by Anssi Hannula <anssi.hannula@iki.fi>\n"); return 0; } #else static inline int holtekff_init(struct hid_device *hid) { return 0; } #endif static int holtek_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); if (ret) { hid_err(hdev, "hw start failed\n"); goto err; } holtekff_init(hdev); return 0; err: return ret; } static const struct hid_device_id holtek_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) }, { } }; MODULE_DEVICE_TABLE(hid, holtek_devices); static struct hid_driver holtek_driver = { .name = "holtek", .id_table = holtek_devices, .probe = holtek_probe, }; module_hid_driver(holtek_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>"); MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
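/*
 * Userspace sketch (illustrative, not part of the driver above): an
 * FF_RUMBLE effect uploaded through the standard evdev force-feedback
 * API is what ultimately reaches holtekff_play(). With the magnitudes
 * below, the driver computes min(0xf, (0xc000 >> 12) + (0x4000 >> 12))
 * = 0xf for the magnitude byte. The device path comes from the caller
 * and error handling is simplified.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

/* Returns 0 on success, -1 on any failure. */
int example_rumble(const char *evdev_path)
{
	struct ff_effect effect;
	struct input_event play;
	int ret = -1;
	int fd = open(evdev_path, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_RUMBLE;
	effect.id = -1;				/* kernel assigns an id */
	effect.u.rumble.strong_magnitude = 0xc000;	/* left motor */
	effect.u.rumble.weak_magnitude = 0x4000;	/* right motor */
	effect.replay.length = 1000;		/* milliseconds */

	if (ioctl(fd, EVIOCSFF, &effect) == 0) {
		memset(&play, 0, sizeof(play));
		play.type = EV_FF;
		play.code = effect.id;		/* id filled in by kernel */
		play.value = 1;			/* start playback */
		if (write(fd, &play, sizeof(play)) == sizeof(play))
			ret = 0;
	}

	close(fd);
	return ret;
}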
// SPDX-License-Identifier: GPL-2.0 #define pr_fmt(fmt) "irq: " fmt #include <linux/acpi.h> #include <linux/debugfs.h> #include <linux/hardirq.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdesc.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/topology.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/fs.h> static LIST_HEAD(irq_domain_list); static DEFINE_MUTEX(irq_domain_mutex); static struct irq_domain *irq_default_domain; static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc, const struct irq_affinity_desc *affinity); static void irq_domain_check_hierarchy(struct irq_domain *domain); static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq); struct irqchip_fwid { struct fwnode_handle fwnode; unsigned int type; char *name; phys_addr_t *pa; }; #ifdef 
CONFIG_GENERIC_IRQ_DEBUGFS static void debugfs_add_domain_dir(struct irq_domain *d); static void debugfs_remove_domain_dir(struct irq_domain *d); #else static inline void debugfs_add_domain_dir(struct irq_domain *d) { } static inline void debugfs_remove_domain_dir(struct irq_domain *d) { } #endif static const char *irqchip_fwnode_get_name(const struct fwnode_handle *fwnode) { struct irqchip_fwid *fwid = container_of(fwnode, struct irqchip_fwid, fwnode); return fwid->name; } const struct fwnode_operations irqchip_fwnode_ops = { .get_name = irqchip_fwnode_get_name, }; EXPORT_SYMBOL_GPL(irqchip_fwnode_ops); /** * __irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for * identifying an irq domain * @type: Type of irqchip_fwnode. See linux/irqdomain.h * @id: Optional user provided id if name != NULL * @name: Optional user provided domain name * @pa: Optional user-provided physical address * * Allocate a struct irqchip_fwid, and return a pointer to the embedded * fwnode_handle (or NULL on failure). * * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are * solely to transport name information to irqdomain creation code. The * node is not stored. For other types the pointer is kept in the irq * domain struct. */ struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id, const char *name, phys_addr_t *pa) { struct irqchip_fwid *fwid; char *n; fwid = kzalloc(sizeof(*fwid), GFP_KERNEL); switch (type) { case IRQCHIP_FWNODE_NAMED: n = kasprintf(GFP_KERNEL, "%s", name); break; case IRQCHIP_FWNODE_NAMED_ID: n = kasprintf(GFP_KERNEL, "%s-%d", name, id); break; default: n = kasprintf(GFP_KERNEL, "irqchip@%pa", pa); break; } if (!fwid || !n) { kfree(fwid); kfree(n); return NULL; } fwid->type = type; fwid->name = n; fwid->pa = pa; fwnode_init(&fwid->fwnode, &irqchip_fwnode_ops); return &fwid->fwnode; } EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode); /** * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle * @fwnode: fwnode_handle to free * * Free a fwnode_handle allocated with irq_domain_alloc_fwnode. */ void irq_domain_free_fwnode(struct fwnode_handle *fwnode) { struct irqchip_fwid *fwid; if (!fwnode || WARN_ON(!is_fwnode_irqchip(fwnode))) return; fwid = container_of(fwnode, struct irqchip_fwid, fwnode); kfree(fwid->name); kfree(fwid); } EXPORT_SYMBOL_GPL(irq_domain_free_fwnode); static int alloc_name(struct irq_domain *domain, char *base, enum irq_domain_bus_token bus_token) { if (bus_token == DOMAIN_BUS_ANY) domain->name = kasprintf(GFP_KERNEL, "%s", base); else domain->name = kasprintf(GFP_KERNEL, "%s-%d", base, bus_token); if (!domain->name) return -ENOMEM; domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED; return 0; } static int alloc_fwnode_name(struct irq_domain *domain, const struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token, const char *suffix) { const char *sep = suffix ? "-" : ""; const char *suf = suffix ? : ""; char *name; if (bus_token == DOMAIN_BUS_ANY) name = kasprintf(GFP_KERNEL, "%pfw%s%s", fwnode, sep, suf); else name = kasprintf(GFP_KERNEL, "%pfw%s%s-%d", fwnode, sep, suf, bus_token); if (!name) return -ENOMEM; /* * fwnode paths contain '/', which debugfs is legitimately unhappy * about. Replace them with ':', which does the trick and is not as * offensive as '\'... 
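 * (For example, every '/' in a node path such as /soc/intc@1000, an illustrative path, becomes ':', so the resulting domain name is :soc:intc@1000.)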
*/ domain->name = strreplace(name, '/', ':'); domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED; return 0; } static int alloc_unknown_name(struct irq_domain *domain, enum irq_domain_bus_token bus_token) { static atomic_t unknown_domains; int id = atomic_inc_return(&unknown_domains); if (bus_token == DOMAIN_BUS_ANY) domain->name = kasprintf(GFP_KERNEL, "unknown-%d", id); else domain->name = kasprintf(GFP_KERNEL, "unknown-%d-%d", id, bus_token); if (!domain->name) return -ENOMEM; domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED; return 0; } static int irq_domain_set_name(struct irq_domain *domain, const struct irq_domain_info *info) { enum irq_domain_bus_token bus_token = info->bus_token; const struct fwnode_handle *fwnode = info->fwnode; if (is_fwnode_irqchip(fwnode)) { struct irqchip_fwid *fwid = container_of(fwnode, struct irqchip_fwid, fwnode); /* * The name_suffix is only intended to be used to avoid a name * collision when multiple domains are created for a single * device and the name is picked using a real device node. * (Typical use-case is regmap-IRQ controllers for devices * providing more than one physical IRQ.) There should be no * need to use name_suffix with irqchip-fwnode. */ if (info->name_suffix) return -EINVAL; switch (fwid->type) { case IRQCHIP_FWNODE_NAMED: case IRQCHIP_FWNODE_NAMED_ID: return alloc_name(domain, fwid->name, bus_token); default: domain->name = fwid->name; if (bus_token != DOMAIN_BUS_ANY) return alloc_name(domain, fwid->name, bus_token); } } else if (is_of_node(fwnode) || is_acpi_device_node(fwnode) || is_software_node(fwnode)) { return alloc_fwnode_name(domain, fwnode, bus_token, info->name_suffix); } if (domain->name) return 0; if (fwnode) pr_err("Invalid fwnode type for irqdomain\n"); return alloc_unknown_name(domain, bus_token); } static struct irq_domain *__irq_domain_create(const struct irq_domain_info *info) { struct irq_domain *domain; int err; if (WARN_ON((info->size && info->direct_max) || (!IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) && info->direct_max) || (info->direct_max && info->direct_max != info->hwirq_max))) return ERR_PTR(-EINVAL); domain = kzalloc_node(struct_size(domain, revmap, info->size), GFP_KERNEL, of_node_to_nid(to_of_node(info->fwnode))); if (!domain) return ERR_PTR(-ENOMEM); err = irq_domain_set_name(domain, info); if (err) { kfree(domain); return ERR_PTR(err); } domain->fwnode = fwnode_handle_get(info->fwnode); fwnode_dev_initialized(domain->fwnode, true); /* Fill structure */ INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); domain->ops = info->ops; domain->host_data = info->host_data; domain->bus_token = info->bus_token; domain->hwirq_max = info->hwirq_max; if (info->direct_max) domain->flags |= IRQ_DOMAIN_FLAG_NO_MAP; domain->revmap_size = info->size; /* * Hierarchical domains use the domain lock of the root domain * (innermost domain). * * For non-hierarchical domains (as for root domains), the root * pointer is set to the domain itself so that &domain->root->mutex * always points to the right lock. 
*/ mutex_init(&domain->mutex); domain->root = domain; irq_domain_check_hierarchy(domain); return domain; } static void __irq_domain_publish(struct irq_domain *domain) { mutex_lock(&irq_domain_mutex); debugfs_add_domain_dir(domain); list_add(&domain->link, &irq_domain_list); mutex_unlock(&irq_domain_mutex); pr_debug("Added domain %s\n", domain->name); } static void irq_domain_free(struct irq_domain *domain) { fwnode_dev_initialized(domain->fwnode, false); fwnode_handle_put(domain->fwnode); if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED) kfree(domain->name); kfree(domain); } static void irq_domain_instantiate_descs(const struct irq_domain_info *info) { if (!IS_ENABLED(CONFIG_SPARSE_IRQ)) return; if (irq_alloc_descs(info->virq_base, info->virq_base, info->size, of_node_to_nid(to_of_node(info->fwnode))) < 0) { pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", info->virq_base); } } static struct irq_domain *__irq_domain_instantiate(const struct irq_domain_info *info, bool cond_alloc_descs, bool force_associate) { struct irq_domain *domain; int err; domain = __irq_domain_create(info); if (IS_ERR(domain)) return domain; domain->flags |= info->domain_flags; domain->exit = info->exit; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY if (info->parent) { domain->root = info->parent->root; domain->parent = info->parent; } #endif if (info->dgc_info) { err = irq_domain_alloc_generic_chips(domain, info->dgc_info); if (err) goto err_domain_free; } if (info->init) { err = info->init(domain); if (err) goto err_domain_gc_remove; } __irq_domain_publish(domain); if (cond_alloc_descs && info->virq_base > 0) irq_domain_instantiate_descs(info); /* * Legacy interrupt domains have a fixed Linux interrupt number * associated. Other interrupt domains can request association by * providing a Linux interrupt number > 0. */ if (force_associate || info->virq_base > 0) { irq_domain_associate_many(domain, info->virq_base, info->hwirq_base, info->size - info->hwirq_base); } return domain; err_domain_gc_remove: if (info->dgc_info) irq_domain_remove_generic_chips(domain); err_domain_free: irq_domain_free(domain); return ERR_PTR(err); } /** * irq_domain_instantiate() - Instantiate a new irq domain data structure * @info: Pointer to the information for this domain * * Return: A pointer to the instantiated irq domain or an ERR_PTR value. */ struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info) { return __irq_domain_instantiate(info, false, false); } EXPORT_SYMBOL_GPL(irq_domain_instantiate); /** * irq_domain_remove() - Remove an irq domain. * @domain: domain to remove * * This routine is used to remove an irq domain. The caller must ensure * that all mappings within the domain have been disposed of before * calling this function. */ void irq_domain_remove(struct irq_domain *domain) { if (domain->exit) domain->exit(domain); mutex_lock(&irq_domain_mutex); debugfs_remove_domain_dir(domain); WARN_ON(!radix_tree_empty(&domain->revmap_tree)); list_del(&domain->link); /* * If the domain being removed is the default one, reset it. 
*/ if (unlikely(irq_default_domain == domain)) irq_set_default_domain(NULL); mutex_unlock(&irq_domain_mutex); if (domain->flags & IRQ_DOMAIN_FLAG_DESTROY_GC) irq_domain_remove_generic_chips(domain); pr_debug("Removed domain %s\n", domain->name); irq_domain_free(domain); } EXPORT_SYMBOL_GPL(irq_domain_remove); void irq_domain_update_bus_token(struct irq_domain *domain, enum irq_domain_bus_token bus_token) { char *name; if (domain->bus_token == bus_token) return; mutex_lock(&irq_domain_mutex); domain->bus_token = bus_token; name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token); if (!name) { mutex_unlock(&irq_domain_mutex); return; } debugfs_remove_domain_dir(domain); if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED) kfree(domain->name); else domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED; domain->name = name; debugfs_add_domain_dir(domain); mutex_unlock(&irq_domain_mutex); } EXPORT_SYMBOL_GPL(irq_domain_update_bus_token); /** * irq_domain_create_simple() - Register an irq_domain and optionally map a range of irqs * @fwnode: firmware node for the interrupt controller * @size: total number of irqs in mapping * @first_irq: first number of irq block assigned to the domain, * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then * pre-map all of the irqs in the domain to virqs starting at first_irq. * @ops: domain callbacks * @host_data: Controller private data pointer * * Allocates an irq_domain and, if first_irq is positive, also allocates * irq_descs and maps all of the hwirqs to virqs starting at first_irq. * * This is intended to implement the expected behaviour for most * interrupt controllers. If device tree is used, then first_irq will be 0 and * irqs get mapped dynamically on the fly. However, if the controller requires * static virq assignments (non-DT boot) then it will set that up correctly. */ struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, unsigned int size, unsigned int first_irq, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain_info info = { .fwnode = fwnode, .size = size, .hwirq_max = size, .virq_base = first_irq, .ops = ops, .host_data = host_data, }; struct irq_domain *domain = __irq_domain_instantiate(&info, true, false); return IS_ERR(domain) ? NULL : domain; } EXPORT_SYMBOL_GPL(irq_domain_create_simple); struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, unsigned int size, unsigned int first_irq, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain_info info = { .fwnode = fwnode, .size = first_hwirq + size, .hwirq_max = first_hwirq + size, .hwirq_base = first_hwirq, .virq_base = first_irq, .ops = ops, .host_data = host_data, }; struct irq_domain *domain = __irq_domain_instantiate(&info, false, true); return IS_ERR(domain) ? NULL : domain; } EXPORT_SYMBOL_GPL(irq_domain_create_legacy); /** * irq_find_matching_fwspec() - Locates a domain for a given fwspec * @fwspec: FW specifier for an interrupt * @bus_token: bus token used to match the domain */ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token) { struct irq_domain *h, *found = NULL; struct fwnode_handle *fwnode = fwspec->fwnode; int rc; /* * We might want to match the legacy controller last since * it might potentially be set to match all interrupts in * the absence of a device node. This isn't a problem so far, * though... 
* * bus_token == DOMAIN_BUS_ANY matches any domain, any other * values must generate an exact match for the domain to be * selected. */ mutex_lock(&irq_domain_mutex); list_for_each_entry(h, &irq_domain_list, link) { if (h->ops->select && bus_token != DOMAIN_BUS_ANY) rc = h->ops->select(h, fwspec, bus_token); else if (h->ops->match) rc = h->ops->match(h, to_of_node(fwnode), bus_token); else rc = ((fwnode != NULL) && (h->fwnode == fwnode) && ((bus_token == DOMAIN_BUS_ANY) || (h->bus_token == bus_token))); if (rc) { found = h; break; } } mutex_unlock(&irq_domain_mutex); return found; } EXPORT_SYMBOL_GPL(irq_find_matching_fwspec); /** * irq_set_default_domain() - Set a "default" irq domain * @domain: default domain pointer * * For convenience, it's possible to set a "default" domain that will be used * whenever NULL is passed to irq_create_mapping(). It makes life easier for * platforms that want to manipulate a few hard coded interrupt numbers that * aren't properly represented in the device-tree. */ void irq_set_default_domain(struct irq_domain *domain) { pr_debug("Default domain set to @0x%p\n", domain); irq_default_domain = domain; } EXPORT_SYMBOL_GPL(irq_set_default_domain); /** * irq_get_default_domain() - Retrieve the "default" irq domain * * Returns: the default domain, if any. * * Modern code should never use this. This should only be used on * systems that cannot implement a firmware->fwnode mapping (which * both DT and ACPI provide). */ struct irq_domain *irq_get_default_domain(void) { return irq_default_domain; } EXPORT_SYMBOL_GPL(irq_get_default_domain); static bool irq_domain_is_nomap(struct irq_domain *domain) { return IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) && (domain->flags & IRQ_DOMAIN_FLAG_NO_MAP); } static void irq_domain_clear_mapping(struct irq_domain *domain, irq_hw_number_t hwirq) { lockdep_assert_held(&domain->root->mutex); if (irq_domain_is_nomap(domain)) return; if (hwirq < domain->revmap_size) rcu_assign_pointer(domain->revmap[hwirq], NULL); else radix_tree_delete(&domain->revmap_tree, hwirq); } static void irq_domain_set_mapping(struct irq_domain *domain, irq_hw_number_t hwirq, struct irq_data *irq_data) { /* * This also makes sure that all domains point to the same root when * called from irq_domain_insert_irq() for each domain in a hierarchy. 
*/ lockdep_assert_held(&domain->root->mutex); if (irq_domain_is_nomap(domain)) return; if (hwirq < domain->revmap_size) rcu_assign_pointer(domain->revmap[hwirq], irq_data); else radix_tree_insert(&domain->revmap_tree, hwirq, irq_data); } static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) { struct irq_data *irq_data = irq_get_irq_data(irq); irq_hw_number_t hwirq; if (WARN(!irq_data || irq_data->domain != domain, "virq%i doesn't exist; cannot disassociate\n", irq)) return; hwirq = irq_data->hwirq; mutex_lock(&domain->root->mutex); irq_set_status_flags(irq, IRQ_NOREQUEST); /* remove chip and handler */ irq_set_chip_and_handler(irq, NULL, NULL); /* Make sure it's completed */ synchronize_irq(irq); /* Tell the PIC about it */ if (domain->ops->unmap) domain->ops->unmap(domain, irq); smp_mb(); irq_data->domain = NULL; irq_data->hwirq = 0; domain->mapcount--; /* Clear reverse map for this hwirq */ irq_domain_clear_mapping(domain, hwirq); mutex_unlock(&domain->root->mutex); } static int irq_domain_associate_locked(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq) { struct irq_data *irq_data = irq_get_irq_data(virq); int ret; if (WARN(hwirq >= domain->hwirq_max, "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name)) return -EINVAL; if (WARN(!irq_data, "error: virq%i is not allocated", virq)) return -EINVAL; if (WARN(irq_data->domain, "error: virq%i is already associated", virq)) return -EINVAL; irq_data->hwirq = hwirq; irq_data->domain = domain; if (domain->ops->map) { ret = domain->ops->map(domain, virq, hwirq); if (ret != 0) { /* * If map() returns -EPERM, this interrupt is protected * by the firmware or some other service and shall not * be mapped. Don't bother telling the user about it. */ if (ret != -EPERM) { pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n", domain->name, hwirq, virq, ret); } irq_data->domain = NULL; irq_data->hwirq = 0; return ret; } } domain->mapcount++; irq_domain_set_mapping(domain, hwirq, irq_data); irq_clear_status_flags(virq, IRQ_NOREQUEST); return 0; } int irq_domain_associate(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq) { int ret; mutex_lock(&domain->root->mutex); ret = irq_domain_associate_locked(domain, virq, hwirq); mutex_unlock(&domain->root->mutex); return ret; } EXPORT_SYMBOL_GPL(irq_domain_associate); void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, irq_hw_number_t hwirq_base, int count) { struct device_node *of_node; int i; of_node = irq_domain_get_of_node(domain); pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__, of_node_full_name(of_node), irq_base, (int)hwirq_base, count); for (i = 0; i < count; i++) irq_domain_associate(domain, irq_base + i, hwirq_base + i); } EXPORT_SYMBOL_GPL(irq_domain_associate_many); #ifdef CONFIG_IRQ_DOMAIN_NOMAP /** * irq_create_direct_mapping() - Allocate an irq for direct mapping * @domain: domain to allocate the irq for or NULL for default domain * * This routine is used for irq controllers which can choose the hardware * interrupt numbers they generate. In such a case it's simplest to use * the linux irq as the hardware interrupt number. It still uses the linear * or radix tree to store the mapping, but the irq controller can optimize * the revmap path by using the hwirq directly. 
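 *
 * Illustrative calling sketch (my_domain is a hypothetical nomap domain;
 * the error value is arbitrary):
 *
 *	unsigned int virq = irq_create_direct_mapping(my_domain);
 *	if (!virq)
 *		return -ENOSPC;
 *
 * On success virq doubles as the hwirq, so the controller can be
 * programmed with virq directly; a return value of 0 means the
 * allocation failed.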
*/ unsigned int irq_create_direct_mapping(struct irq_domain *domain) { struct device_node *of_node; unsigned int virq; if (domain == NULL) domain = irq_default_domain; of_node = irq_domain_get_of_node(domain); virq = irq_alloc_desc_from(1, of_node_to_nid(of_node)); if (!virq) { pr_debug("create_direct virq allocation failed\n"); return 0; } if (virq >= domain->hwirq_max) { pr_err("ERROR: no free irqs available below %lu maximum\n", domain->hwirq_max); irq_free_desc(virq); return 0; } pr_debug("create_direct obtained virq %d\n", virq); if (irq_domain_associate(domain, virq, virq)) { irq_free_desc(virq); return 0; } return virq; } EXPORT_SYMBOL_GPL(irq_create_direct_mapping); #endif static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain, irq_hw_number_t hwirq, const struct irq_affinity_desc *affinity) { struct device_node *of_node = irq_domain_get_of_node(domain); int virq; pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); /* Allocate a virtual interrupt number */ virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), affinity); if (virq <= 0) { pr_debug("-> virq allocation failed\n"); return 0; } if (irq_domain_associate_locked(domain, virq, hwirq)) { irq_free_desc(virq); return 0; } pr_debug("irq %lu on domain %s mapped to virtual irq %u\n", hwirq, of_node_full_name(of_node), virq); return virq; } /** * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space * @domain: domain owning this hardware interrupt or NULL for default domain * @hwirq: hardware irq number in that domain space * @affinity: irq affinity * * Only one mapping per hardware interrupt is permitted. Returns a linux * irq number. * If the sense/trigger is to be specified, set_irq_type() should be called * on the number returned from that call. 
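 *
 * Illustrative use (domain and the hwirq value 42 are assumptions):
 *
 *	unsigned int virq = irq_create_mapping_affinity(domain, 42, NULL);
 *	if (!virq)
 *		return -ENXIO;
 *	irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);
 *
 * Passing a NULL @affinity is what the irq_create_mapping() wrapper does.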
*/ unsigned int irq_create_mapping_affinity(struct irq_domain *domain, irq_hw_number_t hwirq, const struct irq_affinity_desc *affinity) { int virq; /* Look for default domain if necessary */ if (domain == NULL) domain = irq_default_domain; if (domain == NULL) { WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq); return 0; } mutex_lock(&domain->root->mutex); /* Check if mapping already exists */ virq = irq_find_mapping(domain, hwirq); if (virq) { pr_debug("existing mapping on virq %d\n", virq); goto out; } virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity); out: mutex_unlock(&domain->root->mutex); return virq; } EXPORT_SYMBOL_GPL(irq_create_mapping_affinity); static int irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, irq_hw_number_t *hwirq, unsigned int *type) { #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY if (d->ops->translate) return d->ops->translate(d, fwspec, hwirq, type); #endif if (d->ops->xlate) return d->ops->xlate(d, to_of_node(fwspec->fwnode), fwspec->param, fwspec->param_count, hwirq, type); /* If domain has no translation, then we assume interrupt line */ *hwirq = fwspec->param[0]; return 0; } void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args, unsigned int count, struct irq_fwspec *fwspec) { int i; fwspec->fwnode = of_fwnode_handle(np); fwspec->param_count = count; for (i = 0; i < count; i++) fwspec->param[i] = args[i]; } EXPORT_SYMBOL_GPL(of_phandle_args_to_fwspec); unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) { struct irq_domain *domain; struct irq_data *irq_data; irq_hw_number_t hwirq; unsigned int type = IRQ_TYPE_NONE; int virq; if (fwspec->fwnode) { domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED); if (!domain) domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY); } else { domain = irq_default_domain; } if (!domain) { pr_warn("no irq domain found for %s !\n", of_node_full_name(to_of_node(fwspec->fwnode))); return 0; } if (irq_domain_translate(domain, fwspec, &hwirq, &type)) return 0; /* * WARN if the irqchip returns a type with bits * outside the sense mask set and clear these bits. */ if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK)) type &= IRQ_TYPE_SENSE_MASK; mutex_lock(&domain->root->mutex); /* * If we've already configured this interrupt, * don't do it again, or hell will break loose. */ virq = irq_find_mapping(domain, hwirq); if (virq) { /* * If the trigger type is not specified or matches the * current trigger type then we are done so return the * interrupt number. */ if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq)) goto out; /* * If the trigger type has not been set yet, then set * it now and return the interrupt number. 
*/ if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) { irq_data = irq_get_irq_data(virq); if (!irq_data) { virq = 0; goto out; } irqd_set_trigger_type(irq_data, type); goto out; } pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n", hwirq, of_node_full_name(to_of_node(fwspec->fwnode))); virq = 0; goto out; } if (irq_domain_is_hierarchy(domain)) { if (irq_domain_is_msi_device(domain)) { mutex_unlock(&domain->root->mutex); virq = msi_device_domain_alloc_wired(domain, hwirq, type); mutex_lock(&domain->root->mutex); } else virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE, fwspec, false, NULL); if (virq <= 0) { virq = 0; goto out; } } else { /* Create mapping */ virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL); if (!virq) goto out; } irq_data = irq_get_irq_data(virq); if (WARN_ON(!irq_data)) { virq = 0; goto out; } /* Store trigger type */ irqd_set_trigger_type(irq_data, type); out: mutex_unlock(&domain->root->mutex); return virq; } EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping); unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) { struct irq_fwspec fwspec; of_phandle_args_to_fwspec(irq_data->np, irq_data->args, irq_data->args_count, &fwspec); return irq_create_fwspec_mapping(&fwspec); } EXPORT_SYMBOL_GPL(irq_create_of_mapping); /** * irq_dispose_mapping() - Unmap an interrupt * @virq: linux irq number of the interrupt to unmap */ void irq_dispose_mapping(unsigned int virq) { struct irq_data *irq_data; struct irq_domain *domain; irq_data = virq ? irq_get_irq_data(virq) : NULL; if (!irq_data) return; domain = irq_data->domain; if (WARN_ON(domain == NULL)) return; if (irq_domain_is_hierarchy(domain)) { irq_domain_free_one_irq(domain, virq); } else { irq_domain_disassociate(domain, virq); irq_free_desc(virq); } } EXPORT_SYMBOL_GPL(irq_dispose_mapping); /** * __irq_resolve_mapping() - Find a linux irq from a hw irq number. * @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space * @irq: optional pointer to return the Linux irq if required * * Returns the interrupt descriptor. */ struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain, irq_hw_number_t hwirq, unsigned int *irq) { struct irq_desc *desc = NULL; struct irq_data *data; /* Look for default domain if necessary */ if (domain == NULL) domain = irq_default_domain; if (domain == NULL) return desc; if (irq_domain_is_nomap(domain)) { if (hwirq < domain->hwirq_max) { data = irq_domain_get_irq_data(domain, hwirq); if (data && data->hwirq == hwirq) desc = irq_data_to_desc(data); if (irq && desc) *irq = hwirq; } return desc; } rcu_read_lock(); /* Check if the hwirq is in the linear revmap. 
*/ if (hwirq < domain->revmap_size) data = rcu_dereference(domain->revmap[hwirq]); else data = radix_tree_lookup(&domain->revmap_tree, hwirq); if (likely(data)) { desc = irq_data_to_desc(data); if (irq) *irq = data->irq; } rcu_read_unlock(); return desc; } EXPORT_SYMBOL_GPL(__irq_resolve_mapping); /** * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings * @d: Interrupt domain involved in the translation * @ctrlr: The device tree node for the device whose interrupt is translated * @intspec: The interrupt specifier data from the device tree * @intsize: The number of entries in @intspec * @out_hwirq: Pointer to storage for the hardware interrupt number * @out_type: Pointer to storage for the interrupt type * * Device Tree IRQ specifier translation function which works with one cell * bindings where the cell value maps directly to the hwirq number. */ int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type) { if (WARN_ON(intsize < 1)) return -EINVAL; *out_hwirq = intspec[0]; *out_type = IRQ_TYPE_NONE; return 0; } EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell); /** * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings * @d: Interrupt domain involved in the translation * @ctrlr: The device tree node for the device whose interrupt is translated * @intspec: The interrupt specifier data from the device tree * @intsize: The number of entries in @intspec * @out_hwirq: Pointer to storage for the hardware interrupt number * @out_type: Pointer to storage for the interrupt type * * Device Tree IRQ specifier translation function which works with two cell * bindings where the cell values map directly to the hwirq number * and linux irq flags. */ int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type) { struct irq_fwspec fwspec; of_phandle_args_to_fwspec(ctrlr, intspec, intsize, &fwspec); return irq_domain_translate_twocell(d, &fwspec, out_hwirq, out_type); } EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell); /** * irq_domain_xlate_twothreecell() - Generic xlate for direct two or three cell bindings * @d: Interrupt domain involved in the translation * @ctrlr: The device tree node for the device whose interrupt is translated * @intspec: The interrupt specifier data from the device tree * @intsize: The number of entries in @intspec * @out_hwirq: Pointer to storage for the hardware interrupt number * @out_type: Pointer to storage for the interrupt type * * Device Tree interrupt specifier translation function for two or three * cell bindings, where the cell values map directly to the hardware * interrupt number and the type specifier. 
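 * * For instance (illustrative specifier values): a two-cell entry of <5 IRQ_TYPE_EDGE_RISING> yields hwirq 5, while a three-cell entry of <0 5 IRQ_TYPE_EDGE_RISING> takes the hwirq from the second cell and the trigger type from the third, so both translate to hwirq 5 with a rising-edge trigger.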
*/ int irq_domain_xlate_twothreecell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type) { struct irq_fwspec fwspec; of_phandle_args_to_fwspec(ctrlr, intspec, intsize, &fwspec); return irq_domain_translate_twothreecell(d, &fwspec, out_hwirq, out_type); } EXPORT_SYMBOL_GPL(irq_domain_xlate_twothreecell); /** * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings * @d: Interrupt domain involved in the translation * @ctrlr: The device tree node for the device whose interrupt is translated * @intspec: The interrupt specifier data from the device tree * @intsize: The number of entries in @intspec * @out_hwirq: Pointer to storage for the hardware interrupt number * @out_type: Pointer to storage for the interrupt type * * Device Tree IRQ specifier translation function which works with either one * or two cell bindings where the cell values map directly to the hwirq number * and linux irq flags. * * Note: don't use this function unless your interrupt controller explicitly * supports both one and two cell bindings. For the majority of controllers * the _onecell() or _twocell() variants above should be used. */ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type) { if (WARN_ON(intsize < 1)) return -EINVAL; *out_hwirq = intspec[0]; if (intsize > 1) *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK; else *out_type = IRQ_TYPE_NONE; return 0; } EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell); const struct irq_domain_ops irq_domain_simple_ops = { .xlate = irq_domain_xlate_onetwocell, }; EXPORT_SYMBOL_GPL(irq_domain_simple_ops); /** * irq_domain_translate_onecell() - Generic translate for direct one cell * bindings * @d: Interrupt domain involved in the translation * @fwspec: The firmware interrupt specifier to translate * @out_hwirq: Pointer to storage for the hardware interrupt number * @out_type: Pointer to storage for the interrupt type */ int irq_domain_translate_onecell(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *out_hwirq, unsigned int *out_type) { if (WARN_ON(fwspec->param_count < 1)) return -EINVAL; *out_hwirq = fwspec->param[0]; *out_type = IRQ_TYPE_NONE; return 0; } EXPORT_SYMBOL_GPL(irq_domain_translate_onecell); /** * irq_domain_translate_twocell() - Generic translate for direct two cell * bindings * @d: Interrupt domain involved in the translation * @fwspec: The firmware interrupt specifier to translate * @out_hwirq: Pointer to storage for the hardware interrupt number * @out_type: Pointer to storage for the interrupt type * * Device Tree IRQ specifier translation function which works with two cell * bindings where the cell values map directly to the hwirq number * and linux irq flags. 
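 * * E.g. a specifier of <17 IRQ_TYPE_LEVEL_HIGH> (illustrative values) produces *out_hwirq == 17 and *out_type == IRQ_TYPE_LEVEL_HIGH; any bits outside IRQ_TYPE_SENSE_MASK in the second cell are masked off.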
*/ int irq_domain_translate_twocell(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *out_hwirq, unsigned int *out_type) { if (WARN_ON(fwspec->param_count < 2)) return -EINVAL; *out_hwirq = fwspec->param[0]; *out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; return 0; } EXPORT_SYMBOL_GPL(irq_domain_translate_twocell); /** * irq_domain_translate_twothreecell() - Generic translate for direct two or three cell * bindings * @d: Interrupt domain involved in the translation * @fwspec: The firmware interrupt specifier to translate * @out_hwirq: Pointer to storage for the hardware interrupt number * @out_type: Pointer to storage for the interrupt type * * Firmware interrupt specifier translation function for two or three cell * specifications, where the parameter values map directly to the hardware * interrupt number and the type specifier. */ int irq_domain_translate_twothreecell(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *out_hwirq, unsigned int *out_type) { if (fwspec->param_count == 2) { *out_hwirq = fwspec->param[0]; *out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; return 0; } if (fwspec->param_count == 3) { *out_hwirq = fwspec->param[1]; *out_type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; return 0; } return -EINVAL; } EXPORT_SYMBOL_GPL(irq_domain_translate_twothreecell); int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq, int node, const struct irq_affinity_desc *affinity) { unsigned int hint; if (virq >= 0) { virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE, affinity); } else { hint = hwirq % irq_get_nr_irqs(); if (hint == 0) hint++; virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE, affinity); if (virq <= 0 && hint > 1) { virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE, affinity); } } return virq; } /** * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data * @irq_data: The pointer to irq_data */ void irq_domain_reset_irq_data(struct irq_data *irq_data) { irq_data->hwirq = 0; irq_data->chip = &no_irq_chip; irq_data->chip_data = NULL; } EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY static void irq_domain_insert_irq(int virq) { struct irq_data *data; for (data = irq_get_irq_data(virq); data; data = data->parent_data) { struct irq_domain *domain = data->domain; domain->mapcount++; irq_domain_set_mapping(domain, data->hwirq, data); } irq_clear_status_flags(virq, IRQ_NOREQUEST); } static void irq_domain_remove_irq(int virq) { struct irq_data *data; irq_set_status_flags(virq, IRQ_NOREQUEST); irq_set_chip_and_handler(virq, NULL, NULL); synchronize_irq(virq); smp_mb(); for (data = irq_get_irq_data(virq); data; data = data->parent_data) { struct irq_domain *domain = data->domain; irq_hw_number_t hwirq = data->hwirq; domain->mapcount--; irq_domain_clear_mapping(domain, hwirq); } } static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain, struct irq_data *child) { struct irq_data *irq_data; irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL, irq_data_get_node(child)); if (irq_data) { child->parent_data = irq_data; irq_data->irq = child->irq; irq_data->common = child->common; irq_data->domain = domain; } return irq_data; } static void __irq_domain_free_hierarchy(struct irq_data *irq_data) { struct irq_data *tmp; while (irq_data) { tmp = irq_data; irq_data = irq_data->parent_data; kfree(tmp); } } static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs) { struct irq_data *irq_data, *tmp; int i; for (i = 0; i < 
nr_irqs; i++) { irq_data = irq_get_irq_data(virq + i); tmp = irq_data->parent_data; irq_data->parent_data = NULL; irq_data->domain = NULL; __irq_domain_free_hierarchy(tmp); } } /** * irq_domain_disconnect_hierarchy - Mark the first unused level of a hierarchy * @domain: IRQ domain from which the hierarchy is to be disconnected * @virq: IRQ number where the hierarchy is to be trimmed * * Marks the @virq level belonging to @domain as disconnected. * Returns -EINVAL if @virq doesn't have a valid irq_data pointing * to @domain. * * Its only use is to be able to trim levels of hierarchy that do not * have any real meaning for this interrupt, and that the driver marks * as such from its .alloc() callback. */ int irq_domain_disconnect_hierarchy(struct irq_domain *domain, unsigned int virq) { struct irq_data *irqd; irqd = irq_domain_get_irq_data(domain, virq); if (!irqd) return -EINVAL; irqd->chip = ERR_PTR(-ENOTCONN); return 0; } EXPORT_SYMBOL_GPL(irq_domain_disconnect_hierarchy); static int irq_domain_trim_hierarchy(unsigned int virq) { struct irq_data *tail, *irqd, *irq_data; irq_data = irq_get_irq_data(virq); tail = NULL; /* The first entry must have a valid irqchip */ if (IS_ERR_OR_NULL(irq_data->chip)) return -EINVAL; /* * Validate that the irq_data chain is sane in the presence of * a hierarchy trimming marker. */ for (irqd = irq_data->parent_data; irqd; irq_data = irqd, irqd = irqd->parent_data) { /* Can't have a valid irqchip after a trim marker */ if (irqd->chip && tail) return -EINVAL; /* Can't have an empty irqchip before a trim marker */ if (!irqd->chip && !tail) return -EINVAL; if (IS_ERR(irqd->chip)) { /* Only -ENOTCONN is a valid trim marker */ if (PTR_ERR(irqd->chip) != -ENOTCONN) return -EINVAL; tail = irq_data; } } /* No trim marker, nothing to do */ if (!tail) return 0; pr_info("IRQ%d: trimming hierarchy from %s\n", virq, tail->parent_data->domain->name); /* Sever the inner part of the hierarchy... 
*/ irqd = tail; tail = tail->parent_data; irqd->parent_data = NULL; __irq_domain_free_hierarchy(tail); return 0; } static int irq_domain_alloc_irq_data(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *irq_data; struct irq_domain *parent; int i; /* The outermost irq_data is embedded in struct irq_desc */ for (i = 0; i < nr_irqs; i++) { irq_data = irq_get_irq_data(virq + i); irq_data->domain = domain; for (parent = domain->parent; parent; parent = parent->parent) { irq_data = irq_domain_insert_irq_data(parent, irq_data); if (!irq_data) { irq_domain_free_irq_data(virq, i + 1); return -ENOMEM; } } } return 0; } /** * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain * @domain: domain to match * @virq: IRQ number to get irq_data */ struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq) { struct irq_data *irq_data; for (irq_data = irq_get_irq_data(virq); irq_data; irq_data = irq_data->parent_data) if (irq_data->domain == domain) return irq_data; return NULL; } EXPORT_SYMBOL_GPL(irq_domain_get_irq_data); /** * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain * @domain: Interrupt domain to match * @virq: IRQ number * @hwirq: The hwirq number * @chip: The associated interrupt chip * @chip_data: The associated chip data */ int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, const struct irq_chip *chip, void *chip_data) { struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); if (!irq_data) return -ENOENT; irq_data->hwirq = hwirq; irq_data->chip = (struct irq_chip *)(chip ? chip : &no_irq_chip); irq_data->chip_data = chip_data; return 0; } EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip); /** * irq_domain_set_info - Set the complete data for a @virq in @domain * @domain: Interrupt domain to match * @virq: IRQ number * @hwirq: The hardware interrupt number * @chip: The associated interrupt chip * @chip_data: The associated interrupt chip data * @handler: The interrupt flow handler * @handler_data: The interrupt flow handler data * @handler_name: The interrupt handler name */ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name) { irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data); __irq_set_handler(virq, handler, 0, handler_name); irq_set_handler_data(virq, handler_data); } EXPORT_SYMBOL(irq_domain_set_info); /** * irq_domain_free_irqs_common - Clear irq_data and free the parent * @domain: Interrupt domain to match * @virq: IRQ number to start with * @nr_irqs: The number of irqs to free */ void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *irq_data; int i; for (i = 0; i < nr_irqs; i++) { irq_data = irq_domain_get_irq_data(domain, virq + i); if (irq_data) irq_domain_reset_irq_data(irq_data); } irq_domain_free_irqs_parent(domain, virq, nr_irqs); } EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common); /** * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent * @domain: Interrupt domain to match * @virq: IRQ number to start with * @nr_irqs: The number of irqs to free */ void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { int i; for (i = 0; i < nr_irqs; i++) { irq_set_handler_data(virq + i, NULL); 
irq_set_handler(virq + i, NULL); } irq_domain_free_irqs_common(domain, virq, nr_irqs); } static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs) { unsigned int i; if (!domain->ops->free) return; for (i = 0; i < nr_irqs; i++) { if (irq_domain_get_irq_data(domain, irq_base + i)) domain->ops->free(domain, irq_base + i, 1); } } static int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs, void *arg) { if (!domain->ops->alloc) { pr_debug("domain->ops->alloc() is NULL\n"); return -ENOSYS; } return domain->ops->alloc(domain, irq_base, nr_irqs, arg); } static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc, const struct irq_affinity_desc *affinity) { int i, ret, virq; if (realloc && irq_base >= 0) { virq = irq_base; } else { virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node, affinity); if (virq < 0) { pr_debug("cannot allocate IRQ(base %d, count %d)\n", irq_base, nr_irqs); return virq; } } if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) { pr_debug("cannot allocate memory for IRQ%d\n", virq); ret = -ENOMEM; goto out_free_desc; } ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg); if (ret < 0) goto out_free_irq_data; for (i = 0; i < nr_irqs; i++) { ret = irq_domain_trim_hierarchy(virq + i); if (ret) goto out_free_irq_data; } for (i = 0; i < nr_irqs; i++) irq_domain_insert_irq(virq + i); return virq; out_free_irq_data: irq_domain_free_irq_data(virq, nr_irqs); out_free_desc: irq_free_descs(virq, nr_irqs); return ret; } /** * __irq_domain_alloc_irqs - Allocate IRQs from domain * @domain: domain to allocate from * @irq_base: allocate specified IRQ number if irq_base >= 0 * @nr_irqs: number of IRQs to allocate * @node: NUMA node id for memory allocation * @arg: domain specific argument * @realloc: IRQ descriptors have already been allocated if true * @affinity: Optional irq affinity mask for multiqueue devices * * Allocate IRQ numbers and initialize all data structures to support * hierarchy IRQ domains. * Parameter @realloc is mainly to support legacy IRQs. * Returns an error code or the allocated IRQ number. * * The whole process to set up an IRQ has been split into two steps. * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ * descriptors and required hardware resources. The second step, * irq_domain_activate_irq(), is to program the hardware with preallocated * resources. In this way, it's easier to roll back when failing to * allocate resources. */ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc, const struct irq_affinity_desc *affinity) { int ret; if (domain == NULL) { domain = irq_default_domain; if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n")) return -EINVAL; } mutex_lock(&domain->root->mutex); ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg, realloc, affinity); mutex_unlock(&domain->root->mutex); return ret; } EXPORT_SYMBOL_GPL(__irq_domain_alloc_irqs); /* The irq_data was moved, fix the revmap to refer to the new location */ static void irq_domain_fix_revmap(struct irq_data *d) { void __rcu **slot; lockdep_assert_held(&d->domain->root->mutex); if (irq_domain_is_nomap(d->domain)) return; /* Fix up the revmap. 
*/ if (d->hwirq < d->domain->revmap_size) { /* Not using radix tree */ rcu_assign_pointer(d->domain->revmap[d->hwirq], d); } else { slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq); if (slot) radix_tree_replace_slot(&d->domain->revmap_tree, slot, d); } } /** * irq_domain_push_irq() - Push a domain into the top of a hierarchy. * @domain: Domain to push. * @virq: Irq to push the domain into. * @arg: Passed to the irq_domain_ops alloc() function. * * For an already existing irqdomain hierarchy, as might be obtained * via a call to pci_enable_msix(), add an additional domain to the * head of the processing chain. Must be called before request_irq() * has been called. */ int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg) { struct irq_data *irq_data = irq_get_irq_data(virq); struct irq_data *parent_irq_data; struct irq_desc *desc; int rv = 0; /* * Check that no action has been set, which indicates the virq * is in a state where this function doesn't have to deal with * races between interrupt handling and maintaining the * hierarchy. This will catch gross misuse. Attempting to * make the check race-free would require holding locks across * calls to struct irq_domain_ops->alloc(), which could lead * to deadlock, so we just do a simple check before starting. */ desc = irq_to_desc(virq); if (!desc) return -EINVAL; if (WARN_ON(desc->action)) return -EBUSY; if (domain == NULL) return -EINVAL; if (WARN_ON(!irq_domain_is_hierarchy(domain))) return -EINVAL; if (!irq_data) return -EINVAL; if (domain->parent != irq_data->domain) return -EINVAL; parent_irq_data = kzalloc_node(sizeof(*parent_irq_data), GFP_KERNEL, irq_data_get_node(irq_data)); if (!parent_irq_data) return -ENOMEM; mutex_lock(&domain->root->mutex); /* Copy the original irq_data. */ *parent_irq_data = *irq_data; /* * Overwrite the irq_data, which is embedded in struct irq_desc, with * values for this domain. */ irq_data->parent_data = parent_irq_data; irq_data->domain = domain; irq_data->mask = 0; irq_data->hwirq = 0; irq_data->chip = NULL; irq_data->chip_data = NULL; /* May (probably does) set hwirq, chip, etc. */ rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg); if (rv) { /* Restore the original irq_data. */ *irq_data = *parent_irq_data; kfree(parent_irq_data); goto error; } irq_domain_fix_revmap(parent_irq_data); irq_domain_set_mapping(domain, irq_data->hwirq, irq_data); error: mutex_unlock(&domain->root->mutex); return rv; } EXPORT_SYMBOL_GPL(irq_domain_push_irq); /** * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy. * @domain: Domain to remove. * @virq: Irq to remove the domain from. * * Undo the effects of a call to irq_domain_push_irq(). Must be * called either before request_irq() or after free_irq(). */ int irq_domain_pop_irq(struct irq_domain *domain, int virq) { struct irq_data *irq_data = irq_get_irq_data(virq); struct irq_data *parent_irq_data; struct irq_data *tmp_irq_data; struct irq_desc *desc; /* * Check that no action is set, which indicates the virq is in * a state where this function doesn't have to deal with races * between interrupt handling and maintaining the hierarchy. * This will catch gross misuse. Attempting to make the check * race-free would require holding locks across calls to * struct irq_domain_ops->free(), which could lead to * deadlock, so we just do a simple check before starting. 
*/ desc = irq_to_desc(virq); if (!desc) return -EINVAL; if (WARN_ON(desc->action)) return -EBUSY; if (domain == NULL) return -EINVAL; if (!irq_data) return -EINVAL; tmp_irq_data = irq_domain_get_irq_data(domain, virq); /* We can only "pop" if this domain is at the top of the list */ if (WARN_ON(irq_data != tmp_irq_data)) return -EINVAL; if (WARN_ON(irq_data->domain != domain)) return -EINVAL; parent_irq_data = irq_data->parent_data; if (WARN_ON(!parent_irq_data)) return -EINVAL; mutex_lock(&domain->root->mutex); irq_data->parent_data = NULL; irq_domain_clear_mapping(domain, irq_data->hwirq); irq_domain_free_irqs_hierarchy(domain, virq, 1); /* Restore the original irq_data. */ *irq_data = *parent_irq_data; irq_domain_fix_revmap(irq_data); mutex_unlock(&domain->root->mutex); kfree(parent_irq_data); return 0; } EXPORT_SYMBOL_GPL(irq_domain_pop_irq); /** * irq_domain_free_irqs - Free IRQ number and associated data structures * @virq: base IRQ number * @nr_irqs: number of IRQs to free */ void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) { struct irq_data *data = irq_get_irq_data(virq); struct irq_domain *domain; int i; if (WARN(!data || !data->domain || !data->domain->ops->free, "NULL pointer, cannot free irq\n")) return; domain = data->domain; mutex_lock(&domain->root->mutex); for (i = 0; i < nr_irqs; i++) irq_domain_remove_irq(virq + i); irq_domain_free_irqs_hierarchy(domain, virq, nr_irqs); mutex_unlock(&domain->root->mutex); irq_domain_free_irq_data(virq, nr_irqs); irq_free_descs(virq, nr_irqs); } static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq) { if (irq_domain_is_msi_device(domain)) msi_device_domain_free_wired(domain, virq); else irq_domain_free_irqs(virq, 1); } /** * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain * @domain: Domain below which interrupts must be allocated * @irq_base: Base IRQ number * @nr_irqs: Number of IRQs to allocate * @arg: Allocation data (arch/domain specific) */ int irq_domain_alloc_irqs_parent(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs, void *arg) { if (!domain->parent) return -ENOSYS; return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base, nr_irqs, arg); } EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent); /** * irq_domain_free_irqs_parent - Free interrupts from parent domain * @domain: Domain below which interrupts must be freed * @irq_base: Base IRQ number * @nr_irqs: Number of IRQs to free */ void irq_domain_free_irqs_parent(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs) { if (!domain->parent) return; irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs); } EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); static void __irq_domain_deactivate_irq(struct irq_data *irq_data) { if (irq_data && irq_data->domain) { struct irq_domain *domain = irq_data->domain; if (domain->ops->deactivate) domain->ops->deactivate(domain, irq_data); if (irq_data->parent_data) __irq_domain_deactivate_irq(irq_data->parent_data); } } static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve) { int ret = 0; if (irqd && irqd->domain) { struct irq_domain *domain = irqd->domain; if (irqd->parent_data) ret = __irq_domain_activate_irq(irqd->parent_data, reserve); if (!ret && domain->ops->activate) { ret = domain->ops->activate(domain, irqd, reserve); /* Rollback in case of error */ if (ret && irqd->parent_data) __irq_domain_deactivate_irq(irqd->parent_data); } } return ret; } /** * irq_domain_activate_irq - Call domain_ops->activate 
recursively to activate * interrupt * @irq_data: Outermost irq_data associated with interrupt * @reserve: If set only reserve an interrupt vector instead of assigning one * * This is the second step to call domain_ops->activate to program interrupt * controllers, so the interrupt could actually get delivered. */ int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve) { int ret = 0; if (!irqd_is_activated(irq_data)) ret = __irq_domain_activate_irq(irq_data, reserve); if (!ret) irqd_set_activated(irq_data); return ret; } /** * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to * deactivate interrupt * @irq_data: outermost irq_data associated with interrupt * * It calls domain_ops->deactivate to program interrupt controllers to disable * interrupt delivery. */ void irq_domain_deactivate_irq(struct irq_data *irq_data) { if (irqd_is_activated(irq_data)) { __irq_domain_deactivate_irq(irq_data); irqd_clr_activated(irq_data); } } static void irq_domain_check_hierarchy(struct irq_domain *domain) { /* Hierarchy irq_domains must implement callback alloc() */ if (domain->ops->alloc) domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; } #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ /* * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain * @domain: domain to match * @virq: IRQ number to get irq_data */ struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq) { struct irq_data *irq_data = irq_get_irq_data(virq); return (irq_data && irq_data->domain == domain) ? irq_data : NULL; } EXPORT_SYMBOL_GPL(irq_domain_get_irq_data); /* * irq_domain_set_info - Set the complete data for a @virq in @domain * @domain: Interrupt domain to match * @virq: IRQ number * @hwirq: The hardware interrupt number * @chip: The associated interrupt chip * @chip_data: The associated interrupt chip data * @handler: The interrupt flow handler * @handler_data: The interrupt flow handler data * @handler_name: The interrupt handler name */ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name) { irq_set_chip_and_handler_name(virq, chip, handler, handler_name); irq_set_chip_data(virq, chip_data); irq_set_handler_data(virq, handler_data); } static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc, const struct irq_affinity_desc *affinity) { return -EINVAL; } static void irq_domain_check_hierarchy(struct irq_domain *domain) { } static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq) { } #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ #ifdef CONFIG_GENERIC_IRQ_DEBUGFS #include "internals.h" static struct dentry *domain_dir; static const struct irq_bit_descr irqdomain_flags[] = { BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_HIERARCHY), BIT_MASK_DESCR(IRQ_DOMAIN_NAME_ALLOCATED), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_IPI_PER_CPU), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_IPI_SINGLE), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_MSI), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_ISOLATED_MSI), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_NO_MAP), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_MSI_PARENT), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_MSI_DEVICE), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_NONCORE), }; static void irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind) { seq_printf(m, "%*sname: %s\n", ind, "", d->name); seq_printf(m, "%*ssize: %u\n", ind + 1, "", d->revmap_size); seq_printf(m, 
"%*smapped: %u\n", ind + 1, "", d->mapcount); seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags); irq_debug_show_bits(m, ind, d->flags, irqdomain_flags, ARRAY_SIZE(irqdomain_flags)); if (d->ops && d->ops->debug_show) d->ops->debug_show(m, d, NULL, ind + 1); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY if (!d->parent) return; seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name); irq_domain_debug_show_one(m, d->parent, ind + 4); #endif } static int irq_domain_debug_show(struct seq_file *m, void *p) { struct irq_domain *d = m->private; /* Default domain? Might be NULL */ if (!d) { if (!irq_default_domain) return 0; d = irq_default_domain; } irq_domain_debug_show_one(m, d, 0); return 0; } DEFINE_SHOW_ATTRIBUTE(irq_domain_debug); static void debugfs_add_domain_dir(struct irq_domain *d) { if (!d->name || !domain_dir) return; debugfs_create_file(d->name, 0444, domain_dir, d, &irq_domain_debug_fops); } static void debugfs_remove_domain_dir(struct irq_domain *d) { debugfs_lookup_and_remove(d->name, domain_dir); } void __init irq_domain_debugfs_init(struct dentry *root) { struct irq_domain *d; domain_dir = debugfs_create_dir("domains", root); debugfs_create_file("default", 0444, domain_dir, NULL, &irq_domain_debug_fops); mutex_lock(&irq_domain_mutex); list_for_each_entry(d, &irq_domain_list, link) debugfs_add_domain_dir(d); mutex_unlock(&irq_domain_mutex); } #endif |
// SPDX-License-Identifier: GPL-2.0-or-later /* * TerraTec Cinergy T2/qanu USB2 DVB-T adapter. * * Copyright (C) 2007 Tomi Orava (tomimo@ncircle.nullnet.fi) * * Based on the dvb-usb-framework code and the * original Terratec Cinergy T2 driver by: * * Copyright (C) 2004 Daniel Mack <daniel@qanu.de> and * Holger Waechtler <holger@qanu.de> * * Protocol Spec published on http://qanu.de/specs/terratec_cinergyT2.pdf */ #include "cinergyT2.h" /* * convert linux-dvb frontend parameter set into TPS. * See ETSI ETS-300744, section 4.6.2, table 9 for details. * * This function is probably reusable and may better get placed in a support * library. * * We replace erroneous fields by default TPS fields (the ones with value 0).
*/ static uint16_t compute_tps(struct dtv_frontend_properties *op) { uint16_t tps = 0; switch (op->code_rate_HP) { case FEC_2_3: tps |= (1 << 7); break; case FEC_3_4: tps |= (2 << 7); break; case FEC_5_6: tps |= (3 << 7); break; case FEC_7_8: tps |= (4 << 7); break; case FEC_1_2: case FEC_AUTO: default: /* tps |= (0 << 7) */; } switch (op->code_rate_LP) { case FEC_2_3: tps |= (1 << 4); break; case FEC_3_4: tps |= (2 << 4); break; case FEC_5_6: tps |= (3 << 4); break; case FEC_7_8: tps |= (4 << 4); break; case FEC_1_2: case FEC_AUTO: default: /* tps |= (0 << 4) */; } switch (op->modulation) { case QAM_16: tps |= (1 << 13); break; case QAM_64: tps |= (2 << 13); break; case QPSK: default: /* tps |= (0 << 13) */; } switch (op->transmission_mode) { case TRANSMISSION_MODE_8K: tps |= (1 << 0); break; case TRANSMISSION_MODE_2K: default: /* tps |= (0 << 0) */; } switch (op->guard_interval) { case GUARD_INTERVAL_1_16: tps |= (1 << 2); break; case GUARD_INTERVAL_1_8: tps |= (2 << 2); break; case GUARD_INTERVAL_1_4: tps |= (3 << 2); break; case GUARD_INTERVAL_1_32: default: /* tps |= (0 << 2) */; } switch (op->hierarchy) { case HIERARCHY_1: tps |= (1 << 10); break; case HIERARCHY_2: tps |= (2 << 10); break; case HIERARCHY_4: tps |= (3 << 10); break; case HIERARCHY_NONE: default: /* tps |= (0 << 10) */; } return tps; } struct cinergyt2_fe_state { struct dvb_frontend fe; struct dvb_usb_device *d; unsigned char data[64]; struct mutex data_mutex; struct dvbt_get_status_msg status; }; static int cinergyt2_fe_read_status(struct dvb_frontend *fe, enum fe_status *status) { struct cinergyt2_fe_state *state = fe->demodulator_priv; int ret; mutex_lock(&state->data_mutex); state->data[0] = CINERGYT2_EP1_GET_TUNER_STATUS; ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, sizeof(state->status), 0); if (!ret) memcpy(&state->status, state->data, sizeof(state->status)); mutex_unlock(&state->data_mutex); if (ret < 0) return ret; *status = 0; if (0xffff - le16_to_cpu(state->status.gain) > 30) *status |= FE_HAS_SIGNAL; if (state->status.lock_bits & (1 << 6)) *status |= FE_HAS_LOCK; if (state->status.lock_bits & (1 << 5)) *status |= FE_HAS_SYNC; if (state->status.lock_bits & (1 << 4)) *status |= FE_HAS_CARRIER; if (state->status.lock_bits & (1 << 1)) *status |= FE_HAS_VITERBI; if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) != (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) *status &= ~FE_HAS_LOCK; return 0; } static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber) { struct cinergyt2_fe_state *state = fe->demodulator_priv; *ber = le32_to_cpu(state->status.viterbi_error_rate); return 0; } static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc) { struct cinergyt2_fe_state *state = fe->demodulator_priv; *unc = le32_to_cpu(state->status.uncorrected_block_count); return 0; } static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct cinergyt2_fe_state *state = fe->demodulator_priv; *strength = (0xffff - le16_to_cpu(state->status.gain)); return 0; } static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr) { struct cinergyt2_fe_state *state = fe->demodulator_priv; *snr = (state->status.snr << 8) | state->status.snr; return 0; } static int cinergyt2_fe_init(struct dvb_frontend *fe) { return 0; } static int cinergyt2_fe_sleep(struct dvb_frontend *fe) { deb_info("cinergyt2_fe_sleep() Called\n"); return 0; } static int cinergyt2_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune) { 
tune->min_delay_ms = 800; return 0; } static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *fep = &fe->dtv_property_cache; struct cinergyt2_fe_state *state = fe->demodulator_priv; struct dvbt_set_parameters_msg *param; int err; mutex_lock(&state->data_mutex); param = (void *)state->data; param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; param->tps = cpu_to_le16(compute_tps(fep)); param->freq = cpu_to_le32(fep->frequency / 1000); param->flags = 0; switch (fep->bandwidth_hz) { default: case 8000000: param->bandwidth = 8; break; case 7000000: param->bandwidth = 7; break; case 6000000: param->bandwidth = 6; break; } err = dvb_usb_generic_rw(state->d, state->data, sizeof(*param), state->data, 2, 0); if (err < 0) err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err); mutex_unlock(&state->data_mutex); return (err < 0) ? err : 0; } static void cinergyt2_fe_release(struct dvb_frontend *fe) { struct cinergyt2_fe_state *state = fe->demodulator_priv; kfree(state); } static const struct dvb_frontend_ops cinergyt2_fe_ops; struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d) { struct cinergyt2_fe_state *s = kzalloc(sizeof( struct cinergyt2_fe_state), GFP_KERNEL); if (s == NULL) return NULL; s->d = d; memcpy(&s->fe.ops, &cinergyt2_fe_ops, sizeof(struct dvb_frontend_ops)); s->fe.demodulator_priv = s; mutex_init(&s->data_mutex); return &s->fe; } static const struct dvb_frontend_ops cinergyt2_fe_ops = { .delsys = { SYS_DVBT }, .info = { .name = DRIVER_NAME, .frequency_min_hz = 174 * MHz, .frequency_max_hz = 862 * MHz, .frequency_stepsize_hz = 166667, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER | FE_CAN_MUTE_TS }, .release = cinergyt2_fe_release, .init = cinergyt2_fe_init, .sleep = cinergyt2_fe_sleep, .set_frontend = cinergyt2_fe_set_frontend, .get_tune_settings = cinergyt2_fe_get_tune_settings, .read_status = cinergyt2_fe_read_status, .read_ber = cinergyt2_fe_read_ber, .read_signal_strength = cinergyt2_fe_read_signal_strength, .read_snr = cinergyt2_fe_read_snr, .read_ucblocks = cinergyt2_fe_read_unc_blocks, }; |
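A worked example of the TPS packing done by compute_tps() above: for QAM_16, HP code rate 3/4, LP code rate AUTO, 8K transmission mode, guard interval 1/8 and no hierarchy, the switch arms contribute the bits below. example_tps() is an illustrative sketch, not part of the driver.

static u16 example_tps(void)
{
	u16 tps = 0;

	tps |= 2 << 7;	/* code_rate_HP = FEC_3_4 -> 0x0100 */
			/* code_rate_LP = FEC_AUTO -> 0 << 4 */
	tps |= 1 << 13;	/* modulation = QAM_16 -> 0x2000 */
	tps |= 1 << 0;	/* transmission_mode = TRANSMISSION_MODE_8K -> 0x0001 */
	tps |= 2 << 2;	/* guard_interval = GUARD_INTERVAL_1_8 -> 0x0008 */
			/* hierarchy = HIERARCHY_NONE -> 0 << 10 */

	return tps;	/* 0x2000 | 0x0100 | 0x0008 | 0x0001 = 0x2109 */
}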
#ifndef _LINUX_JHASH_H #define _LINUX_JHASH_H /* jhash.h: Jenkins hash support. * * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net) * * https://burtleburtle.net/bob/hash/ * * These are the credits from Bob's sources: * * lookup3.c, by Bob Jenkins, May 2006, Public Domain. * * These are functions for producing 32-bit hashes for hash table lookup. * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() * are externally useful functions. Routines to test the hash are included * if SELF_TEST is defined. You can use this free for any purpose. It's in * the public domain. It has no warranty. * * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@netfilter.org) * * I've modified Bob's hash to be useful in the Linux kernel, and * any bugs present are my fault. * Jozsef */ #include <linux/bitops.h> #include <linux/unaligned/packed_struct.h> /* Best hash sizes are of power of two */ #define jhash_size(n) ((u32)1<<(n)) /* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */ #define jhash_mask(n) (jhash_size(n)-1) /* __jhash_mix - mix 3 32-bit values reversibly. */ #define __jhash_mix(a, b, c) \ { \ a -= c; a ^= rol32(c, 4); c += b; \ b -= a; b ^= rol32(a, 6); a += c; \ c -= b; c ^= rol32(b, 8); b += a; \ a -= c; a ^= rol32(c, 16); c += b; \ b -= a; b ^= rol32(a, 19); a += c; \ c -= b; c ^= rol32(b, 4); b += a; \ } /* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */ #define __jhash_final(a, b, c) \ { \ c ^= b; c -= rol32(b, 14); \ a ^= c; a -= rol32(c, 11); \ b ^= a; b -= rol32(a, 25); \ c ^= b; c -= rol32(b, 16); \ a ^= c; a -= rol32(c, 4); \ b ^= a; b -= rol32(a, 14); \ c ^= b; c -= rol32(b, 24); \ } /* An arbitrary initial parameter */ #define JHASH_INITVAL 0xdeadbeef /* jhash - hash an arbitrary key * @k: sequence of bytes as key * @length: the length of the key * @initval: the previous hash, or an arbitrary value * * The generic version, hashes an arbitrary sequence of bytes. * No alignment or length assumptions are made about the input key. * * Returns the hash value of the key. The result depends on endianness.
*/ static inline u32 jhash(const void *key, u32 length, u32 initval) { u32 a, b, c; const u8 *k = key; /* Set up the internal state */ a = b = c = JHASH_INITVAL + length + initval; /* All but the last block: affect some 32 bits of (a,b,c) */ while (length > 12) { a += __get_unaligned_cpu32(k); b += __get_unaligned_cpu32(k + 4); c += __get_unaligned_cpu32(k + 8); __jhash_mix(a, b, c); length -= 12; k += 12; } /* Last block: affect all 32 bits of (c) */ switch (length) { case 12: c += (u32)k[11]<<24; fallthrough; case 11: c += (u32)k[10]<<16; fallthrough; case 10: c += (u32)k[9]<<8; fallthrough; case 9: c += k[8]; fallthrough; case 8: b += (u32)k[7]<<24; fallthrough; case 7: b += (u32)k[6]<<16; fallthrough; case 6: b += (u32)k[5]<<8; fallthrough; case 5: b += k[4]; fallthrough; case 4: a += (u32)k[3]<<24; fallthrough; case 3: a += (u32)k[2]<<16; fallthrough; case 2: a += (u32)k[1]<<8; fallthrough; case 1: a += k[0]; __jhash_final(a, b, c); break; case 0: /* Nothing left to add */ break; } return c; } /* jhash2 - hash an array of u32's * @k: the key which must be an array of u32's * @length: the number of u32's in the key * @initval: the previous hash, or an arbitrary value * * Returns the hash value of the key. */ static inline u32 jhash2(const u32 *k, u32 length, u32 initval) { u32 a, b, c; /* Set up the internal state */ a = b = c = JHASH_INITVAL + (length<<2) + initval; /* Handle most of the key */ while (length > 3) { a += k[0]; b += k[1]; c += k[2]; __jhash_mix(a, b, c); length -= 3; k += 3; } /* Handle the last 3 u32's */ switch (length) { case 3: c += k[2]; fallthrough; case 2: b += k[1]; fallthrough; case 1: a += k[0]; __jhash_final(a, b, c); break; case 0: /* Nothing left to add */ break; } return c; } /* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */ static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) { a += initval; b += initval; c += initval; __jhash_final(a, b, c); return c; } static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) { return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2)); } static inline u32 jhash_2words(u32 a, u32 b, u32 initval) { return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); } static inline u32 jhash_1word(u32 a, u32 initval) { return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2)); } #endif /* _LINUX_JHASH_H */ |
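A short usage sketch for the header above: callers hash a key with jhash() and reduce the result to a bucket index with jhash_mask(), which is valid because jhash_size() table sizes are powers of two. The 10-bit table size and the helper name are arbitrary example values, not kernel API.

#include <linux/jhash.h>

#define EXAMPLE_HASH_BITS 10	/* jhash_size(10) == 1024 buckets */

static u32 example_bucket(const void *key, u32 len, u32 seed)
{
	/* Masking replaces '%' because the table size is a power of two. */
	return jhash(key, len, seed) & jhash_mask(EXAMPLE_HASH_BITS);
}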
/*
SPDX-License-Identifier: GPL-2.0-or-later */ /* internal.h: mm/ internal definitions * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef __MM_INTERNAL_H #define __MM_INTERNAL_H #include <linux/fs.h> #include <linux/khugepaged.h> #include <linux/mm.h> #include <linux/mm_inline.h> #include <linux/pagemap.h> #include <linux/pagewalk.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/swap_cgroup.h> #include <linux/tracepoint-defs.h> /* Internal core VMA manipulation functions. */ #include "vma.h" struct folio_batch; /* * Maintains state across a page table move. The operation assumes both source * and destination VMAs already exist and are specified by the user. * * Partial moves are permitted, but the old and new ranges must both reside * within a VMA. * * mmap lock must be held in write and VMA write locks must be held on any VMA * that is visible. * * Use the PAGETABLE_MOVE() macro to initialise this struct. * * The old_addr and new_addr fields are updated as the page table move is * executed. * * NOTE: The page table move is affected by reading from [old_addr, old_end), * and old_addr may be updated for better page table alignment, so len_in * represents the length of the range being copied as specified by the user. */ struct pagetable_move_control { struct vm_area_struct *old; /* Source VMA. */ struct vm_area_struct *new; /* Destination VMA. */ unsigned long old_addr; /* Address from which the move begins. */ unsigned long old_end; /* Exclusive address at which old range ends. */ unsigned long new_addr; /* Address to move page tables to. */ unsigned long len_in; /* Bytes to remap specified by user. */ bool need_rmap_locks; /* Do rmap locks need to be taken? */ bool for_stack; /* Is this an early temp stack being moved? */ }; #define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_) \ struct pagetable_move_control name = { \ .old = old_, \ .new = new_, \ .old_addr = old_addr_, \ .old_end = (old_addr_) + (len_), \ .new_addr = new_addr_, \ .len_in = len_, \ } /* * The set of flags that only affect watermark checking and reclaim * behaviour. This is used by the MM to obey the caller constraints * about IO, FS and watermark checking while ignoring placement * hints such as HIGHMEM usage. */ #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\ __GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\ __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\ __GFP_NOLOCKDEP) /* The GFP flags allowed during early boot */ #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS)) /* Control allocation cpuset and node placement constraints */ #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) /* Do not use these with a slab allocator */ #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) /* * Different from WARN_ON_ONCE(), no warning will be issued * when we specify __GFP_NOWARN. */ #define WARN_ON_ONCE_GFP(cond, gfp) ({ \ static bool __section(".data..once") __warned; \ int __ret_warn_once = !!(cond); \ \ if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \ __warned = true; \ WARN_ON(1); \ } \ unlikely(__ret_warn_once); \ }) void page_writeback_init(void); /* * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages, * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). 
Hugetlb currently * leaves nr_pages_mapped at 0, but avoid surprise if it participates later. */ #define ENTIRELY_MAPPED 0x800000 #define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1) /* * Flags passed to __show_mem() and show_free_areas() to suppress output in * various contexts. */ #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */ /* * How many individual pages have an elevated _mapcount. Excludes * the folio's entire_mapcount. * * Don't use this function outside of debugging code. */ static inline int folio_nr_pages_mapped(const struct folio *folio) { if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) return -1; return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED; } /* * Retrieve the first entry of a folio based on a provided entry within the * folio. We cannot rely on folio->swap as there is no guarantee that it has * been initialized. Used for calling arch_swap_restore() */ static inline swp_entry_t folio_swap(swp_entry_t entry, const struct folio *folio) { swp_entry_t swap = { .val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)), }; return swap; } static inline void *folio_raw_mapping(const struct folio *folio) { unsigned long mapping = (unsigned long)folio->mapping; return (void *)(mapping & ~PAGE_MAPPING_FLAGS); } /* * This is a file-backed mapping, and is about to be memory mapped - invoke its * mmap hook and safely handle error conditions. On error, VMA hooks will be * mutated. * * @file: File which backs the mapping. * @vma: VMA which we are mapping. * * Returns: 0 if success, error otherwise. */ static inline int mmap_file(struct file *file, struct vm_area_struct *vma) { int err = call_mmap(file, vma); if (likely(!err)) return 0; /* * OK, we tried to call the file hook for mmap(), but an error * arose. The mapping is in an inconsistent state and we must not invoke * any further hooks on it. */ vma->vm_ops = &vma_dummy_vm_ops; return err; } /* * If the VMA has a close hook then close it, and since closing it might leave * it in an inconsistent state which makes the use of any hooks suspect, clear * them down by installing dummy empty hooks. */ static inline void vma_close(struct vm_area_struct *vma) { if (vma->vm_ops && vma->vm_ops->close) { vma->vm_ops->close(vma); /* * The mapping is in an inconsistent state, and no further hooks * may be invoked upon it. */ vma->vm_ops = &vma_dummy_vm_ops; } } #ifdef CONFIG_MMU /* Flags for folio_pte_batch(). */ typedef int __bitwise fpb_t; /* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */ #define FPB_IGNORE_DIRTY ((__force fpb_t)BIT(0)) /* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */ #define FPB_IGNORE_SOFT_DIRTY ((__force fpb_t)BIT(1)) static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags) { if (flags & FPB_IGNORE_DIRTY) pte = pte_mkclean(pte); if (likely(flags & FPB_IGNORE_SOFT_DIRTY)) pte = pte_clear_soft_dirty(pte); return pte_wrprotect(pte_mkold(pte)); } /** * folio_pte_batch - detect a PTE batch for a large folio * @folio: The large folio to detect a PTE batch for. * @addr: The user virtual address the first page is mapped at. * @start_ptep: Page table pointer for the first entry. * @pte: Page table entry for the first page. * @max_nr: The maximum number of table entries to consider. * @flags: Flags to modify the PTE batch semantics. * @any_writable: Optional pointer to indicate whether any entry except the * first one is writable. * @any_young: Optional pointer to indicate whether any entry except the * first one is young.
* @any_dirty: Optional pointer to indicate whether any entry except the * first one is dirty. * * Detect a PTE batch: consecutive (present) PTEs that map consecutive * pages of the same large folio. * * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN, * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY). * * start_ptep must map any page of the folio. max_nr must be at least one and * must be limited by the caller so scanning cannot exceed a single page table. * * Return: the number of table entries in the batch. */ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags, bool *any_writable, bool *any_young, bool *any_dirty) { pte_t expected_pte, *ptep; bool writable, young, dirty; int nr, cur_nr; if (any_writable) *any_writable = false; if (any_young) *any_young = false; if (any_dirty) *any_dirty = false; VM_WARN_ON_FOLIO(!pte_present(pte), folio); VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio); VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio); /* Limit max_nr to the actual remaining PFNs in the folio we could batch. */ max_nr = min_t(unsigned long, max_nr, folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte)); nr = pte_batch_hint(start_ptep, pte); expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags); ptep = start_ptep + nr; while (nr < max_nr) { pte = ptep_get(ptep); if (any_writable) writable = !!pte_write(pte); if (any_young) young = !!pte_young(pte); if (any_dirty) dirty = !!pte_dirty(pte); pte = __pte_batch_clear_ignored(pte, flags); if (!pte_same(pte, expected_pte)) break; if (any_writable) *any_writable |= writable; if (any_young) *any_young |= young; if (any_dirty) *any_dirty |= dirty; cur_nr = pte_batch_hint(ptep, pte); expected_pte = pte_advance_pfn(expected_pte, cur_nr); ptep += cur_nr; nr += cur_nr; } return min(nr, max_nr); } /** * pte_move_swp_offset - Move the swap entry offset field of a swap pte * forward or backward by delta * @pte: The initial pte state; is_swap_pte(pte) must be true and * non_swap_entry() must be false. * @delta: The direction and the offset we are moving; forward if delta * is positive; backward if delta is negative * * Moves the swap offset, while maintaining all other fields, including * swap type, and any swp pte bits. The resulting pte is returned. */ static inline pte_t pte_move_swp_offset(pte_t pte, long delta) { swp_entry_t entry = pte_to_swp_entry(pte); pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry), (swp_offset(entry) + delta))); if (pte_swp_soft_dirty(pte)) new = pte_swp_mksoft_dirty(new); if (pte_swp_exclusive(pte)) new = pte_swp_mkexclusive(new); if (pte_swp_uffd_wp(pte)) new = pte_swp_mkuffd_wp(new); return new; } /** * pte_next_swp_offset - Increment the swap entry offset field of a swap pte. * @pte: The initial pte state; is_swap_pte(pte) must be true and * non_swap_entry() must be false. * * Increments the swap offset, while maintaining all other fields, including * swap type, and any swp pte bits. The resulting pte is returned. */ static inline pte_t pte_next_swp_offset(pte_t pte) { return pte_move_swp_offset(pte, 1); } /** * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries * @start_ptep: Page table pointer for the first entry. * @max_nr: The maximum number of table entries to consider. * @pte: Page table entry for the first entry. 
* * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs * containing swap entries all with consecutive offsets and targeting the same * swap type, all with matching swp pte bits. * * max_nr must be at least one and must be limited by the caller so scanning * cannot exceed a single page table. * * Return: the number of table entries in the batch. */ static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte) { pte_t expected_pte = pte_next_swp_offset(pte); const pte_t *end_ptep = start_ptep + max_nr; swp_entry_t entry = pte_to_swp_entry(pte); pte_t *ptep = start_ptep + 1; unsigned short cgroup_id; VM_WARN_ON(max_nr < 1); VM_WARN_ON(!is_swap_pte(pte)); VM_WARN_ON(non_swap_entry(entry)); cgroup_id = lookup_swap_cgroup_id(entry); while (ptep < end_ptep) { pte = ptep_get(ptep); if (!pte_same(pte, expected_pte)) break; if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id) break; expected_pte = pte_next_swp_offset(expected_pte); ptep++; } return ptep - start_ptep; } #endif /* CONFIG_MMU */ void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, int nr_throttled); static inline void acct_reclaim_writeback(struct folio *folio) { pg_data_t *pgdat = folio_pgdat(folio); int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled); if (nr_throttled) __acct_reclaim_writeback(pgdat, folio, nr_throttled); } static inline void wake_throttle_isolated(pg_data_t *pgdat) { wait_queue_head_t *wqh; wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED]; if (waitqueue_active(wqh)) wake_up(wqh); } vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf); static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf) { vm_fault_t ret = __vmf_anon_prepare(vmf); if (unlikely(ret & VM_FAULT_RETRY)) vma_end_read(vmf->vma); return ret; } vm_fault_t do_swap_page(struct vm_fault *vmf); void folio_rotate_reclaimable(struct folio *folio); bool __folio_end_writeback(struct folio *folio); void deactivate_file_folio(struct folio *folio); void folio_activate(struct folio *folio); void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling, bool mm_wr_locked); void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte); struct zap_details; void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct zap_details *details); void zap_page_range_single_batched(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long size, struct zap_details *details); int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio, gfp_t gfp); void page_cache_ra_order(struct readahead_control *, struct file_ra_state *, unsigned int order); void force_page_cache_ra(struct readahead_control *, unsigned long nr); static inline void force_page_cache_readahead(struct address_space *mapping, struct file *file, pgoff_t index, unsigned long nr_to_read) { DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index); force_page_cache_ra(&ractl, nr_to_read); } unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices); unsigned find_get_entries(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices); void filemap_free_folio(struct address_space *mapping, struct folio *folio); int truncate_inode_folio(struct address_space *mapping, struct folio *folio); bool truncate_inode_partial_folio(struct folio 
*folio, loff_t start, loff_t end); long mapping_evict_folio(struct address_space *mapping, struct folio *folio); unsigned long mapping_try_invalidate(struct address_space *mapping, pgoff_t start, pgoff_t end, unsigned long *nr_failed); /** * folio_evictable - Test whether a folio is evictable. * @folio: The folio to test. * * Test whether @folio is evictable -- i.e., should be placed on * active/inactive lists vs unevictable list. * * Reasons folio might not be evictable: * 1. folio's mapping marked unevictable * 2. One of the pages in the folio is part of an mlocked VMA */ static inline bool folio_evictable(struct folio *folio) { bool ret; /* Prevent address_space of inode and swap cache from being freed */ rcu_read_lock(); ret = !mapping_unevictable(folio_mapping(folio)) && !folio_test_mlocked(folio); rcu_read_unlock(); return ret; } /* * Turn a non-refcounted page (->_refcount == 0) into refcounted with * a count of one. */ static inline void set_page_refcounted(struct page *page) { VM_BUG_ON_PAGE(PageTail(page), page); VM_BUG_ON_PAGE(page_ref_count(page), page); set_page_count(page, 1); } /* * Return true if a folio needs ->release_folio() calling upon it. */ static inline bool folio_needs_release(struct folio *folio) { struct address_space *mapping = folio_mapping(folio); return folio_has_private(folio) || (mapping && mapping_release_always(mapping)); } extern unsigned long highest_memmap_pfn; /* * Maximum number of reclaim retries without progress before the OOM * killer is considered the only way forward. */ #define MAX_RECLAIM_RETRIES 16 /* * in mm/vmscan.c: */ bool folio_isolate_lru(struct folio *folio); void folio_putback_lru(struct folio *folio); extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason); /* * in mm/rmap.c: */ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address); /* * in mm/page_alloc.c */ #define K(x) ((x) << (PAGE_SHIFT-10)) extern char * const zone_names[MAX_NR_ZONES]; /* perform sanity checks on struct pages being allocated or freed */ DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled); extern int min_free_kbytes; extern int defrag_mode; void setup_per_zone_wmarks(void); void calculate_min_free_kbytes(void); int __meminit init_per_zone_wmark_min(void); void page_alloc_sysctl_init(void); /* * Structure for holding the mostly immutable allocation parameters passed * between functions involved in allocations, including the alloc_pages* * family of functions. * * nodemask, migratetype and highest_zoneidx are initialized only once in * __alloc_pages() and then never change. * * zonelist, preferred_zone and highest_zoneidx are set first in * __alloc_pages() for the fast path, and might be later changed * in __alloc_pages_slowpath(). All other functions pass the whole structure * by a const pointer. */ struct alloc_context { struct zonelist *zonelist; nodemask_t *nodemask; struct zoneref *preferred_zoneref; int migratetype; /* * highest_zoneidx represents highest usable zone index of * the allocation request. Due to the nature of the zone, * memory on lower zone than the highest_zoneidx will be * protected by lowmem_reserve[highest_zoneidx]. * * highest_zoneidx is also used by reclaim/compaction to limit * the target zone since higher zone than this index cannot be * usable for this allocation request. */ enum zone_type highest_zoneidx; bool spread_dirty_pages; }; /* * This function returns the order of a free page in the buddy system.
In * general, page_zone(page)->lock must be held by the caller to prevent the * page from being allocated in parallel and returning garbage as the order. * If a caller does not hold page_zone(page)->lock, it must guarantee that the * page cannot be allocated or merged in parallel. Alternatively, it must * handle invalid values gracefully, and use buddy_order_unsafe() below. */ static inline unsigned int buddy_order(struct page *page) { /* PageBuddy() must be checked by the caller */ return page_private(page); } /* * Like buddy_order(), but for callers who cannot afford to hold the zone lock. * PageBuddy() should be checked first by the caller to minimize race window, * and invalid values must be handled gracefully. * * READ_ONCE is used so that if the caller assigns the result into a local * variable and e.g. tests it for valid range before using, the compiler cannot * decide to remove the variable and inline the page_private(page) multiple * times, potentially observing different values in the tests and the actual * use of the result. */ #define buddy_order_unsafe(page) READ_ONCE(page_private(page)) /* * This function checks whether a page is free && is the buddy. * We can coalesce a page and its buddy if * (a) the buddy is not in a hole (check before calling!) && * (b) the buddy is in the buddy system && * (c) a page and its buddy have the same order && * (d) a page and its buddy are in the same zone. * * For recording whether a page is in the buddy system, we set PageBuddy. * Setting, clearing, and testing PageBuddy is serialized by zone->lock. * * For recording page's order, we use page_private(page). */ static inline bool page_is_buddy(struct page *page, struct page *buddy, unsigned int order) { if (!page_is_guard(buddy) && !PageBuddy(buddy)) return false; if (buddy_order(buddy) != order) return false; /* * zone check is done late to avoid uselessly calculating * zone/node ids for pages that could never merge. */ if (page_zone_id(page) != page_zone_id(buddy)) return false; VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); return true; } /* * Locate the struct page for both the matching buddy in our * pair (buddy1) and the combined O(n+1) page they form (page). * * 1) Any buddy B1 will have an order O twin B2 which satisfies * the following equation: * B2 = B1 ^ (1 << O) * For example, if the starting buddy (buddy2) is #8, its order * 1 buddy is #10: * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 * * 2) Any buddy B will have an order O+1 parent P which * satisfies the following equation: * P = B & ~(1 << O) * * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER */ static inline unsigned long __find_buddy_pfn(unsigned long page_pfn, unsigned int order) { return page_pfn ^ (1 << order); } /* * Find the buddy of @page and validate it. * @page: The input page * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the * function is used in the performance-critical __free_one_page(). * @order: The order of the page * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to * page_to_pfn(). * * The found buddy can be a non PageBuddy, out of @page's zone, or its order is * not the same as @page. The validation is necessary before using it. * * Return: the found buddy page or NULL if not found.
*/ static inline struct page *find_buddy_page_pfn(struct page *page, unsigned long pfn, unsigned int order, unsigned long *buddy_pfn) { unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order); struct page *buddy; buddy = page + (__buddy_pfn - pfn); if (buddy_pfn) *buddy_pfn = __buddy_pfn; if (page_is_buddy(page, buddy, order)) return buddy; return NULL; } extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn, unsigned long end_pfn, struct zone *zone); static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn, unsigned long end_pfn, struct zone *zone) { if (zone->contiguous) return pfn_to_page(start_pfn); return __pageblock_pfn_to_page(start_pfn, end_pfn, zone); } void set_zone_contiguous(struct zone *zone); bool pfn_range_intersects_zones(int nid, unsigned long start_pfn, unsigned long nr_pages); static inline void clear_zone_contiguous(struct zone *zone) { zone->contiguous = false; } extern int __isolate_free_page(struct page *page, unsigned int order); extern void __putback_isolated_page(struct page *page, unsigned int order, int mt); extern void memblock_free_pages(struct page *page, unsigned long pfn, unsigned int order); extern void __free_pages_core(struct page *page, unsigned int order, enum meminit_context context); /* * This will have no effect, other than possibly generating a warning, if the * caller passes in a non-large folio. */ static inline void folio_set_order(struct folio *folio, unsigned int order) { if (WARN_ON_ONCE(!order || !folio_test_large(folio))) return; folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order; #ifdef NR_PAGES_IN_LARGE_FOLIO folio->_nr_pages = 1U << order; #endif } bool __folio_unqueue_deferred_split(struct folio *folio); static inline bool folio_unqueue_deferred_split(struct folio *folio) { if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio)) return false; /* * At this point, there is no one trying to add the folio to * deferred_list. If folio is not in deferred_list, it's safe * to check without acquiring the split_queue_lock. */ if (data_race(list_empty(&folio->_deferred_list))) return false; return __folio_unqueue_deferred_split(folio); } static inline struct folio *page_rmappable_folio(struct page *page) { struct folio *folio = (struct folio *)page; if (folio && folio_test_large(folio)) folio_set_large_rmappable(folio); return folio; } static inline void prep_compound_head(struct page *page, unsigned int order) { struct folio *folio = (struct folio *)page; folio_set_order(folio, order); atomic_set(&folio->_large_mapcount, -1); if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) atomic_set(&folio->_nr_pages_mapped, 0); if (IS_ENABLED(CONFIG_MM_ID)) { folio->_mm_ids = 0; folio->_mm_id_mapcount[0] = -1; folio->_mm_id_mapcount[1] = -1; } if (IS_ENABLED(CONFIG_64BIT) || order > 1) { atomic_set(&folio->_pincount, 0); atomic_set(&folio->_entire_mapcount, -1); } if (order > 1) INIT_LIST_HEAD(&folio->_deferred_list); } static inline void prep_compound_tail(struct page *head, int tail_idx) { struct page *p = head + tail_idx; p->mapping = TAIL_MAPPING; set_compound_head(p, head); set_page_private(p, 0); } void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags); extern bool free_pages_prepare(struct page *page, unsigned int order); extern int user_min_free_kbytes; struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid, nodemask_t *); #define __alloc_frozen_pages(...) 
\ alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__)) void free_frozen_pages(struct page *page, unsigned int order); void free_unref_folios(struct folio_batch *fbatch); #ifdef CONFIG_NUMA struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order); #else static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order) { return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL); } #endif #define alloc_frozen_pages(...) \ alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__)) extern void zone_pcp_reset(struct zone *zone); extern void zone_pcp_disable(struct zone *zone); extern void zone_pcp_enable(struct zone *zone); extern void zone_pcp_init(struct zone *zone); extern void *memmap_alloc(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, int nid, bool exact_nid); void memmap_init_range(unsigned long, int, unsigned long, unsigned long, unsigned long, enum meminit_context, struct vmem_altmap *, int); #if defined CONFIG_COMPACTION || defined CONFIG_CMA /* * in mm/compaction.c */ /* * compact_control is used to track pages being migrated and the free pages * they are being migrated to during memory compaction. The free_pfn starts * at the end of a zone and migrate_pfn begins at the start. Movable pages * are moved to the end of a zone during a compaction run and the run * completes when free_pfn <= migrate_pfn */ struct compact_control { struct list_head freepages[NR_PAGE_ORDERS]; /* List of free pages to migrate to */ struct list_head migratepages; /* List of pages being migrated */ unsigned int nr_freepages; /* Number of isolated free pages */ unsigned int nr_migratepages; /* Number of pages to migrate */ unsigned long free_pfn; /* isolate_freepages search base */ /* * Acts as an in/out parameter to page isolation for migration. * isolate_migratepages uses it as a search base. * isolate_migratepages_block will update the value to the next pfn * after the last isolated one. */ unsigned long migrate_pfn; unsigned long fast_start_pfn; /* a pfn to start linear scan from */ struct zone *zone; unsigned long total_migrate_scanned; unsigned long total_free_scanned; unsigned short fast_search_fail;/* failures to use free list searches */ short search_order; /* order to start a fast search at */ const gfp_t gfp_mask; /* gfp mask of a direct compactor */ int order; /* order a direct compactor needs */ int migratetype; /* migratetype of direct compactor */ const unsigned int alloc_flags; /* alloc flags of a direct compactor */ const int highest_zoneidx; /* zone index of a direct compactor */ enum migrate_mode mode; /* Async or sync migration mode */ bool ignore_skip_hint; /* Scan blocks even if marked skip */ bool no_set_skip_hint; /* Don't mark blocks for skipping */ bool ignore_block_suitable; /* Scan blocks considered unsuitable */ bool direct_compaction; /* False from kcompactd or /proc/... */ bool proactive_compaction; /* kcompactd proactive compaction */ bool whole_zone; /* Whole zone should/has been scanned */ bool contended; /* Signal lock contention */ bool finish_pageblock; /* Scan the remainder of a pageblock. Used * when there are potentially transient * isolation or migration failures to * ensure forward progress. */ bool alloc_contig; /* alloc_contig_range allocation */ }; /* * Used in direct compaction when a page should be taken from the freelists * immediately when one is created during the free path. 
*/ struct capture_control { struct compact_control *cc; struct page *page; }; unsigned long isolate_freepages_range(struct compact_control *cc, unsigned long start_pfn, unsigned long end_pfn); int isolate_migratepages_range(struct compact_control *cc, unsigned long low_pfn, unsigned long end_pfn); /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ void init_cma_reserved_pageblock(struct page *page); #endif /* CONFIG_COMPACTION || CONFIG_CMA */ struct cma; #ifdef CONFIG_CMA void *cma_reserve_early(struct cma *cma, unsigned long size); void init_cma_pageblock(struct page *page); #else static inline void *cma_reserve_early(struct cma *cma, unsigned long size) { return NULL; } static inline void init_cma_pageblock(struct page *page) { } #endif int find_suitable_fallback(struct free_area *area, unsigned int order, int migratetype, bool claimable); static inline bool free_area_empty(struct free_area *area, int migratetype) { return list_empty(&area->free_list[migratetype]); } /* mm/util.c */ struct anon_vma *folio_anon_vma(const struct folio *folio); #ifdef CONFIG_MMU void unmap_mapping_folio(struct folio *folio); extern long populate_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *locked); extern long faultin_page_range(struct mm_struct *mm, unsigned long start, unsigned long end, bool write, int *locked); extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, unsigned long bytes); /* * NOTE: This function can't tell whether the folio is "fully mapped" in the * range. * "Fully mapped" means all the pages of the folio are associated with the * page table of the range, while this function only checks whether the folio * range lies within [start, end). The caller needs to do a page table check * if it cares about the page table association. * * Typical usage (like mlock or madvise) is: * The caller knows at least one page of the folio is associated with the page * table of the VMA, and the range [start, end) intersects the VMA range. The * caller wants to know whether the folio is fully associated with the range. * It calls this function first to check whether the folio is in the range, * then checks the page table to know whether the folio is fully mapped to * the range. */ static inline bool folio_within_range(struct folio *folio, struct vm_area_struct *vma, unsigned long start, unsigned long end) { pgoff_t pgoff, addr; unsigned long vma_pglen = vma_pages(vma); VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio); if (start > end) return false; if (start < vma->vm_start) start = vma->vm_start; if (end > vma->vm_end) end = vma->vm_end; pgoff = folio_pgoff(folio); /* if folio start address is not in vma range */ if (!in_range(pgoff, vma->vm_pgoff, vma_pglen)) return false; addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); return !(addr < start || end - addr < folio_size(folio)); } static inline bool folio_within_vma(struct folio *folio, struct vm_area_struct *vma) { return folio_within_range(folio, vma, vma->vm_start, vma->vm_end); } /* * mlock_vma_folio() and munlock_vma_folio(): * should be called with vma's mmap_lock held for read or write, * under page table lock for the pte/pmd being added or removed. * * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at * the end of folio_remove_rmap_*(); but new anon folios are managed by * folio_add_lru_vma() calling mlock_new_folio().
*/ void mlock_folio(struct folio *folio); static inline void mlock_vma_folio(struct folio *folio, struct vm_area_struct *vma) { /* * The VM_SPECIAL check here serves two purposes. * 1) VM_IO check prevents migration from double-counting during mlock. * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED * is never left set on a VM_SPECIAL vma, there is an interval while * file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may * still be set while VM_SPECIAL bits are added: so ignore it then. */ if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED)) mlock_folio(folio); } void munlock_folio(struct folio *folio); static inline void munlock_vma_folio(struct folio *folio, struct vm_area_struct *vma) { /* * Always munlock when this function is called. Ideally, we should * only munlock if some page of the folio is being unmapped from the * VMA, leaving the folio no longer fully mapped to it. * * But that is not easy to confirm, so we always munlock the folio and * let page reclaim correct it if that was wrong. */ if (unlikely(vma->vm_flags & VM_LOCKED)) munlock_folio(folio); } void mlock_new_folio(struct folio *folio); bool need_mlock_drain(int cpu); void mlock_drain_local(void); void mlock_drain_remote(int cpu); extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma); /** * vma_address - Find the virtual address a page range is mapped at * @vma: The vma which maps this object. * @pgoff: The page offset within its object. * @nr_pages: The number of pages to consider. * * If any page in this range is mapped by this VMA, return the first address * where any of these pages appear. Otherwise, return -EFAULT. */ static inline unsigned long vma_address(const struct vm_area_struct *vma, pgoff_t pgoff, unsigned long nr_pages) { unsigned long address; if (pgoff >= vma->vm_pgoff) { address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); /* Check for address beyond vma (or wrapped through 0?) */ if (address < vma->vm_start || address >= vma->vm_end) address = -EFAULT; } else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) { /* Test above avoids possibility of wrap to 0 on 32-bit */ address = vma->vm_start; } else { address = -EFAULT; } return address; } /* * Then at what user virtual address will none of the range be found in vma? * Assumes that vma_address() already returned a good starting address. */ static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw) { struct vm_area_struct *vma = pvmw->vma; pgoff_t pgoff; unsigned long address; /* Common case, plus ->pgoff is invalid for KSM */ if (pvmw->nr_pages == 1) return pvmw->address + PAGE_SIZE; pgoff = pvmw->pgoff + pvmw->nr_pages; address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); /* Check for address beyond vma (or wrapped through 0?) */ if (address < vma->vm_start || address > vma->vm_end) address = vma->vm_end; return address; } static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf, struct file *fpin) { int flags = vmf->flags; if (fpin) return fpin; /* * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or * anything, so we only pin the file and drop the mmap_lock if only * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
*/ if (fault_flag_allow_retry_first(flags) && !(flags & FAULT_FLAG_RETRY_NOWAIT)) { fpin = get_file(vmf->vma->vm_file); release_fault_lock(vmf); } return fpin; } #else /* !CONFIG_MMU */ static inline void unmap_mapping_folio(struct folio *folio) { } static inline void mlock_new_folio(struct folio *folio) { } static inline bool need_mlock_drain(int cpu) { return false; } static inline void mlock_drain_local(void) { } static inline void mlock_drain_remote(int cpu) { } static inline void vunmap_range_noflush(unsigned long start, unsigned long end) { } #endif /* !CONFIG_MMU */ /* Memory initialisation debug and verification */ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT DECLARE_STATIC_KEY_TRUE(deferred_pages); bool __init deferred_grow_zone(struct zone *zone, unsigned int order); #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ void init_deferred_page(unsigned long pfn, int nid); enum mminit_level { MMINIT_WARNING, MMINIT_VERIFY, MMINIT_TRACE }; #ifdef CONFIG_DEBUG_MEMORY_INIT extern int mminit_loglevel; #define mminit_dprintk(level, prefix, fmt, arg...) \ do { \ if (level < mminit_loglevel) { \ if (level <= MMINIT_WARNING) \ pr_warn("mminit::" prefix " " fmt, ##arg); \ else \ printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \ } \ } while (0) extern void mminit_verify_pageflags_layout(void); extern void mminit_verify_zonelist(void); #else static inline void mminit_dprintk(enum mminit_level level, const char *prefix, const char *fmt, ...) { } static inline void mminit_verify_pageflags_layout(void) { } static inline void mminit_verify_zonelist(void) { } #endif /* CONFIG_DEBUG_MEMORY_INIT */ #define NODE_RECLAIM_NOSCAN -2 #define NODE_RECLAIM_FULL -1 #define NODE_RECLAIM_SOME 0 #define NODE_RECLAIM_SUCCESS 1 #ifdef CONFIG_NUMA extern int node_reclaim_mode; extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int); extern int find_next_best_node(int node, nodemask_t *used_node_mask); #else #define node_reclaim_mode 0 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask, unsigned int order) { return NODE_RECLAIM_NOSCAN; } static inline int find_next_best_node(int node, nodemask_t *used_node_mask) { return NUMA_NO_NODE; } #endif static inline bool node_reclaim_enabled(void) { /* Is any node_reclaim_mode bit set? 
*/ return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP); } /* * mm/memory-failure.c */ #ifdef CONFIG_MEMORY_FAILURE int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill); void shake_folio(struct folio *folio); extern int hwpoison_filter(struct page *p); extern u32 hwpoison_filter_dev_major; extern u32 hwpoison_filter_dev_minor; extern u64 hwpoison_filter_flags_mask; extern u64 hwpoison_filter_flags_value; extern u64 hwpoison_filter_memcg; extern u32 hwpoison_filter_enable; #define MAGIC_HWPOISON 0x48575053U /* HWPS */ void SetPageHWPoisonTakenOff(struct page *page); void ClearPageHWPoisonTakenOff(struct page *page); bool take_page_off_buddy(struct page *page); bool put_page_back_buddy(struct page *page); struct task_struct *task_early_kill(struct task_struct *tsk, int force_early); void add_to_kill_ksm(struct task_struct *tsk, const struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, unsigned long ksm_addr); unsigned long page_mapped_in_vma(const struct page *page, struct vm_area_struct *vma); #else static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill) { return -EBUSY; } #endif extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); extern void set_pageblock_order(void); struct folio *alloc_migrate_folio(struct folio *src, unsigned long private); unsigned long reclaim_pages(struct list_head *folio_list); unsigned int reclaim_clean_pages_from_list(struct zone *zone, struct list_head *folio_list); /* The ALLOC_WMARK bits are used as an index to zone->watermark */ #define ALLOC_WMARK_MIN WMARK_MIN #define ALLOC_WMARK_LOW WMARK_LOW #define ALLOC_WMARK_HIGH WMARK_HIGH #define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */ /* Mask to get the watermark bits */ #define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1) /* * Only MMU archs have async oom victim reclaim - aka oom_reaper so we * cannot assume a reduced access to memory reserves is sufficient for * !MMU */ #ifdef CONFIG_MMU #define ALLOC_OOM 0x08 #else #define ALLOC_OOM ALLOC_NO_WATERMARKS #endif #define ALLOC_NON_BLOCK 0x10 /* Caller cannot block. Allow access * to 25% of the min watermark or * 62.5% if __GFP_HIGH is set. */ #define ALLOC_MIN_RESERVE 0x20 /* __GFP_HIGH set. Allow access to 50% * of the min watermark. */ #define ALLOC_CPUSET 0x40 /* check for correct cpuset */ #define ALLOC_CMA 0x80 /* allow allocations from CMA areas */ #ifdef CONFIG_ZONE_DMA32 #define ALLOC_NOFRAGMENT 0x100 /* avoid mixing pageblock types */ #else #define ALLOC_NOFRAGMENT 0x0 #endif #define ALLOC_HIGHATOMIC 0x200 /* Allows access to MIGRATE_HIGHATOMIC */ #define ALLOC_TRYLOCK 0x400 /* Only use spin_trylock in allocation path */ #define ALLOC_KSWAPD 0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */ /* Flags that allow allocations below the min watermark. 
*/ #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM) enum ttu_flags; struct tlbflush_unmap_batch; /* * only for MM internal work items which do not depend on * any allocations or locks which might depend on allocations */ extern struct workqueue_struct *mm_percpu_wq; #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH void try_to_unmap_flush(void); void try_to_unmap_flush_dirty(void); void flush_tlb_batched_pending(struct mm_struct *mm); #else static inline void try_to_unmap_flush(void) { } static inline void try_to_unmap_flush_dirty(void) { } static inline void flush_tlb_batched_pending(struct mm_struct *mm) { } #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ extern const struct trace_print_flags pageflag_names[]; extern const struct trace_print_flags vmaflag_names[]; extern const struct trace_print_flags gfpflag_names[]; static inline bool is_migrate_highatomic(enum migratetype migratetype) { return migratetype == MIGRATE_HIGHATOMIC; } void setup_zone_pageset(struct zone *zone); struct migration_target_control { int nid; /* preferred node id */ nodemask_t *nmask; gfp_t gfp_mask; enum migrate_reason reason; }; /* * mm/filemap.c */ size_t splice_folio_into_pipe(struct pipe_inode_info *pipe, struct folio *folio, loff_t fpos, size_t size); /* * mm/vmalloc.c */ #ifdef CONFIG_MMU void __init vmalloc_init(void); int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift); unsigned int get_vm_area_page_order(struct vm_struct *vm); #else static inline void vmalloc_init(void) { } static inline int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) { return -EINVAL; } #endif int __must_check __vmap_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift); void vunmap_range_noflush(unsigned long start, unsigned long end); void __vunmap_range_noflush(unsigned long start, unsigned long end); int numa_migrate_check(struct folio *folio, struct vm_fault *vmf, unsigned long addr, int *flags, bool writable, int *last_cpupid); void free_zone_device_folio(struct folio *folio); int migrate_device_coherent_folio(struct folio *folio); struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long align, unsigned long shift, unsigned long flags, unsigned long start, unsigned long end, int node, gfp_t gfp_mask, const void *caller); /* * mm/gup.c */ int __must_check try_grab_folio(struct folio *folio, int refs, unsigned int flags); /* * mm/huge_memory.c */ void touch_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, bool write); void touch_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, bool write); /* * Parses a string with mem suffixes into its order. Useful to parse kernel * parameters. 
*/ static inline int get_order_from_str(const char *size_str, unsigned long valid_orders) { unsigned long size; char *endptr; int order; size = memparse(size_str, &endptr); if (!is_power_of_2(size)) return -EINVAL; order = get_order(size); if (BIT(order) & ~valid_orders) return -EINVAL; return order; } enum { /* mark page accessed */ FOLL_TOUCH = 1 << 16, /* a retry, previous pass started an IO */ FOLL_TRIED = 1 << 17, /* we are working on non-current tsk/mm */ FOLL_REMOTE = 1 << 18, /* pages must be released via unpin_user_page */ FOLL_PIN = 1 << 19, /* gup_fast: prevent fall-back to slow gup */ FOLL_FAST_ONLY = 1 << 20, /* allow unlocking the mmap lock */ FOLL_UNLOCKABLE = 1 << 21, /* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */ FOLL_MADV_POPULATE = 1 << 22, }; #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \ FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \ FOLL_MADV_POPULATE) /* * Indicates, for pages that are write-protected in the page table, whether * GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the GUP pin * will remain consistent with the pages mapped into the page tables of the * MM. * * Temporary unmapping of PageAnonExclusive() pages or clearing of * PageAnonExclusive() has to protect against concurrent GUP: * * Ordinary GUP: Using the PT lock * * GUP-fast and fork(): mm->write_protect_seq * * GUP-fast and KSM or temporary unmapping (swap, migration): see * folio_try_share_anon_rmap_*() * * Must be called with the (sub)page that's actually referenced via the * page table entry, which might not necessarily be the head page for a * PTE-mapped THP. * * If the vma is NULL, we're coming from the GUP-fast path and might have * to fall back to the slow path just to look up the vma. */ static inline bool gup_must_unshare(struct vm_area_struct *vma, unsigned int flags, struct page *page) { /* * FOLL_WRITE is implicitly handled correctly as the page table entry * has to be writable -- and if it references (part of) an anonymous * folio, that part is required to be marked exclusive. */ if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN) return false; /* * Note: PageAnon(page) is stable until the page is actually getting * freed. */ if (!PageAnon(page)) { /* * We only care about R/O long-term pinning: R/O short-term * pinning does not have the semantics to observe successive * changes through the process page tables. */ if (!(flags & FOLL_LONGTERM)) return false; /* We really need the vma ... */ if (!vma) return true; /* * ... because we only care about writable private ("COW") * mappings where we have to break COW early. */ return is_cow_mapping(vma->vm_flags); } /* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */ if (IS_ENABLED(CONFIG_HAVE_GUP_FAST)) smp_rmb(); /* * Note that KSM pages cannot be exclusive, and consequently, * cannot get pinned. */ return !PageAnonExclusive(page); } extern bool mirrored_kernelcore; bool memblock_has_mirror(void); void memblock_free_all(void); static __always_inline void vma_set_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff) { vma->vm_start = start; vma->vm_end = end; vma->vm_pgoff = pgoff; } static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma) { /* * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty * enablement, because when soft-dirty is not compiled in, * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY) * would always be true.
*/ if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)) return false; /* * Soft-dirty is kind of special: its tracking is enabled when the * VM_SOFTDIRTY vma flag is *not* set. */ return !(vma->vm_flags & VM_SOFTDIRTY); } static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd) { return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd); } static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte) { return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte); } void __meminit __init_single_page(struct page *page, unsigned long pfn, unsigned long zone, int nid); void __meminit __init_page_from_nid(unsigned long pfn, int nid); /* shrinker related functions */ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, int priority); #ifdef CONFIG_SHRINKER_DEBUG static inline __printf(2, 0) int shrinker_debugfs_name_alloc( struct shrinker *shrinker, const char *fmt, va_list ap) { shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap); return shrinker->name ? 0 : -ENOMEM; } static inline void shrinker_debugfs_name_free(struct shrinker *shrinker) { kfree_const(shrinker->name); shrinker->name = NULL; } extern int shrinker_debugfs_add(struct shrinker *shrinker); extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker, int *debugfs_id); extern void shrinker_debugfs_remove(struct dentry *debugfs_entry, int debugfs_id); #else /* CONFIG_SHRINKER_DEBUG */ static inline int shrinker_debugfs_add(struct shrinker *shrinker) { return 0; } static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker, const char *fmt, va_list ap) { return 0; } static inline void shrinker_debugfs_name_free(struct shrinker *shrinker) { } static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker, int *debugfs_id) { *debugfs_id = -1; return NULL; } static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry, int debugfs_id) { } #endif /* CONFIG_SHRINKER_DEBUG */ /* Only track the nodes of mappings with shadow entries */ void workingset_update_node(struct xa_node *node); extern struct list_lru shadow_nodes; #define mapping_set_update(xas, mapping) do { \ if (!dax_mapping(mapping) && !shmem_mapping(mapping)) { \ xas_set_update(xas, workingset_update_node); \ xas_set_lru(xas, &shadow_nodes); \ } \ } while (0) /* mremap.c */ unsigned long move_page_tables(struct pagetable_move_control *pmc); #ifdef CONFIG_UNACCEPTED_MEMORY void accept_page(struct page *page); #else /* CONFIG_UNACCEPTED_MEMORY */ static inline void accept_page(struct page *page) { } #endif /* CONFIG_UNACCEPTED_MEMORY */ /* pagewalk.c */ int walk_page_range_mm(struct mm_struct *mm, unsigned long start, unsigned long end, const struct mm_walk_ops *ops, void *private); /* pt_reclaim.c */ bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval); void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb, pmd_t pmdval); void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, struct mmu_gather *tlb); #ifdef CONFIG_PT_RECLAIM bool reclaim_pt_is_enabled(unsigned long start, unsigned long end, struct zap_details *details); #else static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end, struct zap_details *details) { return false; } #endif /* CONFIG_PT_RECLAIM */ void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm); int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm); #endif /* __MM_INTERNAL_H */
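/*
 * Editorial example (not part of either source file): a minimal, standalone
 * userspace sketch of the pgoff-to-address arithmetic used by vma_address()
 * above. The demo_vma struct, DEMO_PAGE_SHIFT and DEMO_EFAULT names are
 * simplified stand-ins for the kernel types and constants, chosen only for
 * illustration.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12	/* assume 4 KiB pages */
#define DEMO_EFAULT 14UL	/* stand-in for the kernel's EFAULT */

struct demo_vma {
	unsigned long vm_start;	/* first user address of the mapping */
	unsigned long vm_end;	/* one past the last mapped address */
	unsigned long vm_pgoff;	/* file page offset mapped at vm_start */
};

/* Same decision structure as vma_address(): return the first address where
 * any of the nr_pages starting at pgoff appears, or -EFAULT. */
static unsigned long demo_vma_address(const struct demo_vma *vma,
				      unsigned long pgoff,
				      unsigned long nr_pages)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			  ((pgoff - vma->vm_pgoff) << DEMO_PAGE_SHIFT);
		/* beyond the vma (or wrapped through 0) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -DEMO_EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* range starts before the vma but overlaps its first page */
		address = vma->vm_start;
	} else {
		address = -DEMO_EFAULT;
	}
	return address;
}

int main(void)
{
	/* file pages [16, 32) mapped at 0x700000000000 */
	struct demo_vma vma = {
		.vm_start = 0x700000000000UL,
		.vm_end   = 0x700000010000UL,
		.vm_pgoff = 16,
	};

	/* inside the vma: vm_start + 4 pages */
	printf("pgoff 20 -> %#lx\n", demo_vma_address(&vma, 20, 1));
	/* straddles vm_start: clamped to vm_start */
	printf("pgoff 14, 4 pages -> %#lx\n", demo_vma_address(&vma, 14, 4));
	/* entirely past vm_end: -EFAULT */
	printf("pgoff 40 -> %#lx\n", demo_vma_address(&vma, 40, 1));
	return 0;
}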
// SPDX-License-Identifier: GPL-2.0-only /* * Siano core API module * * This file contains the implementation of the interface to the sms core component * * author: Uri Shkolnik * * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc.
*/ #include "smscoreapi.h" #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/firmware.h> #include <linux/wait.h> #include <asm/byteorder.h> #include "sms-cards.h" #include "smsir.h" struct smscore_device_notifyee_t { struct list_head entry; hotplug_t hotplug; }; struct smscore_idlist_t { struct list_head entry; int id; int data_type; }; struct smscore_client_t { struct list_head entry; struct smscore_device_t *coredev; void *context; struct list_head idlist; onresponse_t onresponse_handler; onremove_t onremove_handler; }; static char *siano_msgs[] = { [MSG_TYPE_BASE_VAL - MSG_TYPE_BASE_VAL] = "MSG_TYPE_BASE_VAL", [MSG_SMS_GET_VERSION_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_VERSION_REQ", [MSG_SMS_GET_VERSION_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_VERSION_RES", [MSG_SMS_MULTI_BRIDGE_CFG - MSG_TYPE_BASE_VAL] = "MSG_SMS_MULTI_BRIDGE_CFG", [MSG_SMS_GPIO_CONFIG_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_CONFIG_REQ", [MSG_SMS_GPIO_CONFIG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_CONFIG_RES", [MSG_SMS_GPIO_SET_LEVEL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_SET_LEVEL_REQ", [MSG_SMS_GPIO_SET_LEVEL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_SET_LEVEL_RES", [MSG_SMS_GPIO_GET_LEVEL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_GET_LEVEL_REQ", [MSG_SMS_GPIO_GET_LEVEL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_GET_LEVEL_RES", [MSG_SMS_EEPROM_BURN_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_EEPROM_BURN_IND", [MSG_SMS_LOG_ENABLE_CHANGE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_LOG_ENABLE_CHANGE_REQ", [MSG_SMS_LOG_ENABLE_CHANGE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_LOG_ENABLE_CHANGE_RES", [MSG_SMS_SET_MAX_TX_MSG_LEN_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_MAX_TX_MSG_LEN_REQ", [MSG_SMS_SET_MAX_TX_MSG_LEN_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_MAX_TX_MSG_LEN_RES", [MSG_SMS_SPI_HALFDUPLEX_TOKEN_HOST_TO_DEVICE - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_HALFDUPLEX_TOKEN_HOST_TO_DEVICE", [MSG_SMS_SPI_HALFDUPLEX_TOKEN_DEVICE_TO_HOST - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_HALFDUPLEX_TOKEN_DEVICE_TO_HOST", [MSG_SMS_BACKGROUND_SCAN_FLAG_CHANGE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_BACKGROUND_SCAN_FLAG_CHANGE_REQ", [MSG_SMS_BACKGROUND_SCAN_FLAG_CHANGE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_BACKGROUND_SCAN_FLAG_CHANGE_RES", [MSG_SMS_BACKGROUND_SCAN_SIGNAL_DETECTED_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_BACKGROUND_SCAN_SIGNAL_DETECTED_IND", [MSG_SMS_BACKGROUND_SCAN_NO_SIGNAL_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_BACKGROUND_SCAN_NO_SIGNAL_IND", [MSG_SMS_CONFIGURE_RF_SWITCH_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CONFIGURE_RF_SWITCH_REQ", [MSG_SMS_CONFIGURE_RF_SWITCH_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CONFIGURE_RF_SWITCH_RES", [MSG_SMS_MRC_PATH_DISCONNECT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_PATH_DISCONNECT_REQ", [MSG_SMS_MRC_PATH_DISCONNECT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_PATH_DISCONNECT_RES", [MSG_SMS_RECEIVE_1SEG_THROUGH_FULLSEG_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_RECEIVE_1SEG_THROUGH_FULLSEG_REQ", [MSG_SMS_RECEIVE_1SEG_THROUGH_FULLSEG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RECEIVE_1SEG_THROUGH_FULLSEG_RES", [MSG_SMS_RECEIVE_VHF_VIA_VHF_INPUT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_RECEIVE_VHF_VIA_VHF_INPUT_REQ", [MSG_SMS_RECEIVE_VHF_VIA_VHF_INPUT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RECEIVE_VHF_VIA_VHF_INPUT_RES", [MSG_WR_REG_RFT_REQ - MSG_TYPE_BASE_VAL] = "MSG_WR_REG_RFT_REQ", [MSG_WR_REG_RFT_RES - MSG_TYPE_BASE_VAL] = "MSG_WR_REG_RFT_RES", [MSG_RD_REG_RFT_REQ - MSG_TYPE_BASE_VAL] = 
"MSG_RD_REG_RFT_REQ", [MSG_RD_REG_RFT_RES - MSG_TYPE_BASE_VAL] = "MSG_RD_REG_RFT_RES", [MSG_RD_REG_ALL_RFT_REQ - MSG_TYPE_BASE_VAL] = "MSG_RD_REG_ALL_RFT_REQ", [MSG_RD_REG_ALL_RFT_RES - MSG_TYPE_BASE_VAL] = "MSG_RD_REG_ALL_RFT_RES", [MSG_HELP_INT - MSG_TYPE_BASE_VAL] = "MSG_HELP_INT", [MSG_RUN_SCRIPT_INT - MSG_TYPE_BASE_VAL] = "MSG_RUN_SCRIPT_INT", [MSG_SMS_EWS_INBAND_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EWS_INBAND_REQ", [MSG_SMS_EWS_INBAND_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EWS_INBAND_RES", [MSG_SMS_RFS_SELECT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_RFS_SELECT_REQ", [MSG_SMS_RFS_SELECT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RFS_SELECT_RES", [MSG_SMS_MB_GET_VER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_GET_VER_REQ", [MSG_SMS_MB_GET_VER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_GET_VER_RES", [MSG_SMS_MB_WRITE_CFGFILE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_WRITE_CFGFILE_REQ", [MSG_SMS_MB_WRITE_CFGFILE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_WRITE_CFGFILE_RES", [MSG_SMS_MB_READ_CFGFILE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_READ_CFGFILE_REQ", [MSG_SMS_MB_READ_CFGFILE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_READ_CFGFILE_RES", [MSG_SMS_RD_MEM_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_RD_MEM_REQ", [MSG_SMS_RD_MEM_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RD_MEM_RES", [MSG_SMS_WR_MEM_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_WR_MEM_REQ", [MSG_SMS_WR_MEM_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_WR_MEM_RES", [MSG_SMS_UPDATE_MEM_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_UPDATE_MEM_REQ", [MSG_SMS_UPDATE_MEM_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_UPDATE_MEM_RES", [MSG_SMS_ISDBT_ENABLE_FULL_PARAMS_SET_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_ENABLE_FULL_PARAMS_SET_REQ", [MSG_SMS_ISDBT_ENABLE_FULL_PARAMS_SET_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_ENABLE_FULL_PARAMS_SET_RES", [MSG_SMS_RF_TUNE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_RF_TUNE_REQ", [MSG_SMS_RF_TUNE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RF_TUNE_RES", [MSG_SMS_ISDBT_ENABLE_HIGH_MOBILITY_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_ENABLE_HIGH_MOBILITY_REQ", [MSG_SMS_ISDBT_ENABLE_HIGH_MOBILITY_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_ENABLE_HIGH_MOBILITY_RES", [MSG_SMS_ISDBT_SB_RECEPTION_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_SB_RECEPTION_REQ", [MSG_SMS_ISDBT_SB_RECEPTION_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_SB_RECEPTION_RES", [MSG_SMS_GENERIC_EPROM_WRITE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GENERIC_EPROM_WRITE_REQ", [MSG_SMS_GENERIC_EPROM_WRITE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GENERIC_EPROM_WRITE_RES", [MSG_SMS_GENERIC_EPROM_READ_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GENERIC_EPROM_READ_REQ", [MSG_SMS_GENERIC_EPROM_READ_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GENERIC_EPROM_READ_RES", [MSG_SMS_EEPROM_WRITE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EEPROM_WRITE_REQ", [MSG_SMS_EEPROM_WRITE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EEPROM_WRITE_RES", [MSG_SMS_CUSTOM_READ_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CUSTOM_READ_REQ", [MSG_SMS_CUSTOM_READ_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CUSTOM_READ_RES", [MSG_SMS_CUSTOM_WRITE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CUSTOM_WRITE_REQ", [MSG_SMS_CUSTOM_WRITE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CUSTOM_WRITE_RES", [MSG_SMS_INIT_DEVICE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_INIT_DEVICE_REQ", [MSG_SMS_INIT_DEVICE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_INIT_DEVICE_RES", [MSG_SMS_ATSC_SET_ALL_IP_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_SET_ALL_IP_REQ", [MSG_SMS_ATSC_SET_ALL_IP_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_SET_ALL_IP_RES", [MSG_SMS_ATSC_START_ENSEMBLE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_START_ENSEMBLE_REQ", [MSG_SMS_ATSC_START_ENSEMBLE_RES - MSG_TYPE_BASE_VAL] = 
"MSG_SMS_ATSC_START_ENSEMBLE_RES", [MSG_SMS_SET_OUTPUT_MODE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_OUTPUT_MODE_REQ", [MSG_SMS_SET_OUTPUT_MODE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_OUTPUT_MODE_RES", [MSG_SMS_ATSC_IP_FILTER_GET_LIST_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_GET_LIST_REQ", [MSG_SMS_ATSC_IP_FILTER_GET_LIST_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_GET_LIST_RES", [MSG_SMS_SUB_CHANNEL_START_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SUB_CHANNEL_START_REQ", [MSG_SMS_SUB_CHANNEL_START_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SUB_CHANNEL_START_RES", [MSG_SMS_SUB_CHANNEL_STOP_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SUB_CHANNEL_STOP_REQ", [MSG_SMS_SUB_CHANNEL_STOP_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SUB_CHANNEL_STOP_RES", [MSG_SMS_ATSC_IP_FILTER_ADD_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_ADD_REQ", [MSG_SMS_ATSC_IP_FILTER_ADD_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_ADD_RES", [MSG_SMS_ATSC_IP_FILTER_REMOVE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_REMOVE_REQ", [MSG_SMS_ATSC_IP_FILTER_REMOVE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_REMOVE_RES", [MSG_SMS_ATSC_IP_FILTER_REMOVE_ALL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_REMOVE_ALL_REQ", [MSG_SMS_ATSC_IP_FILTER_REMOVE_ALL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_REMOVE_ALL_RES", [MSG_SMS_WAIT_CMD - MSG_TYPE_BASE_VAL] = "MSG_SMS_WAIT_CMD", [MSG_SMS_ADD_PID_FILTER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ADD_PID_FILTER_REQ", [MSG_SMS_ADD_PID_FILTER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ADD_PID_FILTER_RES", [MSG_SMS_REMOVE_PID_FILTER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_REMOVE_PID_FILTER_REQ", [MSG_SMS_REMOVE_PID_FILTER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_REMOVE_PID_FILTER_RES", [MSG_SMS_FAST_INFORMATION_CHANNEL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_FAST_INFORMATION_CHANNEL_REQ", [MSG_SMS_FAST_INFORMATION_CHANNEL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_FAST_INFORMATION_CHANNEL_RES", [MSG_SMS_DAB_CHANNEL - MSG_TYPE_BASE_VAL] = "MSG_SMS_DAB_CHANNEL", [MSG_SMS_GET_PID_FILTER_LIST_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_PID_FILTER_LIST_REQ", [MSG_SMS_GET_PID_FILTER_LIST_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_PID_FILTER_LIST_RES", [MSG_SMS_POWER_DOWN_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_DOWN_REQ", [MSG_SMS_POWER_DOWN_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_DOWN_RES", [MSG_SMS_ATSC_SLT_EXIST_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_SLT_EXIST_IND", [MSG_SMS_ATSC_NO_SLT_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_NO_SLT_IND", [MSG_SMS_GET_STATISTICS_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_STATISTICS_REQ", [MSG_SMS_GET_STATISTICS_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_STATISTICS_RES", [MSG_SMS_SEND_DUMP - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_DUMP", [MSG_SMS_SCAN_START_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SCAN_START_REQ", [MSG_SMS_SCAN_START_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SCAN_START_RES", [MSG_SMS_SCAN_STOP_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SCAN_STOP_REQ", [MSG_SMS_SCAN_STOP_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SCAN_STOP_RES", [MSG_SMS_SCAN_PROGRESS_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_SCAN_PROGRESS_IND", [MSG_SMS_SCAN_COMPLETE_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_SCAN_COMPLETE_IND", [MSG_SMS_LOG_ITEM - MSG_TYPE_BASE_VAL] = "MSG_SMS_LOG_ITEM", [MSG_SMS_DAB_SUBCHANNEL_RECONFIG_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DAB_SUBCHANNEL_RECONFIG_REQ", [MSG_SMS_DAB_SUBCHANNEL_RECONFIG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DAB_SUBCHANNEL_RECONFIG_RES", [MSG_SMS_HO_PER_SLICES_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_PER_SLICES_IND", [MSG_SMS_HO_INBAND_POWER_IND - MSG_TYPE_BASE_VAL] = 
"MSG_SMS_HO_INBAND_POWER_IND", [MSG_SMS_MANUAL_DEMOD_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MANUAL_DEMOD_REQ", [MSG_SMS_HO_TUNE_ON_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_TUNE_ON_REQ", [MSG_SMS_HO_TUNE_ON_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_TUNE_ON_RES", [MSG_SMS_HO_TUNE_OFF_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_TUNE_OFF_REQ", [MSG_SMS_HO_TUNE_OFF_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_TUNE_OFF_RES", [MSG_SMS_HO_PEEK_FREQ_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_PEEK_FREQ_REQ", [MSG_SMS_HO_PEEK_FREQ_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_PEEK_FREQ_RES", [MSG_SMS_HO_PEEK_FREQ_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_PEEK_FREQ_IND", [MSG_SMS_MB_ATTEN_SET_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_ATTEN_SET_REQ", [MSG_SMS_MB_ATTEN_SET_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_ATTEN_SET_RES", [MSG_SMS_ENABLE_STAT_IN_I2C_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_STAT_IN_I2C_REQ", [MSG_SMS_ENABLE_STAT_IN_I2C_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_STAT_IN_I2C_RES", [MSG_SMS_SET_ANTENNA_CONFIG_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_ANTENNA_CONFIG_REQ", [MSG_SMS_SET_ANTENNA_CONFIG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_ANTENNA_CONFIG_RES", [MSG_SMS_GET_STATISTICS_EX_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_STATISTICS_EX_REQ", [MSG_SMS_GET_STATISTICS_EX_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_STATISTICS_EX_RES", [MSG_SMS_SLEEP_RESUME_COMP_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_SLEEP_RESUME_COMP_IND", [MSG_SMS_SWITCH_HOST_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SWITCH_HOST_INTERFACE_REQ", [MSG_SMS_SWITCH_HOST_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SWITCH_HOST_INTERFACE_RES", [MSG_SMS_DATA_DOWNLOAD_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_DOWNLOAD_REQ", [MSG_SMS_DATA_DOWNLOAD_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_DOWNLOAD_RES", [MSG_SMS_DATA_VALIDITY_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_VALIDITY_REQ", [MSG_SMS_DATA_VALIDITY_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_VALIDITY_RES", [MSG_SMS_SWDOWNLOAD_TRIGGER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SWDOWNLOAD_TRIGGER_REQ", [MSG_SMS_SWDOWNLOAD_TRIGGER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SWDOWNLOAD_TRIGGER_RES", [MSG_SMS_SWDOWNLOAD_BACKDOOR_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SWDOWNLOAD_BACKDOOR_REQ", [MSG_SMS_SWDOWNLOAD_BACKDOOR_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SWDOWNLOAD_BACKDOOR_RES", [MSG_SMS_GET_VERSION_EX_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_VERSION_EX_REQ", [MSG_SMS_GET_VERSION_EX_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_VERSION_EX_RES", [MSG_SMS_CLOCK_OUTPUT_CONFIG_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CLOCK_OUTPUT_CONFIG_REQ", [MSG_SMS_CLOCK_OUTPUT_CONFIG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CLOCK_OUTPUT_CONFIG_RES", [MSG_SMS_I2C_SET_FREQ_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_I2C_SET_FREQ_REQ", [MSG_SMS_I2C_SET_FREQ_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_I2C_SET_FREQ_RES", [MSG_SMS_GENERIC_I2C_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GENERIC_I2C_REQ", [MSG_SMS_GENERIC_I2C_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GENERIC_I2C_RES", [MSG_SMS_DVBT_BDA_DATA - MSG_TYPE_BASE_VAL] = "MSG_SMS_DVBT_BDA_DATA", [MSG_SW_RELOAD_REQ - MSG_TYPE_BASE_VAL] = "MSG_SW_RELOAD_REQ", [MSG_SMS_DATA_MSG - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_MSG", [MSG_TABLE_UPLOAD_REQ - MSG_TYPE_BASE_VAL] = "MSG_TABLE_UPLOAD_REQ", [MSG_TABLE_UPLOAD_RES - MSG_TYPE_BASE_VAL] = "MSG_TABLE_UPLOAD_RES", [MSG_SW_RELOAD_START_REQ - MSG_TYPE_BASE_VAL] = "MSG_SW_RELOAD_START_REQ", [MSG_SW_RELOAD_START_RES - MSG_TYPE_BASE_VAL] = "MSG_SW_RELOAD_START_RES", [MSG_SW_RELOAD_EXEC_REQ - MSG_TYPE_BASE_VAL] = "MSG_SW_RELOAD_EXEC_REQ", [MSG_SW_RELOAD_EXEC_RES - MSG_TYPE_BASE_VAL] = "MSG_SW_RELOAD_EXEC_RES", 
[MSG_SMS_SPI_INT_LINE_SET_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_INT_LINE_SET_REQ", [MSG_SMS_SPI_INT_LINE_SET_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_INT_LINE_SET_RES", [MSG_SMS_GPIO_CONFIG_EX_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_CONFIG_EX_REQ", [MSG_SMS_GPIO_CONFIG_EX_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_CONFIG_EX_RES", [MSG_SMS_WATCHDOG_ACT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_WATCHDOG_ACT_REQ", [MSG_SMS_WATCHDOG_ACT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_WATCHDOG_ACT_RES", [MSG_SMS_LOOPBACK_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_LOOPBACK_REQ", [MSG_SMS_LOOPBACK_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_LOOPBACK_RES", [MSG_SMS_RAW_CAPTURE_START_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_RAW_CAPTURE_START_REQ", [MSG_SMS_RAW_CAPTURE_START_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RAW_CAPTURE_START_RES", [MSG_SMS_RAW_CAPTURE_ABORT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_RAW_CAPTURE_ABORT_REQ", [MSG_SMS_RAW_CAPTURE_ABORT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RAW_CAPTURE_ABORT_RES", [MSG_SMS_RAW_CAPTURE_COMPLETE_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_RAW_CAPTURE_COMPLETE_IND", [MSG_SMS_DATA_PUMP_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_PUMP_IND", [MSG_SMS_DATA_PUMP_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_PUMP_REQ", [MSG_SMS_DATA_PUMP_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_PUMP_RES", [MSG_SMS_FLASH_DL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_FLASH_DL_REQ", [MSG_SMS_EXEC_TEST_1_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXEC_TEST_1_REQ", [MSG_SMS_EXEC_TEST_1_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXEC_TEST_1_RES", [MSG_SMS_ENABLE_TS_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_TS_INTERFACE_REQ", [MSG_SMS_ENABLE_TS_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_TS_INTERFACE_RES", [MSG_SMS_SPI_SET_BUS_WIDTH_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_SET_BUS_WIDTH_REQ", [MSG_SMS_SPI_SET_BUS_WIDTH_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_SET_BUS_WIDTH_RES", [MSG_SMS_SEND_EMM_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_EMM_REQ", [MSG_SMS_SEND_EMM_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_EMM_RES", [MSG_SMS_DISABLE_TS_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DISABLE_TS_INTERFACE_REQ", [MSG_SMS_DISABLE_TS_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DISABLE_TS_INTERFACE_RES", [MSG_SMS_IS_BUF_FREE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_IS_BUF_FREE_REQ", [MSG_SMS_IS_BUF_FREE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_IS_BUF_FREE_RES", [MSG_SMS_EXT_ANTENNA_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXT_ANTENNA_REQ", [MSG_SMS_EXT_ANTENNA_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXT_ANTENNA_RES", [MSG_SMS_CMMB_GET_NET_OF_FREQ_REQ_OBSOLETE - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_NET_OF_FREQ_REQ_OBSOLETE", [MSG_SMS_CMMB_GET_NET_OF_FREQ_RES_OBSOLETE - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_NET_OF_FREQ_RES_OBSOLETE", [MSG_SMS_BATTERY_LEVEL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_BATTERY_LEVEL_REQ", [MSG_SMS_BATTERY_LEVEL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_BATTERY_LEVEL_RES", [MSG_SMS_CMMB_INJECT_TABLE_REQ_OBSOLETE - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_INJECT_TABLE_REQ_OBSOLETE", [MSG_SMS_CMMB_INJECT_TABLE_RES_OBSOLETE - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_INJECT_TABLE_RES_OBSOLETE", [MSG_SMS_FM_RADIO_BLOCK_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_FM_RADIO_BLOCK_IND", [MSG_SMS_HOST_NOTIFICATION_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_HOST_NOTIFICATION_IND", [MSG_SMS_CMMB_GET_CONTROL_TABLE_REQ_OBSOLETE - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_CONTROL_TABLE_REQ_OBSOLETE", [MSG_SMS_CMMB_GET_CONTROL_TABLE_RES_OBSOLETE - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_CONTROL_TABLE_RES_OBSOLETE", [MSG_SMS_CMMB_GET_NETWORKS_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_NETWORKS_REQ", 
[MSG_SMS_CMMB_GET_NETWORKS_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_NETWORKS_RES", [MSG_SMS_CMMB_START_SERVICE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_START_SERVICE_REQ", [MSG_SMS_CMMB_START_SERVICE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_START_SERVICE_RES", [MSG_SMS_CMMB_STOP_SERVICE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_STOP_SERVICE_REQ", [MSG_SMS_CMMB_STOP_SERVICE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_STOP_SERVICE_RES", [MSG_SMS_CMMB_ADD_CHANNEL_FILTER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_ADD_CHANNEL_FILTER_REQ", [MSG_SMS_CMMB_ADD_CHANNEL_FILTER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_ADD_CHANNEL_FILTER_RES", [MSG_SMS_CMMB_REMOVE_CHANNEL_FILTER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_REMOVE_CHANNEL_FILTER_REQ", [MSG_SMS_CMMB_REMOVE_CHANNEL_FILTER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_REMOVE_CHANNEL_FILTER_RES", [MSG_SMS_CMMB_START_CONTROL_INFO_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_START_CONTROL_INFO_REQ", [MSG_SMS_CMMB_START_CONTROL_INFO_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_START_CONTROL_INFO_RES", [MSG_SMS_CMMB_STOP_CONTROL_INFO_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_STOP_CONTROL_INFO_REQ", [MSG_SMS_CMMB_STOP_CONTROL_INFO_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_STOP_CONTROL_INFO_RES", [MSG_SMS_ISDBT_TUNE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_TUNE_REQ", [MSG_SMS_ISDBT_TUNE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_TUNE_RES", [MSG_SMS_TRANSMISSION_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_TRANSMISSION_IND", [MSG_SMS_PID_STATISTICS_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_PID_STATISTICS_IND", [MSG_SMS_POWER_DOWN_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_DOWN_IND", [MSG_SMS_POWER_DOWN_CONF - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_DOWN_CONF", [MSG_SMS_POWER_UP_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_UP_IND", [MSG_SMS_POWER_UP_CONF - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_UP_CONF", [MSG_SMS_POWER_MODE_SET_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_MODE_SET_REQ", [MSG_SMS_POWER_MODE_SET_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_MODE_SET_RES", [MSG_SMS_DEBUG_HOST_EVENT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DEBUG_HOST_EVENT_REQ", [MSG_SMS_DEBUG_HOST_EVENT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DEBUG_HOST_EVENT_RES", [MSG_SMS_NEW_CRYSTAL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_NEW_CRYSTAL_REQ", [MSG_SMS_NEW_CRYSTAL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_NEW_CRYSTAL_RES", [MSG_SMS_CONFIG_SPI_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CONFIG_SPI_REQ", [MSG_SMS_CONFIG_SPI_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CONFIG_SPI_RES", [MSG_SMS_I2C_SHORT_STAT_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_I2C_SHORT_STAT_IND", [MSG_SMS_START_IR_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_START_IR_REQ", [MSG_SMS_START_IR_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_START_IR_RES", [MSG_SMS_IR_SAMPLES_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_IR_SAMPLES_IND", [MSG_SMS_CMMB_CA_SERVICE_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_CA_SERVICE_IND", [MSG_SMS_SLAVE_DEVICE_DETECTED - MSG_TYPE_BASE_VAL] = "MSG_SMS_SLAVE_DEVICE_DETECTED", [MSG_SMS_INTERFACE_LOCK_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_INTERFACE_LOCK_IND", [MSG_SMS_INTERFACE_UNLOCK_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_INTERFACE_UNLOCK_IND", [MSG_SMS_SEND_ROSUM_BUFF_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_ROSUM_BUFF_REQ", [MSG_SMS_SEND_ROSUM_BUFF_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_ROSUM_BUFF_RES", [MSG_SMS_ROSUM_BUFF - MSG_TYPE_BASE_VAL] = "MSG_SMS_ROSUM_BUFF", [MSG_SMS_SET_AES128_KEY_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_AES128_KEY_REQ", [MSG_SMS_SET_AES128_KEY_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_AES128_KEY_RES", [MSG_SMS_MBBMS_WRITE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MBBMS_WRITE_REQ", 
[MSG_SMS_MBBMS_WRITE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MBBMS_WRITE_RES", [MSG_SMS_MBBMS_READ_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_MBBMS_READ_IND", [MSG_SMS_IQ_STREAM_START_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_IQ_STREAM_START_REQ", [MSG_SMS_IQ_STREAM_START_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_IQ_STREAM_START_RES", [MSG_SMS_IQ_STREAM_STOP_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_IQ_STREAM_STOP_REQ", [MSG_SMS_IQ_STREAM_STOP_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_IQ_STREAM_STOP_RES", [MSG_SMS_IQ_STREAM_DATA_BLOCK - MSG_TYPE_BASE_VAL] = "MSG_SMS_IQ_STREAM_DATA_BLOCK", [MSG_SMS_GET_EEPROM_VERSION_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_EEPROM_VERSION_REQ", [MSG_SMS_GET_EEPROM_VERSION_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_EEPROM_VERSION_RES", [MSG_SMS_SIGNAL_DETECTED_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_SIGNAL_DETECTED_IND", [MSG_SMS_NO_SIGNAL_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_NO_SIGNAL_IND", [MSG_SMS_MRC_SHUTDOWN_SLAVE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_SHUTDOWN_SLAVE_REQ", [MSG_SMS_MRC_SHUTDOWN_SLAVE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_SHUTDOWN_SLAVE_RES", [MSG_SMS_MRC_BRINGUP_SLAVE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_BRINGUP_SLAVE_REQ", [MSG_SMS_MRC_BRINGUP_SLAVE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_BRINGUP_SLAVE_RES", [MSG_SMS_EXTERNAL_LNA_CTRL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXTERNAL_LNA_CTRL_REQ", [MSG_SMS_EXTERNAL_LNA_CTRL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXTERNAL_LNA_CTRL_RES", [MSG_SMS_SET_PERIODIC_STATISTICS_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_PERIODIC_STATISTICS_REQ", [MSG_SMS_SET_PERIODIC_STATISTICS_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_PERIODIC_STATISTICS_RES", [MSG_SMS_CMMB_SET_AUTO_OUTPUT_TS0_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SET_AUTO_OUTPUT_TS0_REQ", [MSG_SMS_CMMB_SET_AUTO_OUTPUT_TS0_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SET_AUTO_OUTPUT_TS0_RES", [LOCAL_TUNE - MSG_TYPE_BASE_VAL] = "LOCAL_TUNE", [LOCAL_IFFT_H_ICI - MSG_TYPE_BASE_VAL] = "LOCAL_IFFT_H_ICI", [MSG_RESYNC_REQ - MSG_TYPE_BASE_VAL] = "MSG_RESYNC_REQ", [MSG_SMS_CMMB_GET_MRC_STATISTICS_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_MRC_STATISTICS_REQ", [MSG_SMS_CMMB_GET_MRC_STATISTICS_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_MRC_STATISTICS_RES", [MSG_SMS_LOG_EX_ITEM - MSG_TYPE_BASE_VAL] = "MSG_SMS_LOG_EX_ITEM", [MSG_SMS_DEVICE_DATA_LOSS_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_DEVICE_DATA_LOSS_IND", [MSG_SMS_MRC_WATCHDOG_TRIGGERED_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_WATCHDOG_TRIGGERED_IND", [MSG_SMS_USER_MSG_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_USER_MSG_REQ", [MSG_SMS_USER_MSG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_USER_MSG_RES", [MSG_SMS_SMART_CARD_INIT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SMART_CARD_INIT_REQ", [MSG_SMS_SMART_CARD_INIT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SMART_CARD_INIT_RES", [MSG_SMS_SMART_CARD_WRITE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SMART_CARD_WRITE_REQ", [MSG_SMS_SMART_CARD_WRITE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SMART_CARD_WRITE_RES", [MSG_SMS_SMART_CARD_READ_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_SMART_CARD_READ_IND", [MSG_SMS_TSE_ENABLE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_TSE_ENABLE_REQ", [MSG_SMS_TSE_ENABLE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_TSE_ENABLE_RES", [MSG_SMS_CMMB_GET_SHORT_STATISTICS_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_SHORT_STATISTICS_REQ", [MSG_SMS_CMMB_GET_SHORT_STATISTICS_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_SHORT_STATISTICS_RES", [MSG_SMS_LED_CONFIG_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_LED_CONFIG_REQ", [MSG_SMS_LED_CONFIG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_LED_CONFIG_RES", [MSG_PWM_ANTENNA_REQ - MSG_TYPE_BASE_VAL] = "MSG_PWM_ANTENNA_REQ", 
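/* * Editorial note: this table uses C99 designated initializers indexed by * (message type - MSG_TYPE_BASE_VAL), so a message's name lives at * siano_msgs[type - MSG_TYPE_BASE_VAL] and any value the enum skips stays * zero-initialized. smscore_translate_msg() just below performs the same * subtraction and range-checks the index before the lookup. */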
[MSG_PWM_ANTENNA_RES - MSG_TYPE_BASE_VAL] = "MSG_PWM_ANTENNA_RES", [MSG_SMS_CMMB_SMD_SN_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SMD_SN_REQ", [MSG_SMS_CMMB_SMD_SN_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SMD_SN_RES", [MSG_SMS_CMMB_SET_CA_CW_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SET_CA_CW_REQ", [MSG_SMS_CMMB_SET_CA_CW_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SET_CA_CW_RES", [MSG_SMS_CMMB_SET_CA_SALT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SET_CA_SALT_REQ", [MSG_SMS_CMMB_SET_CA_SALT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SET_CA_SALT_RES", [MSG_SMS_NSCD_INIT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_NSCD_INIT_REQ", [MSG_SMS_NSCD_INIT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_NSCD_INIT_RES", [MSG_SMS_NSCD_PROCESS_SECTION_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_NSCD_PROCESS_SECTION_REQ", [MSG_SMS_NSCD_PROCESS_SECTION_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_NSCD_PROCESS_SECTION_RES", [MSG_SMS_DBD_CREATE_OBJECT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_CREATE_OBJECT_REQ", [MSG_SMS_DBD_CREATE_OBJECT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_CREATE_OBJECT_RES", [MSG_SMS_DBD_CONFIGURE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_CONFIGURE_REQ", [MSG_SMS_DBD_CONFIGURE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_CONFIGURE_RES", [MSG_SMS_DBD_SET_KEYS_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_SET_KEYS_REQ", [MSG_SMS_DBD_SET_KEYS_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_SET_KEYS_RES", [MSG_SMS_DBD_PROCESS_HEADER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_PROCESS_HEADER_REQ", [MSG_SMS_DBD_PROCESS_HEADER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_PROCESS_HEADER_RES", [MSG_SMS_DBD_PROCESS_DATA_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_PROCESS_DATA_REQ", [MSG_SMS_DBD_PROCESS_DATA_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_PROCESS_DATA_RES", [MSG_SMS_DBD_PROCESS_GET_DATA_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_PROCESS_GET_DATA_REQ", [MSG_SMS_DBD_PROCESS_GET_DATA_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_PROCESS_GET_DATA_RES", [MSG_SMS_NSCD_OPEN_SESSION_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_NSCD_OPEN_SESSION_REQ", [MSG_SMS_NSCD_OPEN_SESSION_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_NSCD_OPEN_SESSION_RES", [MSG_SMS_SEND_HOST_DATA_TO_DEMUX_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_HOST_DATA_TO_DEMUX_REQ", [MSG_SMS_SEND_HOST_DATA_TO_DEMUX_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_HOST_DATA_TO_DEMUX_RES", [MSG_LAST_MSG_TYPE - MSG_TYPE_BASE_VAL] = "MSG_LAST_MSG_TYPE", }; char *smscore_translate_msg(enum msg_types msgtype) { int i = msgtype - MSG_TYPE_BASE_VAL; char *msg; if (i < 0 || i >= ARRAY_SIZE(siano_msgs)) return "Unknown msg type"; msg = siano_msgs[i]; if (!*msg) return "Unknown msg type"; return msg; } EXPORT_SYMBOL_GPL(smscore_translate_msg); void smscore_set_board_id(struct smscore_device_t *core, int id) { core->board_id = id; } int smscore_led_state(struct smscore_device_t *core, int led) { if (led >= 0) core->led_state = led; return core->led_state; } EXPORT_SYMBOL_GPL(smscore_set_board_id); int smscore_get_board_id(struct smscore_device_t *core) { return core->board_id; } EXPORT_SYMBOL_GPL(smscore_get_board_id); struct smscore_registry_entry_t { struct list_head entry; char devpath[32]; int mode; enum sms_device_type_st type; }; static struct list_head g_smscore_notifyees; static struct list_head g_smscore_devices; static DEFINE_MUTEX(g_smscore_deviceslock); static struct list_head g_smscore_registry; static DEFINE_MUTEX(g_smscore_registrylock); static int default_mode = DEVICE_MODE_NONE; module_param(default_mode, int, 0644); MODULE_PARM_DESC(default_mode, "default firmware id (device mode)"); static struct smscore_registry_entry_t 
*smscore_find_registry(char *devpath) { struct smscore_registry_entry_t *entry; struct list_head *next; mutex_lock(&g_smscore_registrylock); for (next = g_smscore_registry.next; next != &g_smscore_registry; next = next->next) { entry = (struct smscore_registry_entry_t *) next; if (!strncmp(entry->devpath, devpath, sizeof(entry->devpath))) { mutex_unlock(&g_smscore_registrylock); return entry; } } entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (entry) { entry->mode = default_mode; strscpy(entry->devpath, devpath, sizeof(entry->devpath)); list_add(&entry->entry, &g_smscore_registry); } else pr_err("failed to create smscore_registry.\n"); mutex_unlock(&g_smscore_registrylock); return entry; } int smscore_registry_getmode(char *devpath) { struct smscore_registry_entry_t *entry; entry = smscore_find_registry(devpath); if (entry) return entry->mode; else pr_err("No registry found.\n"); return default_mode; } EXPORT_SYMBOL_GPL(smscore_registry_getmode); static enum sms_device_type_st smscore_registry_gettype(char *devpath) { struct smscore_registry_entry_t *entry; entry = smscore_find_registry(devpath); if (entry) return entry->type; else pr_err("No registry found.\n"); return -EINVAL; } static void smscore_registry_setmode(char *devpath, int mode) { struct smscore_registry_entry_t *entry; entry = smscore_find_registry(devpath); if (entry) entry->mode = mode; else pr_err("No registry found.\n"); } static void smscore_registry_settype(char *devpath, enum sms_device_type_st type) { struct smscore_registry_entry_t *entry; entry = smscore_find_registry(devpath); if (entry) entry->type = type; else pr_err("No registry found.\n"); } static void list_add_locked(struct list_head *new, struct list_head *head, spinlock_t *lock) { unsigned long flags; spin_lock_irqsave(lock, flags); list_add(new, head); spin_unlock_irqrestore(lock, flags); } /* * register a client callback that is called when a device is plugged * in or unplugged * NOTE: if devices already exist, the callback is called immediately * for each of them * * @param hotplug callback * * return: 0 on success, <0 on error.
*/ int smscore_register_hotplug(hotplug_t hotplug) { struct smscore_device_notifyee_t *notifyee; struct list_head *next, *first; int rc = 0; mutex_lock(&g_smscore_deviceslock); notifyee = kmalloc(sizeof(*notifyee), GFP_KERNEL); if (notifyee) { /* now notify the callback about existing devices */ first = &g_smscore_devices; for (next = first->next; next != first && !rc; next = next->next) { struct smscore_device_t *coredev = (struct smscore_device_t *) next; rc = hotplug(coredev, coredev->device, 1); } if (rc >= 0) { notifyee->hotplug = hotplug; list_add(&notifyee->entry, &g_smscore_notifyees); } else kfree(notifyee); } else rc = -ENOMEM; mutex_unlock(&g_smscore_deviceslock); return rc; } EXPORT_SYMBOL_GPL(smscore_register_hotplug); /* * unregister a client callback that is called when a device is plugged * in or unplugged * * @param hotplug callback * */ void smscore_unregister_hotplug(hotplug_t hotplug) { struct list_head *next, *first; mutex_lock(&g_smscore_deviceslock); first = &g_smscore_notifyees; for (next = first->next; next != first;) { struct smscore_device_notifyee_t *notifyee = (struct smscore_device_notifyee_t *) next; next = next->next; if (notifyee->hotplug == hotplug) { list_del(&notifyee->entry); kfree(notifyee); } } mutex_unlock(&g_smscore_deviceslock); } EXPORT_SYMBOL_GPL(smscore_unregister_hotplug); static void smscore_notify_clients(struct smscore_device_t *coredev) { struct smscore_client_t *client; /* the client must call smscore_unregister_client from its remove handler */ while (!list_empty(&coredev->clients)) { client = (struct smscore_client_t *) coredev->clients.next; client->onremove_handler(client->context); } } static int smscore_notify_callbacks(struct smscore_device_t *coredev, struct device *device, int arrival) { struct smscore_device_notifyee_t *elem; int rc = 0; /* note: must be called under g_smscore_deviceslock */ list_for_each_entry(elem, &g_smscore_notifyees, entry) { rc = elem->hotplug(coredev, device, arrival); if (rc < 0) break; } return rc; } static struct smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer, dma_addr_t common_buffer_phys) { struct smscore_buffer_t *cb; cb = kzalloc(sizeof(*cb), GFP_KERNEL); if (!cb) return NULL; cb->p = buffer; cb->offset_in_common = buffer - (u8 *) common_buffer; if (common_buffer_phys) cb->phys = common_buffer_phys + cb->offset_in_common; return cb; } /* * creates a coredev object for a device, prepares buffers, * creates buffer mappings, and notifies registered hotplug callbacks * about the new device. * * @param params device pointer to struct with device specific parameters * and handlers * @param coredev pointer to a value that receives the created coredev object * * return: 0 on success, <0 on error.
*/ int smscore_register_device(struct smsdevice_params_t *params, struct smscore_device_t **coredev, gfp_t gfp_buf_flags, void *mdev) { struct smscore_device_t *dev; u8 *buffer; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; #ifdef CONFIG_MEDIA_CONTROLLER_DVB dev->media_dev = mdev; #endif dev->gfp_buf_flags = gfp_buf_flags; /* init list entry so it could be safe in smscore_unregister_device */ INIT_LIST_HEAD(&dev->entry); /* init queues */ INIT_LIST_HEAD(&dev->clients); INIT_LIST_HEAD(&dev->buffers); /* init locks */ spin_lock_init(&dev->clientslock); spin_lock_init(&dev->bufferslock); /* init completion events */ init_completion(&dev->version_ex_done); init_completion(&dev->data_download_done); init_completion(&dev->data_validity_done); init_completion(&dev->trigger_done); init_completion(&dev->init_device_done); init_completion(&dev->reload_start_done); init_completion(&dev->resume_done); init_completion(&dev->gpio_configuration_done); init_completion(&dev->gpio_set_level_done); init_completion(&dev->gpio_get_level_done); init_completion(&dev->ir_init_done); /* Buffer management */ init_waitqueue_head(&dev->buffer_mng_waitq); /* alloc common buffer */ dev->common_buffer_size = params->buffer_size * params->num_buffers; if (params->usb_device) buffer = kzalloc(dev->common_buffer_size, GFP_KERNEL); else buffer = dma_alloc_coherent(params->device, dev->common_buffer_size, &dev->common_buffer_phys, GFP_KERNEL | dev->gfp_buf_flags); if (!buffer) { smscore_unregister_device(dev); return -ENOMEM; } dev->common_buffer = buffer; /* prepare dma buffers */ for (; dev->num_buffers < params->num_buffers; dev->num_buffers++, buffer += params->buffer_size) { struct smscore_buffer_t *cb; cb = smscore_createbuffer(buffer, dev->common_buffer, dev->common_buffer_phys); if (!cb) { smscore_unregister_device(dev); return -ENOMEM; } smscore_putbuffer(dev, cb); } pr_debug("allocated %d buffers\n", dev->num_buffers); dev->mode = DEVICE_MODE_NONE; dev->board_id = SMS_BOARD_UNKNOWN; dev->context = params->context; dev->device = params->device; dev->usb_device = params->usb_device; dev->setmode_handler = params->setmode_handler; dev->detectmode_handler = params->detectmode_handler; dev->sendrequest_handler = params->sendrequest_handler; dev->preload_handler = params->preload_handler; dev->postload_handler = params->postload_handler; dev->device_flags = params->flags; strscpy(dev->devpath, params->devpath, sizeof(dev->devpath)); smscore_registry_settype(dev->devpath, params->device_type); /* add device to devices list */ mutex_lock(&g_smscore_deviceslock); list_add(&dev->entry, &g_smscore_devices); mutex_unlock(&g_smscore_deviceslock); *coredev = dev; pr_debug("device %p created\n", dev); return 0; } EXPORT_SYMBOL_GPL(smscore_register_device); static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev, void *buffer, size_t size, struct completion *completion) { int rc; if (!completion) return -EINVAL; init_completion(completion); rc = coredev->sendrequest_handler(coredev->context, buffer, size); if (rc < 0) { pr_info("sendrequest returned error %d\n", rc); return rc; } return wait_for_completion_timeout(completion, msecs_to_jiffies(SMS_PROTOCOL_MAX_RAOUNDTRIP_MS)) ? 0 : -ETIME; } /* * Starts & enables IR operations * * return: 0 on success, < 0 on error. 
*/ static int smscore_init_ir(struct smscore_device_t *coredev) { int ir_io; int rc; void *buffer; coredev->ir.dev = NULL; ir_io = sms_get_board(smscore_get_board_id(coredev))->board_cfg.ir; if (ir_io) { /* use the IR sub-module only if an IR port exists */ pr_debug("IR loading\n"); rc = sms_ir_init(coredev); if (rc != 0) pr_err("Error initializing DTV IR sub-module\n"); else { buffer = kmalloc(sizeof(struct sms_msg_data2) + SMS_DMA_ALIGNMENT, GFP_KERNEL | coredev->gfp_buf_flags); if (buffer) { struct sms_msg_data2 *msg = (struct sms_msg_data2 *) SMS_ALIGN_ADDRESS(buffer); SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_START_IR_REQ, sizeof(struct sms_msg_data2)); msg->msg_data[0] = coredev->ir.controller; msg->msg_data[1] = coredev->ir.timeout; rc = smscore_sendrequest_and_wait(coredev, msg, msg->x_msg_header.msg_length, &coredev->ir_init_done); kfree(buffer); } else pr_err("Failed to allocate buffer for the IR initialization message\n"); } } else pr_info("IR port has not been detected\n"); return 0; } /* * configures device features according to board configuration structure. * * @param coredev pointer to a coredev object returned by * smscore_register_device * * return: 0 on success, <0 on error. */ static int smscore_configure_board(struct smscore_device_t *coredev) { struct sms_board *board; board = sms_get_board(coredev->board_id); if (!board) { pr_err("no board configuration exists.\n"); return -EINVAL; } if (board->mtu) { struct sms_msg_data mtu_msg; pr_debug("set max transmit unit %d\n", board->mtu); mtu_msg.x_msg_header.msg_src_id = 0; mtu_msg.x_msg_header.msg_dst_id = HIF_TASK; mtu_msg.x_msg_header.msg_flags = 0; mtu_msg.x_msg_header.msg_type = MSG_SMS_SET_MAX_TX_MSG_LEN_REQ; mtu_msg.x_msg_header.msg_length = sizeof(mtu_msg); mtu_msg.msg_data = board->mtu; coredev->sendrequest_handler(coredev->context, &mtu_msg, sizeof(mtu_msg)); } if (board->crystal) { struct sms_msg_data crys_msg; pr_debug("set crystal value %d\n", board->crystal); SMS_INIT_MSG(&crys_msg.x_msg_header, MSG_SMS_NEW_CRYSTAL_REQ, sizeof(crys_msg)); crys_msg.msg_data = board->crystal; coredev->sendrequest_handler(coredev->context, &crys_msg, sizeof(crys_msg)); } return 0; } /* * sets initial device mode and notifies client hotplugs that device is ready * * @param coredev pointer to a coredev object returned by * smscore_register_device * * return: 0 on success, <0 on error.
*/ int smscore_start_device(struct smscore_device_t *coredev) { int rc; int board_id = smscore_get_board_id(coredev); int mode = smscore_registry_getmode(coredev->devpath); /* Device is initialized as DEVICE_MODE_NONE */ if (board_id != SMS_BOARD_UNKNOWN && mode == DEVICE_MODE_NONE) mode = sms_get_board(board_id)->default_mode; rc = smscore_set_device_mode(coredev, mode); if (rc < 0) { pr_info("set device mode failed, rc %d\n", rc); return rc; } rc = smscore_configure_board(coredev); if (rc < 0) { pr_info("configure board failed, rc %d\n", rc); return rc; } mutex_lock(&g_smscore_deviceslock); rc = smscore_notify_callbacks(coredev, coredev->device, 1); smscore_init_ir(coredev); pr_debug("device %p started, rc %d\n", coredev, rc); mutex_unlock(&g_smscore_deviceslock); return rc; } EXPORT_SYMBOL_GPL(smscore_start_device); static int smscore_load_firmware_family2(struct smscore_device_t *coredev, void *buffer, size_t size) { struct sms_firmware *firmware = (struct sms_firmware *) buffer; struct sms_msg_data5 *msg; u32 mem_address, calc_checksum = 0; u32 i, *ptr; u8 *payload = firmware->payload; int rc = 0; firmware->start_address = le32_to_cpup((__le32 *)&firmware->start_address); firmware->length = le32_to_cpup((__le32 *)&firmware->length); mem_address = firmware->start_address; pr_debug("loading FW to addr 0x%x size %d\n", mem_address, firmware->length); if (coredev->preload_handler) { rc = coredev->preload_handler(coredev->context); if (rc < 0) return rc; } /* a PAGE_SIZE buffer should be enough and is DMA-aligned */ msg = kmalloc(PAGE_SIZE, GFP_KERNEL | coredev->gfp_buf_flags); if (!msg) return -ENOMEM; if (coredev->mode != DEVICE_MODE_NONE) { pr_debug("sending reload command.\n"); SMS_INIT_MSG(&msg->x_msg_header, MSG_SW_RELOAD_START_REQ, sizeof(struct sms_msg_hdr)); rc = smscore_sendrequest_and_wait(coredev, msg, msg->x_msg_header.msg_length, &coredev->reload_start_done); if (rc < 0) { pr_err("device reload failed, rc %d\n", rc); goto exit_fw_download; } mem_address = *(u32 *) &payload[20]; } for (i = 0, ptr = (u32 *)firmware->payload; i < firmware->length/4; i++, ptr++) calc_checksum += *ptr; while (size && rc >= 0) { struct sms_data_download *data_msg = (struct sms_data_download *) msg; int payload_size = min_t(int, size, SMS_MAX_PAYLOAD_SIZE); SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_DATA_DOWNLOAD_REQ, (u16)(sizeof(struct sms_msg_hdr) + sizeof(u32) + payload_size)); data_msg->mem_addr = mem_address; memcpy(data_msg->payload, payload, payload_size); rc = smscore_sendrequest_and_wait(coredev, data_msg, data_msg->x_msg_header.msg_length, &coredev->data_download_done); payload += payload_size; size -= payload_size; mem_address += payload_size; } if (rc < 0) goto exit_fw_download; pr_debug("sending MSG_SMS_DATA_VALIDITY_REQ expecting 0x%x\n", calc_checksum); SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_DATA_VALIDITY_REQ, sizeof(msg->x_msg_header) + sizeof(u32) * 3); msg->msg_data[0] = firmware->start_address; /* Entry point */ msg->msg_data[1] = firmware->length; msg->msg_data[2] = 0; /* Regular checksum */ rc = smscore_sendrequest_and_wait(coredev, msg, msg->x_msg_header.msg_length, &coredev->data_validity_done); if (rc < 0) goto exit_fw_download; if (coredev->mode == DEVICE_MODE_NONE) { pr_debug("sending MSG_SMS_SWDOWNLOAD_TRIGGER_REQ\n"); SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_SWDOWNLOAD_TRIGGER_REQ, sizeof(*msg)); msg->msg_data[0] = firmware->start_address; /* Entry point */ msg->msg_data[1] = 6; /* Priority */ msg->msg_data[2] = 0x200; /* Stack size */ msg->msg_data[3] = 0; /* Parameter */
msg->msg_data[4] = 4; /* Task ID */ rc = smscore_sendrequest_and_wait(coredev, msg, msg->x_msg_header.msg_length, &coredev->trigger_done); } else { SMS_INIT_MSG(&msg->x_msg_header, MSG_SW_RELOAD_EXEC_REQ, sizeof(struct sms_msg_hdr)); rc = coredev->sendrequest_handler(coredev->context, msg, msg->x_msg_header.msg_length); } if (rc < 0) goto exit_fw_download; /* * backward compatibility - wait for device_ready_done for * not more than 400 ms */ msleep(400); exit_fw_download: kfree(msg); if (coredev->postload_handler) { pr_debug("rc=%d, postload=0x%p\n", rc, coredev->postload_handler); if (rc >= 0) return coredev->postload_handler(coredev->context); } pr_debug("rc=%d\n", rc); return rc; } static char *smscore_fw_lkup[][DEVICE_MODE_MAX] = { [SMS_NOVA_A0] = { [DEVICE_MODE_DVBT] = SMS_FW_DVB_NOVA_12MHZ, [DEVICE_MODE_DVBH] = SMS_FW_DVB_NOVA_12MHZ, [DEVICE_MODE_DAB_TDMB] = SMS_FW_TDMB_NOVA_12MHZ, [DEVICE_MODE_DVBT_BDA] = SMS_FW_DVB_NOVA_12MHZ, [DEVICE_MODE_ISDBT] = SMS_FW_ISDBT_NOVA_12MHZ, [DEVICE_MODE_ISDBT_BDA] = SMS_FW_ISDBT_NOVA_12MHZ, }, [SMS_NOVA_B0] = { [DEVICE_MODE_DVBT] = SMS_FW_DVB_NOVA_12MHZ_B0, [DEVICE_MODE_DVBH] = SMS_FW_DVB_NOVA_12MHZ_B0, [DEVICE_MODE_DAB_TDMB] = SMS_FW_TDMB_NOVA_12MHZ_B0, [DEVICE_MODE_DVBT_BDA] = SMS_FW_DVB_NOVA_12MHZ_B0, [DEVICE_MODE_ISDBT] = SMS_FW_ISDBT_NOVA_12MHZ_B0, [DEVICE_MODE_ISDBT_BDA] = SMS_FW_ISDBT_NOVA_12MHZ_B0, [DEVICE_MODE_FM_RADIO] = SMS_FW_FM_RADIO, [DEVICE_MODE_FM_RADIO_BDA] = SMS_FW_FM_RADIO, }, [SMS_VEGA] = { [DEVICE_MODE_CMMB] = SMS_FW_CMMB_VEGA_12MHZ, }, [SMS_VENICE] = { [DEVICE_MODE_CMMB] = SMS_FW_CMMB_VENICE_12MHZ, }, [SMS_MING] = { [DEVICE_MODE_CMMB] = SMS_FW_CMMB_MING_APP, }, [SMS_PELE] = { [DEVICE_MODE_ISDBT] = SMS_FW_ISDBT_PELE, [DEVICE_MODE_ISDBT_BDA] = SMS_FW_ISDBT_PELE, }, [SMS_RIO] = { [DEVICE_MODE_DVBT] = SMS_FW_DVB_RIO, [DEVICE_MODE_DVBH] = SMS_FW_DVBH_RIO, [DEVICE_MODE_DVBT_BDA] = SMS_FW_DVB_RIO, [DEVICE_MODE_ISDBT] = SMS_FW_ISDBT_RIO, [DEVICE_MODE_ISDBT_BDA] = SMS_FW_ISDBT_RIO, [DEVICE_MODE_FM_RADIO] = SMS_FW_FM_RADIO_RIO, [DEVICE_MODE_FM_RADIO_BDA] = SMS_FW_FM_RADIO_RIO, }, [SMS_DENVER_1530] = { [DEVICE_MODE_ATSC] = SMS_FW_ATSC_DENVER, }, [SMS_DENVER_2160] = { [DEVICE_MODE_DAB_TDMB] = SMS_FW_TDMB_DENVER, }, }; /* * get firmware file name from one of the two mechanisms: sms_boards or * smscore_fw_lkup. Tries sms_boards for the current board first; if no * name is found there, falls back to the smscore_fw_lkup table. * @param coredev pointer to a coredev object returned by * smscore_register_device * @param mode requested mode of operation * * return: firmware file name on success, NULL on error.
*/ static char *smscore_get_fw_filename(struct smscore_device_t *coredev, int mode) { char **fw; int board_id = smscore_get_board_id(coredev); enum sms_device_type_st type; type = smscore_registry_gettype(coredev->devpath); /* Prevent looking outside the smscore_fw_lkup table */ if (type <= SMS_UNKNOWN_TYPE || type >= SMS_NUM_OF_DEVICE_TYPES) return NULL; if (mode <= DEVICE_MODE_NONE || mode >= DEVICE_MODE_MAX) return NULL; pr_debug("trying to get fw name from sms_boards board_id %d mode %d\n", board_id, mode); fw = sms_get_board(board_id)->fw; if (!fw || !fw[mode]) { pr_debug("cannot find fw name in sms_boards, getting from lookup table mode %d type %d\n", mode, type); return smscore_fw_lkup[type][mode]; } return fw[mode]; } /* * loads the firmware for the requested mode into a buffer and downloads it * to the device * * @param coredev pointer to a coredev object returned by * smscore_register_device * @param mode requested mode of operation, used to select the firmware file * * return: 0 on success, <0 on error. */ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev, int mode) { int rc = -ENOENT; u8 *fw_buf; u32 fw_buf_size; const struct firmware *fw; char *fw_filename = smscore_get_fw_filename(coredev, mode); if (!fw_filename) { pr_err("mode %d not supported on this device\n", mode); return -ENOENT; } pr_debug("Firmware name: %s\n", fw_filename); if (!(coredev->device_flags & SMS_DEVICE_FAMILY2)) return -EINVAL; rc = request_firmware(&fw, fw_filename, coredev->device); if (rc < 0) { pr_err("failed to open firmware file '%s'\n", fw_filename); return rc; } pr_debug("read fw %s, buffer size=0x%zx\n", fw_filename, fw->size); fw_buf = kmalloc(ALIGN(fw->size + sizeof(struct sms_firmware), SMS_ALLOC_ALIGNMENT), GFP_KERNEL | coredev->gfp_buf_flags); if (!fw_buf) { pr_err("failed to allocate firmware buffer\n"); rc = -ENOMEM; } else { memcpy(fw_buf, fw->data, fw->size); fw_buf_size = fw->size; rc = smscore_load_firmware_family2(coredev, fw_buf, fw_buf_size); } kfree(fw_buf); release_firmware(fw); return rc; } /* * notifies all clients registered with the device, notifies hotplugs, * frees all buffers and the coredev object * * @param coredev pointer to a coredev object returned by * smscore_register_device
*/ void smscore_unregister_device(struct smscore_device_t *coredev) { struct smscore_buffer_t *cb; int num_buffers = 0; int retry = 0; mutex_lock(&g_smscore_deviceslock); /* Release input device (IR) resources */ sms_ir_exit(coredev); smscore_notify_clients(coredev); smscore_notify_callbacks(coredev, NULL, 0); /* at this point all buffers should be back * onresponse must no longer be called */ while (1) { while (!list_empty(&coredev->buffers)) { cb = (struct smscore_buffer_t *) coredev->buffers.next; list_del(&cb->entry); kfree(cb); num_buffers++; } if (num_buffers == coredev->num_buffers) break; if (++retry > 10) { pr_info("exiting although not all buffers released.\n"); break; } pr_debug("waiting for %d buffer(s)\n", coredev->num_buffers - num_buffers); mutex_unlock(&g_smscore_deviceslock); msleep(100); mutex_lock(&g_smscore_deviceslock); } pr_debug("freed %d buffers\n", num_buffers); if (coredev->common_buffer) { if (coredev->usb_device) kfree(coredev->common_buffer); else dma_free_coherent(coredev->device, coredev->common_buffer_size, coredev->common_buffer, coredev->common_buffer_phys); } kfree(coredev->fw_buf); list_del(&coredev->entry); kfree(coredev); mutex_unlock(&g_smscore_deviceslock); pr_debug("device %p destroyed\n", coredev); } EXPORT_SYMBOL_GPL(smscore_unregister_device); static int smscore_detect_mode(struct smscore_device_t *coredev) { void *buffer = kmalloc(sizeof(struct sms_msg_hdr) + SMS_DMA_ALIGNMENT, GFP_KERNEL | coredev->gfp_buf_flags); struct sms_msg_hdr *msg = (struct sms_msg_hdr *) SMS_ALIGN_ADDRESS(buffer); int rc; if (!buffer) return -ENOMEM; SMS_INIT_MSG(msg, MSG_SMS_GET_VERSION_EX_REQ, sizeof(struct sms_msg_hdr)); rc = smscore_sendrequest_and_wait(coredev, msg, msg->msg_length, &coredev->version_ex_done); if (rc == -ETIME) { pr_err("MSG_SMS_GET_VERSION_EX_REQ failed first try\n"); if (wait_for_completion_timeout(&coredev->resume_done, msecs_to_jiffies(5000))) { rc = smscore_sendrequest_and_wait( coredev, msg, msg->msg_length, &coredev->version_ex_done); if (rc < 0) pr_err("MSG_SMS_GET_VERSION_EX_REQ failed second try, rc %d\n", rc); } else rc = -ETIME; } kfree(buffer); return rc; } /* * send init device request and wait for response * * @param coredev pointer to a coredev object returned by * smscore_register_device * @param mode requested mode of operation * * return: 0 on success, <0 on error. */ static int smscore_init_device(struct smscore_device_t *coredev, int mode) { void *buffer; struct sms_msg_data *msg; int rc = 0; buffer = kmalloc(sizeof(struct sms_msg_data) + SMS_DMA_ALIGNMENT, GFP_KERNEL | coredev->gfp_buf_flags); if (!buffer) return -ENOMEM; msg = (struct sms_msg_data *)SMS_ALIGN_ADDRESS(buffer); SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_INIT_DEVICE_REQ, sizeof(struct sms_msg_data)); msg->msg_data = mode; rc = smscore_sendrequest_and_wait(coredev, msg, msg->x_msg_header. msg_length, &coredev->init_device_done); kfree(buffer); return rc; } /* * calls device handler to change mode of operation * NOTE: stellar/usb may disconnect when changing mode * * @param coredev pointer to a coredev object returned by * smscore_register_device * @param mode requested mode of operation * * return: 0 on success, <0 on error. 
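 * Illustrative use (sketch, not from this file): a DVB client requesting
 * DVB-T BDA operation once the device is up:
 *	rc = smscore_set_device_mode(coredev, DEVICE_MODE_DVBT_BDA);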
*/ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode) { int rc = 0; pr_debug("set device mode to %d\n", mode); if (coredev->device_flags & SMS_DEVICE_FAMILY2) { if (mode <= DEVICE_MODE_NONE || mode >= DEVICE_MODE_MAX) { pr_err("invalid mode specified %d\n", mode); return -EINVAL; } smscore_registry_setmode(coredev->devpath, mode); if (!(coredev->device_flags & SMS_DEVICE_NOT_READY)) { rc = smscore_detect_mode(coredev); if (rc < 0) { pr_err("mode detect failed %d\n", rc); return rc; } } if (coredev->mode == mode) { pr_debug("device mode %d already set\n", mode); return 0; } if (!(coredev->modes_supported & (1 << mode))) { rc = smscore_load_firmware_from_file(coredev, mode); if (rc >= 0) pr_debug("firmware download success\n"); } else { pr_debug("mode %d is already supported by running firmware\n", mode); } if (coredev->fw_version >= 0x800) { rc = smscore_init_device(coredev, mode); if (rc < 0) pr_err("device init failed, rc %d.\n", rc); } } else { if (mode <= DEVICE_MODE_NONE || mode >= DEVICE_MODE_MAX) { pr_err("invalid mode specified %d\n", mode); return -EINVAL; } smscore_registry_setmode(coredev->devpath, mode); if (coredev->detectmode_handler) coredev->detectmode_handler(coredev->context, &coredev->mode); if (coredev->mode != mode && coredev->setmode_handler) rc = coredev->setmode_handler(coredev->context, mode); } if (rc >= 0) { char *buffer; coredev->mode = mode; coredev->device_flags &= ~SMS_DEVICE_NOT_READY; buffer = kmalloc(sizeof(struct sms_msg_data) + SMS_DMA_ALIGNMENT, GFP_KERNEL | coredev->gfp_buf_flags); if (buffer) { struct sms_msg_data *msg = (struct sms_msg_data *) SMS_ALIGN_ADDRESS(buffer); SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_INIT_DEVICE_REQ, sizeof(struct sms_msg_data)); msg->msg_data = mode; rc = smscore_sendrequest_and_wait( coredev, msg, msg->x_msg_header.msg_length, &coredev->init_device_done); kfree(buffer); } } if (rc < 0) pr_err("return error code %d.\n", rc); else pr_debug("Success setting device mode.\n"); return rc; } /* * calls device handler to get current mode of operation * * @param coredev pointer to a coredev object returned by * smscore_register_device * * return: current mode */ int smscore_get_device_mode(struct smscore_device_t *coredev) { return coredev->mode; } EXPORT_SYMBOL_GPL(smscore_get_device_mode); /* * find client by response id & type within the clients list. * return client handle or NULL. 
* * @param coredev pointer to a coredev object returned by * smscore_register_device * @param data_type client data type (SMS_DONT_CARE for all types) * @param id client id (SMS_DONT_CARE for all ids) * */ static struct smscore_client_t *smscore_find_client(struct smscore_device_t *coredev, int data_type, int id) { struct list_head *first; struct smscore_client_t *client; unsigned long flags; struct list_head *firstid; struct smscore_idlist_t *client_id; spin_lock_irqsave(&coredev->clientslock, flags); first = &coredev->clients; list_for_each_entry(client, first, entry) { firstid = &client->idlist; list_for_each_entry(client_id, firstid, entry) { if ((client_id->id == id) && (client_id->data_type == data_type || (client_id->data_type == 0))) goto found; } } client = NULL; found: spin_unlock_irqrestore(&coredev->clientslock, flags); return client; } /* * find client by response id/type and call the client's onresponse handler; * return the buffer to the pool on error * * @param coredev pointer to a coredev object returned by * smscore_register_device * @param cb pointer to response buffer descriptor * */ void smscore_onresponse(struct smscore_device_t *coredev, struct smscore_buffer_t *cb) { struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) ((u8 *) cb->p + cb->offset); struct smscore_client_t *client; int rc = -EBUSY; static unsigned long last_sample_time; /* = 0; */ static int data_total; /* = 0; */ unsigned long time_now = jiffies_to_msecs(jiffies); if (!last_sample_time) last_sample_time = time_now; if (time_now - last_sample_time > 10000) { pr_debug("data rate %d bytes/sec\n", (int)((data_total * 1000) / (time_now - last_sample_time))); last_sample_time = time_now; data_total = 0; } data_total += cb->size; /* Do we need to re-route? */ if ((phdr->msg_type == MSG_SMS_HO_PER_SLICES_IND) || (phdr->msg_type == MSG_SMS_TRANSMISSION_IND)) { if (coredev->mode == DEVICE_MODE_DVBT_BDA) phdr->msg_dst_id = DVBT_BDA_CONTROL_MSG_ID; } client = smscore_find_client(coredev, phdr->msg_type, phdr->msg_dst_id); /* If no client is registered for this type & id, * smscore_find_client() falls back to a control client * whose type is not registered */ if (client) rc = client->onresponse_handler(client->context, cb); if (rc < 0) { switch (phdr->msg_type) { case MSG_SMS_ISDBT_TUNE_RES: break; case MSG_SMS_RF_TUNE_RES: break; case MSG_SMS_SIGNAL_DETECTED_IND: break; case MSG_SMS_NO_SIGNAL_IND: break; case MSG_SMS_SPI_INT_LINE_SET_RES: break; case MSG_SMS_INTERFACE_LOCK_IND: break; case MSG_SMS_INTERFACE_UNLOCK_IND: break; case MSG_SMS_GET_VERSION_EX_RES: { struct sms_version_res *ver = (struct sms_version_res *) phdr; pr_debug("Firmware id %d prots 0x%x ver %d.%d\n", ver->firmware_id, ver->supported_protocols, ver->rom_ver_major, ver->rom_ver_minor); coredev->mode = ver->firmware_id == 255 ?
DEVICE_MODE_NONE : ver->firmware_id; coredev->modes_supported = ver->supported_protocols; coredev->fw_version = ver->rom_ver_major << 8 | ver->rom_ver_minor; complete(&coredev->version_ex_done); break; } case MSG_SMS_INIT_DEVICE_RES: complete(&coredev->init_device_done); break; case MSG_SW_RELOAD_START_RES: complete(&coredev->reload_start_done); break; case MSG_SMS_DATA_VALIDITY_RES: { struct sms_msg_data *validity = (struct sms_msg_data *) phdr; pr_debug("MSG_SMS_DATA_VALIDITY_RES, checksum = 0x%x\n", validity->msg_data); complete(&coredev->data_validity_done); break; } case MSG_SMS_DATA_DOWNLOAD_RES: complete(&coredev->data_download_done); break; case MSG_SW_RELOAD_EXEC_RES: break; case MSG_SMS_SWDOWNLOAD_TRIGGER_RES: complete(&coredev->trigger_done); break; case MSG_SMS_SLEEP_RESUME_COMP_IND: complete(&coredev->resume_done); break; case MSG_SMS_GPIO_CONFIG_EX_RES: complete(&coredev->gpio_configuration_done); break; case MSG_SMS_GPIO_SET_LEVEL_RES: complete(&coredev->gpio_set_level_done); break; case MSG_SMS_GPIO_GET_LEVEL_RES: { u32 *msgdata = (u32 *) phdr; coredev->gpio_get_res = msgdata[1]; pr_debug("gpio level %d\n", coredev->gpio_get_res); complete(&coredev->gpio_get_level_done); break; } case MSG_SMS_START_IR_RES: complete(&coredev->ir_init_done); break; case MSG_SMS_IR_SAMPLES_IND: sms_ir_event(coredev, (const char *) ((char *)phdr + sizeof(struct sms_msg_hdr)), (int)phdr->msg_length - sizeof(struct sms_msg_hdr)); break; case MSG_SMS_DVBT_BDA_DATA: /* * It can be received here, if the frontend is * tuned into a valid channel and the proper firmware * is loaded. That happens when the module got removed * and re-inserted, without powering the device off */ break; default: pr_debug("message %s(%d) not handled.\n", smscore_translate_msg(phdr->msg_type), phdr->msg_type); break; } smscore_putbuffer(coredev, cb); } } EXPORT_SYMBOL_GPL(smscore_onresponse); /* * return pointer to next free buffer descriptor from core pool * * @param coredev pointer to a coredev object returned by * smscore_register_device * * return: pointer to descriptor on success, NULL on error. 
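 * Added note: get_entry() below is the non-blocking helper, while
 * smscore_getbuffer() sleeps on buffer_mng_waitq until a descriptor is
 * available, so a typical producer path looks like (sketch):
 *	struct smscore_buffer_t *cb = smscore_getbuffer(coredev);
 *	... fill cb->p, cb->offset and cb->size from the transfer ...
 *	smscore_onresponse(coredev, cb);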
*/ static struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev) { struct smscore_buffer_t *cb = NULL; unsigned long flags; spin_lock_irqsave(&coredev->bufferslock, flags); if (!list_empty(&coredev->buffers)) { cb = (struct smscore_buffer_t *) coredev->buffers.next; list_del(&cb->entry); } spin_unlock_irqrestore(&coredev->bufferslock, flags); return cb; } struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev) { struct smscore_buffer_t *cb = NULL; wait_event(coredev->buffer_mng_waitq, (cb = get_entry(coredev))); return cb; } EXPORT_SYMBOL_GPL(smscore_getbuffer); /* * return buffer descriptor to a pool * * @param coredev pointer to a coredev object returned by * smscore_register_device * @param cb pointer to buffer descriptor * */ void smscore_putbuffer(struct smscore_device_t *coredev, struct smscore_buffer_t *cb) { wake_up_interruptible(&coredev->buffer_mng_waitq); list_add_locked(&cb->entry, &coredev->buffers, &coredev->bufferslock); } EXPORT_SYMBOL_GPL(smscore_putbuffer); static int smscore_validate_client(struct smscore_device_t *coredev, struct smscore_client_t *client, int data_type, int id) { struct smscore_idlist_t *listentry; struct smscore_client_t *registered_client; if (!client) { pr_err("bad parameter.\n"); return -EINVAL; } registered_client = smscore_find_client(coredev, data_type, id); if (registered_client == client) return 0; if (registered_client) { pr_err("The msg ID is already registered to another client.\n"); return -EEXIST; } listentry = kzalloc(sizeof(*listentry), GFP_KERNEL); if (!listentry) return -ENOMEM; listentry->id = id; listentry->data_type = data_type; list_add_locked(&listentry->entry, &client->idlist, &coredev->clientslock); return 0; } /* * creates smsclient object and checks that its id is not already taken by * another client * * @param coredev pointer to a coredev object from clients hotplug * @param initial_id all messages with this id would be sent to this client * @param data_type all messages of this type would be sent to this client * @param onresponse_handler client handler that is called to * process incoming messages * @param onremove_handler client handler that is called when device is removed * @param context client-specific context * @param client pointer to a value that receives created smsclient object * * return: 0 on success, <0 on error.
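 * Illustrative registration (sketch; my_onresponse, my_onremove and
 * my_ctx are hypothetical client symbols, not defined here):
 *	struct smsclient_params_t params = {
 *		.initial_id = HIF_TASK,
 *		.data_type = MSG_SMS_DVBT_BDA_DATA,
 *		.onresponse_handler = my_onresponse,
 *		.onremove_handler = my_onremove,
 *		.context = my_ctx,
 *	};
 *	rc = smscore_register_client(coredev, &params, &client);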
*/ int smscore_register_client(struct smscore_device_t *coredev, struct smsclient_params_t *params, struct smscore_client_t **client) { struct smscore_client_t *newclient; /* check that no other channel with same parameters exists */ if (smscore_find_client(coredev, params->data_type, params->initial_id)) { pr_err("Client already exists.\n"); return -EEXIST; } newclient = kzalloc(sizeof(*newclient), GFP_KERNEL); if (!newclient) return -ENOMEM; INIT_LIST_HEAD(&newclient->idlist); newclient->coredev = coredev; newclient->onresponse_handler = params->onresponse_handler; newclient->onremove_handler = params->onremove_handler; newclient->context = params->context; list_add_locked(&newclient->entry, &coredev->clients, &coredev->clientslock); smscore_validate_client(coredev, newclient, params->data_type, params->initial_id); *client = newclient; pr_debug("%p %d %d\n", params->context, params->data_type, params->initial_id); return 0; } EXPORT_SYMBOL_GPL(smscore_register_client); /* * frees smsclient object and all subclients associated with it * * @param client pointer to smsclient object returned by * smscore_register_client * */ void smscore_unregister_client(struct smscore_client_t *client) { struct smscore_device_t *coredev = client->coredev; unsigned long flags; spin_lock_irqsave(&coredev->clientslock, flags); while (!list_empty(&client->idlist)) { struct smscore_idlist_t *identry = (struct smscore_idlist_t *) client->idlist.next; list_del(&identry->entry); kfree(identry); } pr_debug("%p\n", client->context); list_del(&client->entry); kfree(client); spin_unlock_irqrestore(&coredev->clientslock, flags); } EXPORT_SYMBOL_GPL(smscore_unregister_client); /* * verifies that source id is not taken by another client, * calls device handler to send requests to the device * * @param client pointer to smsclient object returned by * smscore_register_client * @param buffer pointer to a request buffer * @param size size (in bytes) of request buffer * * return: 0 on success, <0 on error. */ int smsclient_sendrequest(struct smscore_client_t *client, void *buffer, size_t size) { struct smscore_device_t *coredev; struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer; int rc; if (!client) { pr_err("Got NULL client\n"); return -EINVAL; } coredev = client->coredev; /* check that no other channel with same id exists */ if (!coredev) { pr_err("Got NULL coredev\n"); return -EINVAL; } rc = smscore_validate_client(client->coredev, client, 0, phdr->msg_src_id); if (rc < 0) return rc; return coredev->sendrequest_handler(coredev->context, buffer, size); } EXPORT_SYMBOL_GPL(smsclient_sendrequest); /* old GPIO management implementation */ int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin, struct smscore_config_gpio *pinconfig) { struct { struct sms_msg_hdr hdr; u32 data[6]; } msg; if (coredev->device_flags & SMS_DEVICE_FAMILY2) { msg.hdr.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; msg.hdr.msg_dst_id = HIF_TASK; msg.hdr.msg_flags = 0; msg.hdr.msg_type = MSG_SMS_GPIO_CONFIG_EX_REQ; msg.hdr.msg_length = sizeof(msg); msg.data[0] = pin; msg.data[1] = pinconfig->pullupdown; /* Convert slew rate for Nova: Fast(0) = 3 / Slow(1) = 0; */ msg.data[2] = pinconfig->outputslewrate == 0 ?
3 : 0; switch (pinconfig->outputdriving) { case SMS_GPIO_OUTPUTDRIVING_S_16mA: msg.data[3] = 7; /* Nova - 16mA */ break; case SMS_GPIO_OUTPUTDRIVING_S_12mA: msg.data[3] = 5; /* Nova - 11mA */ break; case SMS_GPIO_OUTPUTDRIVING_S_8mA: msg.data[3] = 3; /* Nova - 7mA */ break; case SMS_GPIO_OUTPUTDRIVING_S_4mA: default: msg.data[3] = 2; /* Nova - 4mA */ break; } msg.data[4] = pinconfig->direction; msg.data[5] = 0; } else /* TODO: SMS_DEVICE_FAMILY1 */ return -EINVAL; return coredev->sendrequest_handler(coredev->context, &msg, sizeof(msg)); } int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level) { struct { struct sms_msg_hdr hdr; u32 data[3]; } msg; if (pin > MAX_GPIO_PIN_NUMBER) return -EINVAL; msg.hdr.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; msg.hdr.msg_dst_id = HIF_TASK; msg.hdr.msg_flags = 0; msg.hdr.msg_type = MSG_SMS_GPIO_SET_LEVEL_REQ; msg.hdr.msg_length = sizeof(msg); msg.data[0] = pin; msg.data[1] = level ? 1 : 0; msg.data[2] = 0; return coredev->sendrequest_handler(coredev->context, &msg, sizeof(msg)); } /* new GPIO management implementation */ static int get_gpio_pin_params(u32 pin_num, u32 *p_translatedpin_num, u32 *p_group_num, u32 *p_group_cfg) { *p_group_cfg = 1; if (pin_num <= 1) { *p_translatedpin_num = 0; *p_group_num = 9; *p_group_cfg = 2; } else if (pin_num >= 2 && pin_num <= 6) { *p_translatedpin_num = 2; *p_group_num = 0; *p_group_cfg = 2; } else if (pin_num >= 7 && pin_num <= 11) { *p_translatedpin_num = 7; *p_group_num = 1; } else if (pin_num >= 12 && pin_num <= 15) { *p_translatedpin_num = 12; *p_group_num = 2; *p_group_cfg = 3; } else if (pin_num == 16) { *p_translatedpin_num = 16; *p_group_num = 23; } else if (pin_num >= 17 && pin_num <= 24) { *p_translatedpin_num = 17; *p_group_num = 3; } else if (pin_num == 25) { *p_translatedpin_num = 25; *p_group_num = 6; } else if (pin_num >= 26 && pin_num <= 28) { *p_translatedpin_num = 26; *p_group_num = 4; } else if (pin_num == 29) { *p_translatedpin_num = 29; *p_group_num = 5; *p_group_cfg = 2; } else if (pin_num == 30) { *p_translatedpin_num = 30; *p_group_num = 8; } else if (pin_num == 31) { *p_translatedpin_num = 31; *p_group_num = 17; } else return -1; *p_group_cfg <<= 24; return 0; } int smscore_gpio_configure(struct smscore_device_t *coredev, u8 pin_num, struct smscore_config_gpio *p_gpio_config) { u32 total_len; u32 translatedpin_num = 0; u32 group_num = 0; u32 electric_char; u32 group_cfg; void *buffer; int rc; struct set_gpio_msg { struct sms_msg_hdr x_msg_header; u32 msg_data[6]; } *p_msg; if (pin_num > MAX_GPIO_PIN_NUMBER) return -EINVAL; if (!p_gpio_config) return -EINVAL; total_len = sizeof(struct sms_msg_hdr) + (sizeof(u32) * 6); buffer = kmalloc(total_len + SMS_DMA_ALIGNMENT, GFP_KERNEL | coredev->gfp_buf_flags); if (!buffer) return -ENOMEM; p_msg = (struct set_gpio_msg *) SMS_ALIGN_ADDRESS(buffer); p_msg->x_msg_header.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; p_msg->x_msg_header.msg_dst_id = HIF_TASK; p_msg->x_msg_header.msg_flags = 0; p_msg->x_msg_header.msg_length = (u16) total_len; p_msg->msg_data[0] = pin_num; if (!(coredev->device_flags & SMS_DEVICE_FAMILY2)) { p_msg->x_msg_header.msg_type = MSG_SMS_GPIO_CONFIG_REQ; if (get_gpio_pin_params(pin_num, &translatedpin_num, &group_num, &group_cfg) != 0) { rc = -EINVAL; goto free; } p_msg->msg_data[1] = translatedpin_num; p_msg->msg_data[2] = group_num; electric_char = (p_gpio_config->pullupdown) | (p_gpio_config->inputcharacteristics << 2) | (p_gpio_config->outputslewrate << 3) | (p_gpio_config->outputdriving << 4); p_msg->msg_data[3] = 
electric_char; p_msg->msg_data[4] = p_gpio_config->direction; p_msg->msg_data[5] = group_cfg; } else { p_msg->x_msg_header.msg_type = MSG_SMS_GPIO_CONFIG_EX_REQ; p_msg->msg_data[1] = p_gpio_config->pullupdown; p_msg->msg_data[2] = p_gpio_config->outputslewrate; p_msg->msg_data[3] = p_gpio_config->outputdriving; p_msg->msg_data[4] = p_gpio_config->direction; p_msg->msg_data[5] = 0; } rc = smscore_sendrequest_and_wait(coredev, p_msg, total_len, &coredev->gpio_configuration_done); if (rc != 0) { if (rc == -ETIME) pr_err("smscore_gpio_configure timeout\n"); else pr_err("smscore_gpio_configure error\n"); } free: kfree(buffer); return rc; } int smscore_gpio_set_level(struct smscore_device_t *coredev, u8 pin_num, u8 new_level) { u32 total_len; int rc; void *buffer; struct set_gpio_msg { struct sms_msg_hdr x_msg_header; u32 msg_data[3]; /* keep it 3! */ } *p_msg; if ((new_level > 1) || (pin_num > MAX_GPIO_PIN_NUMBER)) return -EINVAL; total_len = sizeof(struct sms_msg_hdr) + (3 * sizeof(u32)); /* keep it 3! */ buffer = kmalloc(total_len + SMS_DMA_ALIGNMENT, GFP_KERNEL | coredev->gfp_buf_flags); if (!buffer) return -ENOMEM; p_msg = (struct set_gpio_msg *) SMS_ALIGN_ADDRESS(buffer); p_msg->x_msg_header.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; p_msg->x_msg_header.msg_dst_id = HIF_TASK; p_msg->x_msg_header.msg_flags = 0; p_msg->x_msg_header.msg_type = MSG_SMS_GPIO_SET_LEVEL_REQ; p_msg->x_msg_header.msg_length = (u16) total_len; p_msg->msg_data[0] = pin_num; p_msg->msg_data[1] = new_level; /* Send message to SMS */ rc = smscore_sendrequest_and_wait(coredev, p_msg, total_len, &coredev->gpio_set_level_done); if (rc != 0) { if (rc == -ETIME) pr_err("smscore_gpio_set_level timeout\n"); else pr_err("smscore_gpio_set_level error\n"); } kfree(buffer); return rc; } int smscore_gpio_get_level(struct smscore_device_t *coredev, u8 pin_num, u8 *level) { u32 total_len; int rc; void *buffer; struct set_gpio_msg { struct sms_msg_hdr x_msg_header; u32 msg_data[2]; } *p_msg; if (pin_num > MAX_GPIO_PIN_NUMBER) return -EINVAL; total_len = sizeof(struct sms_msg_hdr) + (2 * sizeof(u32)); buffer = kmalloc(total_len + SMS_DMA_ALIGNMENT, GFP_KERNEL | coredev->gfp_buf_flags); if (!buffer) return -ENOMEM; p_msg = (struct set_gpio_msg *) SMS_ALIGN_ADDRESS(buffer); p_msg->x_msg_header.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; p_msg->x_msg_header.msg_dst_id = HIF_TASK; p_msg->x_msg_header.msg_flags = 0; p_msg->x_msg_header.msg_type = MSG_SMS_GPIO_GET_LEVEL_REQ; p_msg->x_msg_header.msg_length = (u16) total_len; p_msg->msg_data[0] = pin_num; p_msg->msg_data[1] = 0; /* Send message to SMS */ rc = smscore_sendrequest_and_wait(coredev, p_msg, total_len, &coredev->gpio_get_level_done); if (rc != 0) { if (rc == -ETIME) pr_err("smscore_gpio_get_level timeout\n"); else pr_err("smscore_gpio_get_level error\n"); } kfree(buffer); /* There is a race between other gpio_get_level() calls and the copy of * the single global 'coredev->gpio_get_res' to the function's variable * 'level' */ *level = coredev->gpio_get_res; return rc; } static int __init smscore_module_init(void) { INIT_LIST_HEAD(&g_smscore_notifyees); INIT_LIST_HEAD(&g_smscore_devices); INIT_LIST_HEAD(&g_smscore_registry); return 0; } static void __exit smscore_module_exit(void) { mutex_lock(&g_smscore_deviceslock); while (!list_empty(&g_smscore_notifyees)) { struct smscore_device_notifyee_t *notifyee = (struct smscore_device_notifyee_t *) g_smscore_notifyees.next; list_del(&notifyee->entry); kfree(notifyee); } mutex_unlock(&g_smscore_deviceslock); mutex_lock(&g_smscore_registrylock); while
(!list_empty(&g_smscore_registry)) { struct smscore_registry_entry_t *entry = (struct smscore_registry_entry_t *) g_smscore_registry.next; list_del(&entry->entry); kfree(entry); } mutex_unlock(&g_smscore_registrylock); pr_debug("\n"); } module_init(smscore_module_init); module_exit(smscore_module_exit); MODULE_DESCRIPTION("Siano MDTV Core module"); MODULE_AUTHOR("Siano Mobile Silicon, Inc. <uris@siano-ms.com>"); MODULE_LICENSE("GPL"); /* This should match what's defined in smscoreapi.h */ MODULE_FIRMWARE(SMS_FW_ATSC_DENVER); MODULE_FIRMWARE(SMS_FW_CMMB_MING_APP); MODULE_FIRMWARE(SMS_FW_CMMB_VEGA_12MHZ); MODULE_FIRMWARE(SMS_FW_CMMB_VENICE_12MHZ); MODULE_FIRMWARE(SMS_FW_DVBH_RIO); MODULE_FIRMWARE(SMS_FW_DVB_NOVA_12MHZ_B0); MODULE_FIRMWARE(SMS_FW_DVB_NOVA_12MHZ); MODULE_FIRMWARE(SMS_FW_DVB_RIO); MODULE_FIRMWARE(SMS_FW_FM_RADIO); MODULE_FIRMWARE(SMS_FW_FM_RADIO_RIO); MODULE_FIRMWARE(SMS_FW_DVBT_HCW_55XXX); MODULE_FIRMWARE(SMS_FW_ISDBT_HCW_55XXX); MODULE_FIRMWARE(SMS_FW_ISDBT_NOVA_12MHZ_B0); MODULE_FIRMWARE(SMS_FW_ISDBT_NOVA_12MHZ); MODULE_FIRMWARE(SMS_FW_ISDBT_PELE); MODULE_FIRMWARE(SMS_FW_ISDBT_RIO); MODULE_FIRMWARE(SMS_FW_DVBT_NOVA_A); MODULE_FIRMWARE(SMS_FW_DVBT_NOVA_B); MODULE_FIRMWARE(SMS_FW_DVBT_STELLAR); MODULE_FIRMWARE(SMS_FW_TDMB_DENVER); MODULE_FIRMWARE(SMS_FW_TDMB_NOVA_12MHZ_B0); MODULE_FIRMWARE(SMS_FW_TDMB_NOVA_12MHZ);
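/*
 * Example (added for illustration, not part of the original file): the
 * minimal shape of a client module consuming the hotplug API above. All
 * "demo_" names are hypothetical, and the sketch is compiled out with
 * #if 0 so it cannot clash with the module_init/module_exit above.
 */
#if 0
static int demo_hotplug(struct smscore_device_t *coredev,
			struct device *device, int arrival)
{
	/* runs under g_smscore_deviceslock; a negative return value
	 * stops the notification loop (and aborts registration) */
	pr_info("demo: device %p %s\n", coredev,
		arrival ? "arrived" : "removed");
	return 0;
}

static int __init demo_init(void)
{
	/* registration also replays arrival for already-present devices */
	return smscore_register_hotplug(demo_hotplug);
}

static void __exit demo_exit(void)
{
	smscore_unregister_hotplug(demo_hotplug);
}
#endif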
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/can/dev.h * * Definitions for the CAN network device driver interface * * Copyright (C) 2006 Andrey Volkov <avolkov@varma-el.com> * Varma Electronics Oy * * Copyright (C) 2008 Wolfgang Grandegger <wg@grandegger.com> * */ #ifndef _CAN_DEV_H #define _CAN_DEV_H #include <linux/can.h> #include <linux/can/bittiming.h> #include <linux/can/error.h> #include <linux/can/length.h> #include <linux/can/netlink.h> #include <linux/can/skb.h> #include <linux/ethtool.h> #include <linux/netdevice.h> /* * CAN mode */ enum can_mode { CAN_MODE_STOP = 0, CAN_MODE_START, CAN_MODE_SLEEP }; enum can_termination_gpio { CAN_TERMINATION_GPIO_DISABLED = 0, CAN_TERMINATION_GPIO_ENABLED, CAN_TERMINATION_GPIO_MAX, }; struct data_bittiming_params { const struct can_bittiming_const *data_bittiming_const; struct can_bittiming data_bittiming; const struct can_tdc_const *tdc_const; struct can_tdc tdc; const u32 *data_bitrate_const; unsigned int data_bitrate_const_cnt; int (*do_set_data_bittiming)(struct net_device *dev); int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv); }; /* * CAN common private data */ struct can_priv { struct net_device *dev; struct can_device_stats can_stats; const struct can_bittiming_const *bittiming_const; struct can_bittiming bittiming; struct data_bittiming_params fd; unsigned int bitrate_const_cnt; const u32 *bitrate_const; u32 bitrate_max; struct can_clock clock; unsigned int termination_const_cnt; const u16 *termination_const; u16 termination; struct gpio_desc *termination_gpio; u16 termination_gpio_ohms[CAN_TERMINATION_GPIO_MAX]; unsigned int echo_skb_max; struct sk_buff **echo_skb; enum can_state state; /* CAN controller features - see include/uapi/linux/can/netlink.h */ u32 ctrlmode; /* current options setting */ u32 ctrlmode_supported; /* options that can be modified by netlink */ int restart_ms; struct delayed_work restart_work; int (*do_set_bittiming)(struct net_device *dev); int (*do_set_mode)(struct net_device *dev, enum can_mode mode); int (*do_set_termination)(struct net_device *dev, u16 term); int (*do_get_state)(const struct net_device *dev, enum can_state *state); int (*do_get_berr_counter)(const struct net_device *dev, struct can_berr_counter *bec); }; static inline bool can_tdc_is_enabled(const struct can_priv *priv) { return !!(priv->ctrlmode & CAN_CTRLMODE_TDC_MASK); } /* * can_get_relative_tdco() - TDCO relative to the sample point * * struct can_tdc::tdco represents the absolute offset from TDCV.
Some * controllers use instead an offset relative to the Sample Point (SP) * such that: * * SSP = TDCV + absolute TDCO * = TDCV + SP + relative TDCO * * -+----------- one bit ----------+-- TX pin * |<--- Sample Point --->| * * --+----------- one bit ----------+-- RX pin * |<-------- TDCV -------->| * |<------------------------>| absolute TDCO * |<--- Sample Point --->| * | |<->| relative TDCO * |<------------- Secondary Sample Point ------------>| */ static inline s32 can_get_relative_tdco(const struct can_priv *priv) { const struct can_bittiming *dbt = &priv->fd.data_bittiming; s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg + dbt->phase_seg1) * dbt->brp; return (s32)priv->fd.tdc.tdco - sample_point_in_tc; } /* helper to define static CAN controller features at device creation time */ static inline int __must_check can_set_static_ctrlmode(struct net_device *dev, u32 static_mode) { struct can_priv *priv = netdev_priv(dev); /* alloc_candev() succeeded => netdev_priv() is valid at this point */ if (priv->ctrlmode_supported & static_mode) { netdev_warn(dev, "Controller features can not be supported and static at the same time\n"); return -EINVAL; } priv->ctrlmode = static_mode; /* override MTU which was set by default in can_setup()? */ if (static_mode & CAN_CTRLMODE_FD) dev->mtu = CANFD_MTU; return 0; } static inline u32 can_get_static_ctrlmode(struct can_priv *priv) { return priv->ctrlmode & ~priv->ctrlmode_supported; } static inline bool can_is_canxl_dev_mtu(unsigned int mtu) { return (mtu >= CANXL_MIN_MTU && mtu <= CANXL_MAX_MTU); } /* drop skb if it does not contain a valid CAN frame for sending */ static inline bool can_dev_dropped_skb(struct net_device *dev, struct sk_buff *skb) { struct can_priv *priv = netdev_priv(dev); if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) { netdev_info_once(dev, "interface in listen only mode, dropping skb\n"); kfree_skb(skb); dev->stats.tx_dropped++; return true; } return can_dropped_invalid_skb(dev, skb); } void can_setup(struct net_device *dev); struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, unsigned int txqs, unsigned int rxqs); #define alloc_candev(sizeof_priv, echo_skb_max) \ alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1) #define alloc_candev_mq(sizeof_priv, echo_skb_max, count) \ alloc_candev_mqs(sizeof_priv, echo_skb_max, count, count) void free_candev(struct net_device *dev); /* a candev safe wrapper around netdev_priv */ struct can_priv *safe_candev_priv(struct net_device *dev); int open_candev(struct net_device *dev); void close_candev(struct net_device *dev); int can_change_mtu(struct net_device *dev, int new_mtu); int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd); int can_ethtool_op_get_ts_info_hwts(struct net_device *dev, struct kernel_ethtool_ts_info *info); int register_candev(struct net_device *dev); void unregister_candev(struct net_device *dev); int can_restart_now(struct net_device *dev); void can_bus_off(struct net_device *dev); const char *can_get_state_str(const enum can_state state); void can_state_get_by_berr_counter(const struct net_device *dev, const struct can_berr_counter *bec, enum can_state *tx_state, enum can_state *rx_state); void can_change_state(struct net_device *dev, struct can_frame *cf, enum can_state tx_state, enum can_state rx_state); #ifdef CONFIG_OF void of_can_transceiver(struct net_device *dev); #else static inline void of_can_transceiver(struct net_device *dev) { } #endif extern struct rtnl_link_ops can_link_ops; int 
can_netlink_register(void); void can_netlink_unregister(void); #endif /* !_CAN_DEV_H */
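/*
 * Example (illustrative sketch, not part of this header): the usual shape
 * of a controller driver's allocation path on top of the API above.
 * "demo_priv" and "demo_alloc" are hypothetical, and a real driver would
 * also fill bittiming constants, netdev_ops and the do_* callbacks before
 * calling register_candev().
 */
#if 0
struct demo_priv {
	struct can_priv can;	/* must stay the first member */
	void __iomem *regs;	/* hypothetical controller registers */
};

static struct net_device *demo_alloc(void)
{
	struct net_device *ndev;
	struct demo_priv *priv;

	/* room for our private data plus up to 4 echo skbs */
	ndev = alloc_candev(sizeof(*priv), 4);
	if (!ndev)
		return NULL;

	priv = netdev_priv(ndev);
	priv->can.clock.freq = 40 * 1000 * 1000;	/* assumed 40 MHz */
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
				       CAN_CTRLMODE_LISTENONLY;
	return ndev;	/* caller registers it or frees it with free_candev() */
}
#endif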
/* SPDX-License-Identifier: GPL-2.0 */ /* taskstats_kern.h - kernel header for per-task statistics interface * * Copyright (C) Shailabh Nagar, IBM Corp. 2006 * (C) Balbir Singh, IBM Corp. 2006 */ #ifndef _LINUX_TASKSTATS_KERN_H #define _LINUX_TASKSTATS_KERN_H #include <linux/taskstats.h> #include <linux/sched/signal.h> #include <linux/slab.h> #ifdef CONFIG_TASKSTATS extern struct kmem_cache *taskstats_cache; extern struct mutex taskstats_exit_mutex; static inline void taskstats_tgid_free(struct signal_struct *sig) { if (sig->stats) kmem_cache_free(taskstats_cache, sig->stats); } extern void taskstats_exit(struct task_struct *, int group_dead); extern void taskstats_init_early(void); #else static inline void taskstats_exit(struct task_struct *tsk, int group_dead) {} static inline void taskstats_tgid_free(struct signal_struct *sig) {} static inline void taskstats_init_early(void) {} #endif /* CONFIG_TASKSTATS */ #endif
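/*
 * Added note (illustrative): because the !CONFIG_TASKSTATS branch above
 * provides empty inline stubs with identical signatures, call sites need
 * no #ifdef of their own, e.g. an exit path can unconditionally do:
 *
 *	taskstats_exit(tsk, group_dead);
 *	taskstats_tgid_free(tsk->signal);
 *
 * and both calls compile away when the option is disabled.
 */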
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for the ov9650 sensor * * Copyright (C) 2008 Erik Andrén * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
* Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br> * * Portions of code to USB interface and ALi driver software, * Copyright (c) 2006 Willem Duinker * v4l2 interface modeled after the V4L2 driver * for SN9C10x PC Camera Controllers */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "m5602_ov9650.h" static int ov9650_s_ctrl(struct v4l2_ctrl *ctrl); static void ov9650_dump_registers(struct sd *sd); static const unsigned char preinit_ov9650[][3] = { /* [INITCAM] */ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02}, {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, {BRIDGE, M5602_XB_ADC_CTRL, 0xc0}, {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00}, {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08}, {BRIDGE, M5602_XB_GPIO_DIR, 0x05}, {BRIDGE, M5602_XB_GPIO_DAT, 0x04}, {BRIDGE, M5602_XB_GPIO_EN_H, 0x06}, {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06}, {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT, 0x00}, {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a}, /* Reset chip */ {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET}, /* Enable double clock */ {SENSOR, OV9650_CLKRC, 0x80}, /* Do something out of spec with the power */ {SENSOR, OV9650_OFON, 0x40} }; static const unsigned char init_ov9650[][3] = { /* [INITCAM] */ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02}, {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, {BRIDGE, M5602_XB_ADC_CTRL, 0xc0}, {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00}, {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08}, {BRIDGE, M5602_XB_GPIO_DIR, 0x05}, {BRIDGE, M5602_XB_GPIO_DAT, 0x04}, {BRIDGE, M5602_XB_GPIO_EN_H, 0x06}, {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06}, {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00}, {BRIDGE, M5602_XB_GPIO_DAT, 0x00}, {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a}, /* Reset chip */ {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET}, /* One extra reset is needed in order to make the sensor behave properly when resuming from ram, could be a timing issue */ {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET}, /* Enable double clock */ {SENSOR, OV9650_CLKRC, 0x80}, /* Do something out of spec with the power */ {SENSOR, OV9650_OFON, 0x40}, /* Set fast AGC/AEC algorithm with unlimited step size */ {SENSOR, OV9650_COM8, OV9650_FAST_AGC_AEC | OV9650_AEC_UNLIM_STEP_SIZE}, {SENSOR, OV9650_CHLF, 0x10}, {SENSOR, OV9650_ARBLM, 0xbf}, {SENSOR, OV9650_ACOM38, 0x81}, /* Turn off color matrix coefficient double option */ {SENSOR, OV9650_COM16, 0x00}, /* Enable color matrix for RGB/YUV, Delay Y channel, set output Y/UV delay to 1 */ {SENSOR, OV9650_COM13, 0x19}, /* Enable digital BLC, Set output mode to U Y V Y */ {SENSOR, OV9650_TSLB, 0x0c}, /* Limit the AGC/AEC stable upper region */ {SENSOR, OV9650_COM24, 0x00}, /* Enable HREF and some out of spec things */ {SENSOR, OV9650_COM12, 0x73}, /* Set all DBLC offset signs to positive and do some out of spec stuff */ {SENSOR, OV9650_DBLC1, 0xdf}, {SENSOR, OV9650_COM21, 0x06}, {SENSOR, OV9650_RSVD35, 0x91}, /* Necessary, no camera stream without it */ {SENSOR, OV9650_RSVD16, 0x06}, {SENSOR, OV9650_RSVD94, 0x99}, {SENSOR, OV9650_RSVD95, 0x99}, {SENSOR, OV9650_RSVD96, 0x04}, /* Enable full range output */ {SENSOR, OV9650_COM15, 0x0}, /* Enable HREF at optical black, enable ADBLC bias, enable ADBLC, reset timings at format change */ {SENSOR, OV9650_COM6, 0x4b}, /* Subtract 32 from the B channel bias */ {SENSOR, OV9650_BBIAS, 0xa0}, /* Subtract 32 from the Gb channel bias */ {SENSOR, OV9650_GbBIAS, 0xa0}, /* Do not bypass the analog BLC and to some out of spec stuff */ {SENSOR, 
OV9650_Gr_COM, 0x00}, /* Subtract 32 from the R channel bias */ {SENSOR, OV9650_RBIAS, 0xa0}, /* Subtract 32 from the R channel bias */ {SENSOR, OV9650_RBIAS, 0x0}, {SENSOR, OV9650_COM26, 0x80}, {SENSOR, OV9650_ACOMA9, 0x98}, /* Set the AGC/AEC stable region upper limit */ {SENSOR, OV9650_AEW, 0x68}, /* Set the AGC/AEC stable region lower limit */ {SENSOR, OV9650_AEB, 0x5c}, /* Set the high and low limit nibbles to 3 */ {SENSOR, OV9650_VPT, 0xc3}, /* Set the Automatic Gain Ceiling (AGC) to 128x, drop VSYNC at frame drop, limit exposure timing, drop frame when the AEC step is larger than the exposure gap */ {SENSOR, OV9650_COM9, 0x6e}, /* Set VSYNC negative, Set RESET to SLHS (slave mode horizontal sync) and set PWDN to SLVS (slave mode vertical sync) */ {SENSOR, OV9650_COM10, 0x42}, /* Set horizontal column start high to default value */ {SENSOR, OV9650_HSTART, 0x1a}, /* 210 */ /* Set horizontal column end */ {SENSOR, OV9650_HSTOP, 0xbf}, /* 1534 */ /* Complementing register to the two writes above */ {SENSOR, OV9650_HREF, 0xb2}, /* Set vertical row start high bits */ {SENSOR, OV9650_VSTRT, 0x02}, /* Set vertical row end low bits */ {SENSOR, OV9650_VSTOP, 0x7e}, /* Set complementing vertical frame control */ {SENSOR, OV9650_VREF, 0x10}, {SENSOR, OV9650_ADC, 0x04}, {SENSOR, OV9650_HV, 0x40}, /* Enable denoise, and white-pixel erase */ {SENSOR, OV9650_COM22, OV9650_DENOISE_ENABLE | OV9650_WHITE_PIXEL_ENABLE | OV9650_WHITE_PIXEL_OPTION}, /* Enable VARIOPIXEL */ {SENSOR, OV9650_COM3, OV9650_VARIOPIXEL}, {SENSOR, OV9650_COM4, OV9650_QVGA_VARIOPIXEL}, /* Put the sensor in soft sleep mode */ {SENSOR, OV9650_COM2, OV9650_SOFT_SLEEP | OV9650_OUTPUT_DRIVE_2X}, }; static const unsigned char res_init_ov9650[][3] = { {SENSOR, OV9650_COM2, OV9650_OUTPUT_DRIVE_2X}, {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x82}, {BRIDGE, M5602_XB_LINE_OF_FRAME_L, 0x00}, {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82}, {BRIDGE, M5602_XB_PIX_OF_LINE_L, 0x00}, {BRIDGE, M5602_XB_SIG_INI, 0x01} }; /* Vertically and horizontally flips the image if matched, needed for machines where the sensor is mounted upside down */ static const struct dmi_system_id ov9650_flip_dmi_table[] = { { .ident = "ASUS A6Ja", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "A6J") } }, { .ident = "ASUS A6JC", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "A6JC") } }, { .ident = "ASUS A6K", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "A6K") } }, { .ident = "ASUS A6Kt", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "A6Kt") } }, { .ident = "ASUS A6VA", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "A6VA") } }, { .ident = "ASUS A6VC", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "A6VC") } }, { .ident = "ASUS A6VM", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "A6VM") } }, { .ident = "ASUS A7V", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "A7V") } }, { .ident = "Alienware Aurora m9700", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "alienware"), DMI_MATCH(DMI_PRODUCT_NAME, "Aurora m9700") } }, {} }; static struct v4l2_pix_format ov9650_modes[] = { { 176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .sizeimage = 176 * 144, .bytesperline = 176, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 9 }, { 320, 240, 
V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .sizeimage = 320 * 240, .bytesperline = 320, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 8 }, { 352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .sizeimage = 352 * 288, .bytesperline = 352, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 9 }, { 640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .sizeimage = 640 * 480, .bytesperline = 640, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 9 } }; static const struct v4l2_ctrl_ops ov9650_ctrl_ops = { .s_ctrl = ov9650_s_ctrl, }; int ov9650_probe(struct sd *sd) { int err = 0; u8 prod_id = 0, ver_id = 0, i; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; if (force_sensor) { if (force_sensor == OV9650_SENSOR) { pr_info("Forcing an %s sensor\n", ov9650.name); goto sensor_found; } /* If we want to force another sensor, don't try to probe this one */ return -ENODEV; } gspca_dbg(gspca_dev, D_PROBE, "Probing for an ov9650 sensor\n"); /* Run the pre-init before probing the sensor */ for (i = 0; i < ARRAY_SIZE(preinit_ov9650) && !err; i++) { u8 data = preinit_ov9650[i][2]; if (preinit_ov9650[i][0] == SENSOR) err = m5602_write_sensor(sd, preinit_ov9650[i][1], &data, 1); else err = m5602_write_bridge(sd, preinit_ov9650[i][1], data); } if (err < 0) return err; if (m5602_read_sensor(sd, OV9650_PID, &prod_id, 1)) return -ENODEV; if (m5602_read_sensor(sd, OV9650_VER, &ver_id, 1)) return -ENODEV; if ((prod_id == 0x96) && (ver_id == 0x52)) { pr_info("Detected an ov9650 sensor\n"); goto sensor_found; } return -ENODEV; sensor_found: sd->gspca_dev.cam.cam_mode = ov9650_modes; sd->gspca_dev.cam.nmodes = ARRAY_SIZE(ov9650_modes); return 0; } int ov9650_init(struct sd *sd) { int i, err = 0; u8 data; if (dump_sensor) ov9650_dump_registers(sd); for (i = 0; i < ARRAY_SIZE(init_ov9650) && !err; i++) { data = init_ov9650[i][2]; if (init_ov9650[i][0] == SENSOR) err = m5602_write_sensor(sd, init_ov9650[i][1], &data, 1); else err = m5602_write_bridge(sd, init_ov9650[i][1], data); } return 0; } int ov9650_init_controls(struct sd *sd) { struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler; sd->gspca_dev.vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 9); sd->auto_white_bal = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1); sd->red_bal = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops, V4L2_CID_RED_BALANCE, 0, 255, 1, RED_GAIN_DEFAULT); sd->blue_bal = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops, V4L2_CID_BLUE_BALANCE, 0, 255, 1, BLUE_GAIN_DEFAULT); sd->autoexpo = v4l2_ctrl_new_std_menu(hdl, &ov9650_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0, V4L2_EXPOSURE_AUTO); sd->expo = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops, V4L2_CID_EXPOSURE, 0, 0x1ff, 4, EXPOSURE_DEFAULT); sd->autogain = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); sd->gain = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops, V4L2_CID_GAIN, 0, 0x3ff, 1, GAIN_DEFAULT); sd->hflip = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); sd->vflip = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_auto_cluster(3, &sd->auto_white_bal, 0, false); v4l2_ctrl_auto_cluster(2, &sd->autoexpo, 0, false); v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, false); v4l2_ctrl_cluster(2, &sd->hflip); return 0; } int ov9650_start(struct sd *sd) { u8 data; int i, err = 0; struct cam *cam = &sd->gspca_dev.cam; int width = cam->cam_mode[sd->gspca_dev.curr_mode].width; int height = cam->cam_mode[sd->gspca_dev.curr_mode].height; int ver_offs = 
cam->cam_mode[sd->gspca_dev.curr_mode].priv; int hor_offs = OV9650_LEFT_OFFSET; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; if ((!dmi_check_system(ov9650_flip_dmi_table) && sd->vflip->val) || (dmi_check_system(ov9650_flip_dmi_table) && !sd->vflip->val)) ver_offs--; if (width <= 320) hor_offs /= 2; /* Synthesize the vsync/hsync setup */ for (i = 0; i < ARRAY_SIZE(res_init_ov9650) && !err; i++) { if (res_init_ov9650[i][0] == BRIDGE) err = m5602_write_bridge(sd, res_init_ov9650[i][1], res_init_ov9650[i][2]); else if (res_init_ov9650[i][0] == SENSOR) { data = res_init_ov9650[i][2]; err = m5602_write_sensor(sd, res_init_ov9650[i][1], &data, 1); } } if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, ((ver_offs >> 8) & 0xff)); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (ver_offs & 0xff)); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height >> 8) & 0xff); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height & 0xff)); if (err < 0) return err; for (i = 0; i < 2 && !err; i++) err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 2); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, (hor_offs >> 8) & 0xff); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, hor_offs & 0xff); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, ((width + hor_offs) >> 8) & 0xff); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, ((width + hor_offs) & 0xff)); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0); if (err < 0) return err; switch (width) { case 640: gspca_dbg(gspca_dev, D_CONF, "Configuring camera for VGA mode\n"); data = OV9650_VGA_SELECT | OV9650_RGB_SELECT | OV9650_RAW_RGB_SELECT; err = m5602_write_sensor(sd, OV9650_COM7, &data, 1); break; case 352: gspca_dbg(gspca_dev, D_CONF, "Configuring camera for CIF mode\n"); data = OV9650_CIF_SELECT | OV9650_RGB_SELECT | OV9650_RAW_RGB_SELECT; err = m5602_write_sensor(sd, OV9650_COM7, &data, 1); break; case 320: gspca_dbg(gspca_dev, D_CONF, "Configuring camera for QVGA mode\n"); data = OV9650_QVGA_SELECT | OV9650_RGB_SELECT | OV9650_RAW_RGB_SELECT; err = m5602_write_sensor(sd, OV9650_COM7, &data, 1); break; case 176: gspca_dbg(gspca_dev, D_CONF, "Configuring camera for QCIF mode\n"); data = OV9650_QCIF_SELECT | OV9650_RGB_SELECT | OV9650_RAW_RGB_SELECT; err = m5602_write_sensor(sd, OV9650_COM7, &data, 1); break; } return err; } int ov9650_stop(struct sd *sd) { u8 data = OV9650_SOFT_SLEEP | OV9650_OUTPUT_DRIVE_2X; return m5602_write_sensor(sd, OV9650_COM2, &data, 1); } void ov9650_disconnect(struct sd *sd) { ov9650_stop(sd); sd->sensor = NULL; } static int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; gspca_dbg(gspca_dev, D_CONF, "Set exposure to %d\n", val); /* The 6 MSBs */ i2c_data = (val >> 10) & 0x3f; err = m5602_write_sensor(sd, OV9650_AECHM, &i2c_data, 1); if (err < 0) return err; /* The 8 middle bits */ i2c_data = (val >> 2) & 0xff; err = m5602_write_sensor(sd, OV9650_AECH, &i2c_data, 1); if (err < 0) return err; /* The 2 LSBs */ i2c_data = val & 0x03; err = m5602_write_sensor(sd, OV9650_COM1, &i2c_data, 1); return 
err; } static int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val) { int err; u8 i2c_data; struct sd *sd = (struct sd *) gspca_dev; gspca_dbg(gspca_dev, D_CONF, "Setting gain to %d\n", val); /* The 2 MSB */ /* Read the OV9650_VREF register first to avoid corrupting the VREF high and low bits */ err = m5602_read_sensor(sd, OV9650_VREF, &i2c_data, 1); if (err < 0) return err; /* Mask away all uninteresting bits */ i2c_data = ((val & 0x0300) >> 2) | (i2c_data & 0x3f); err = m5602_write_sensor(sd, OV9650_VREF, &i2c_data, 1); if (err < 0) return err; /* The 8 LSBs */ i2c_data = val & 0xff; err = m5602_write_sensor(sd, OV9650_GAIN, &i2c_data, 1); return err; } static int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val) { int err; u8 i2c_data; struct sd *sd = (struct sd *) gspca_dev; gspca_dbg(gspca_dev, D_CONF, "Set red gain to %d\n", val); i2c_data = val & 0xff; err = m5602_write_sensor(sd, OV9650_RED, &i2c_data, 1); return err; } static int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val) { int err; u8 i2c_data; struct sd *sd = (struct sd *) gspca_dev; gspca_dbg(gspca_dev, D_CONF, "Set blue gain to %d\n", val); i2c_data = val & 0xff; err = m5602_write_sensor(sd, OV9650_BLUE, &i2c_data, 1); return err; } static int ov9650_set_hvflip(struct gspca_dev *gspca_dev) { int err; u8 i2c_data; struct sd *sd = (struct sd *) gspca_dev; int hflip = sd->hflip->val; int vflip = sd->vflip->val; gspca_dbg(gspca_dev, D_CONF, "Set hvflip to %d %d\n", hflip, vflip); if (dmi_check_system(ov9650_flip_dmi_table)) vflip = !vflip; i2c_data = (hflip << 5) | (vflip << 4); err = m5602_write_sensor(sd, OV9650_MVFP, &i2c_data, 1); if (err < 0) return err; /* When vflip is toggled we need to readjust the bridge hsync/vsync */ if (gspca_dev->streaming) err = ov9650_start(sd); return err; } static int ov9650_set_auto_exposure(struct gspca_dev *gspca_dev, __s32 val) { int err; u8 i2c_data; struct sd *sd = (struct sd *) gspca_dev; gspca_dbg(gspca_dev, D_CONF, "Set auto exposure control to %d\n", val); err = m5602_read_sensor(sd, OV9650_COM8, &i2c_data, 1); if (err < 0) return err; val = (val == V4L2_EXPOSURE_AUTO); i2c_data = ((i2c_data & 0xfe) | ((val & 0x01) << 0)); return m5602_write_sensor(sd, OV9650_COM8, &i2c_data, 1); } static int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val) { int err; u8 i2c_data; struct sd *sd = (struct sd *) gspca_dev; gspca_dbg(gspca_dev, D_CONF, "Set auto white balance to %d\n", val); err = m5602_read_sensor(sd, OV9650_COM8, &i2c_data, 1); if (err < 0) return err; i2c_data = ((i2c_data & 0xfd) | ((val & 0x01) << 1)); err = m5602_write_sensor(sd, OV9650_COM8, &i2c_data, 1); return err; } static int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val) { int err; u8 i2c_data; struct sd *sd = (struct sd *) gspca_dev; gspca_dbg(gspca_dev, D_CONF, "Set auto gain control to %d\n", val); err = m5602_read_sensor(sd, OV9650_COM8, &i2c_data, 1); if (err < 0) return err; i2c_data = ((i2c_data & 0xfb) | ((val & 0x01) << 2)); return m5602_write_sensor(sd, OV9650_COM8, &i2c_data, 1); } static int ov9650_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *) gspca_dev; int err; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_AUTO_WHITE_BALANCE: err = ov9650_set_auto_white_balance(gspca_dev, ctrl->val); if (err || ctrl->val) return err; err = ov9650_set_red_balance(gspca_dev, sd->red_bal->val); if (err) return err; err = 
ov9650_set_blue_balance(gspca_dev, sd->blue_bal->val); break; case V4L2_CID_EXPOSURE_AUTO: err = ov9650_set_auto_exposure(gspca_dev, ctrl->val); if (err || ctrl->val == V4L2_EXPOSURE_AUTO) return err; err = ov9650_set_exposure(gspca_dev, sd->expo->val); break; case V4L2_CID_AUTOGAIN: err = ov9650_set_auto_gain(gspca_dev, ctrl->val); if (err || ctrl->val) return err; err = ov9650_set_gain(gspca_dev, sd->gain->val); break; case V4L2_CID_HFLIP: err = ov9650_set_hvflip(gspca_dev); break; default: return -EINVAL; } return err; } static void ov9650_dump_registers(struct sd *sd) { int address; pr_info("Dumping the ov9650 register state\n"); for (address = 0; address < 0xa9; address++) { u8 value; m5602_read_sensor(sd, address, &value, 1); pr_info("register 0x%x contains 0x%x\n", address, value); } pr_info("ov9650 register state dump complete\n"); pr_info("Probing for which registers that are read/write\n"); for (address = 0; address < 0xff; address++) { u8 old_value, ctrl_value; u8 test_value[2] = {0xff, 0xff}; m5602_read_sensor(sd, address, &old_value, 1); m5602_write_sensor(sd, address, test_value, 1); m5602_read_sensor(sd, address, &ctrl_value, 1); if (ctrl_value == test_value[0]) pr_info("register 0x%x is writeable\n", address); else pr_info("register 0x%x is read only\n", address); /* Restore original value */ m5602_write_sensor(sd, address, &old_value, 1); } } |
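/*
 * Standalone illustration (not part of the driver): ov9650_set_exposure()
 * above scatters a single exposure value across three sensor registers,
 * the six MSBs into OV9650_AECHM, the eight middle bits into OV9650_AECH
 * and the two LSBs into OV9650_COM1. A minimal userspace sketch of the
 * same bit split, with a round-trip check:
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = 0x1f3;			/* sample exposure value */
	uint8_t aechm = (val >> 10) & 0x3f;	/* 6 MSBs   -> OV9650_AECHM */
	uint8_t aech = (val >> 2) & 0xff;	/* 8 middle -> OV9650_AECH */
	uint8_t com1 = val & 0x03;		/* 2 LSBs   -> OV9650_COM1 */
	uint32_t back = (aechm << 10) | (aech << 2) | com1;

	printf("AECHM=0x%02x AECH=0x%02x COM1=0x%02x\n", aechm, aech, com1);
	assert(back == val);			/* the split is lossless */
	return 0;
}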
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  HID driver for some cherry "special" devices
 *
 *  Copyright (c) 1999 Andreas Gal
 *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 *  Copyright (c) 2006-2007 Jiri Kosina
 *  Copyright (c) 2008 Jiri Slaby
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

/*
 * Cherry Cymotion keyboards have an invalid HID report descriptor
 * that needs fixing before we can parse it.
 */
static const __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
		unsigned int *rsize)
{
	if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
		hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
		rdesc[11] = rdesc[16] = 0xff;
		rdesc[12] = rdesc[17] = 0x03;
	}
	return rdesc;
}

#define ch_map_key_clear(c)	hid_map_usage_clear(hi, usage, bit, max, \
					EV_KEY, (c))
static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
		return 0;

	switch (usage->hid & HID_USAGE) {
	case 0x301: ch_map_key_clear(KEY_PROG1);	break;
	case 0x302: ch_map_key_clear(KEY_PROG2);	break;
	case 0x303: ch_map_key_clear(KEY_PROG3);	break;
	default:
		return 0;
	}

	return 1;
}

static const struct hid_device_id ch_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
	{ }
};
MODULE_DEVICE_TABLE(hid, ch_devices);

static struct hid_driver ch_driver = {
	.name = "cherry",
	.id_table = ch_devices,
	.report_fixup = ch_report_fixup,
	.input_mapping = ch_input_mapping,
};
module_hid_driver(ch_driver);

MODULE_DESCRIPTION("HID driver for some cherry \"special\" devices");
MODULE_LICENSE("GPL");
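/*
 * Userspace sketch (not kernel code) of the patch ch_report_fixup()
 * above applies: when bytes 11 and 12 of the report descriptor read
 * 0x3c 0x02, the bytes at offsets 11/16 and 12/17 are rewritten to
 * 0xff and 0x03 so the descriptor parses correctly.
 */
#include <stdint.h>
#include <stdio.h>

static void cymotion_fixup(uint8_t *rdesc, unsigned int rsize)
{
	/* same check-and-patch logic as the driver */
	if (rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
		rdesc[11] = rdesc[16] = 0xff;
		rdesc[12] = rdesc[17] = 0x03;
	}
}

int main(void)
{
	uint8_t rdesc[18] = { [11] = 0x3c, [12] = 0x02 };

	cymotion_fixup(rdesc, sizeof(rdesc));
	printf("bytes 11/12: %02x %02x, bytes 16/17: %02x %02x\n",
	       rdesc[11], rdesc[12], rdesc[16], rdesc[17]);
	return 0;
}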
48 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Directory notifications for Linux. * * Copyright (C) 2000,2001,2002 Stephen Rothwell * * Copyright (C) 2009 Eric Paris <Red Hat Inc> * dnotify was largly rewritten to use the new fsnotify infrastructure */ #include <linux/fs.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/dnotify.h> #include <linux/init.h> #include <linux/security.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/fsnotify_backend.h> static int dir_notify_enable __read_mostly = 1; #ifdef CONFIG_SYSCTL static const struct ctl_table dnotify_sysctls[] = { { .procname = "dir-notify-enable", .data = &dir_notify_enable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, }; static void __init dnotify_sysctl_init(void) { register_sysctl_init("fs", dnotify_sysctls); } #else #define dnotify_sysctl_init() do { } while (0) #endif static struct kmem_cache *dnotify_struct_cache __ro_after_init; static struct kmem_cache *dnotify_mark_cache __ro_after_init; static struct fsnotify_group *dnotify_group __ro_after_init; /* * dnotify will attach one of these to each inode (i_fsnotify_marks) which * is being watched by dnotify. If multiple userspace applications are watching * the same directory with dnotify their information is chained in dn */ struct dnotify_mark { struct fsnotify_mark fsn_mark; struct dnotify_struct *dn; }; /* * When a process starts or stops watching an inode the set of events which * dnotify cares about for that inode may change. This function runs the * list of everything receiving dnotify events about this directory and calculates * the set of all those events. After it updates what dnotify is interested in * it calls the fsnotify function so it can update the set of all events relevant * to this inode. 
*/ static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark) { __u32 new_mask = 0; struct dnotify_struct *dn; struct dnotify_mark *dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); assert_spin_locked(&fsn_mark->lock); for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next) new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT); if (fsn_mark->mask == new_mask) return; fsn_mark->mask = new_mask; fsnotify_recalc_mask(fsn_mark->connector); } /* * Mains fsnotify call where events are delivered to dnotify. * Find the dnotify mark on the relevant inode, run the list of dnotify structs * on that mark and determine which of them has expressed interest in receiving * events of this type. When found send the correct process and signal and * destroy the dnotify struct if it was not registered to receive multiple * events. */ static int dnotify_handle_event(struct fsnotify_mark *inode_mark, u32 mask, struct inode *inode, struct inode *dir, const struct qstr *name, u32 cookie) { struct dnotify_mark *dn_mark; struct dnotify_struct *dn; struct dnotify_struct **prev; struct fown_struct *fown; __u32 test_mask = mask & ~FS_EVENT_ON_CHILD; /* not a dir, dnotify doesn't care */ if (!dir && !(mask & FS_ISDIR)) return 0; dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark); spin_lock(&inode_mark->lock); prev = &dn_mark->dn; while ((dn = *prev) != NULL) { if ((dn->dn_mask & test_mask) == 0) { prev = &dn->dn_next; continue; } fown = file_f_owner(dn->dn_filp); send_sigio(fown, dn->dn_fd, POLL_MSG); if (dn->dn_mask & FS_DN_MULTISHOT) prev = &dn->dn_next; else { *prev = dn->dn_next; kmem_cache_free(dnotify_struct_cache, dn); dnotify_recalc_inode_mask(inode_mark); } } spin_unlock(&inode_mark->lock); return 0; } static void dnotify_free_mark(struct fsnotify_mark *fsn_mark) { struct dnotify_mark *dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); BUG_ON(dn_mark->dn); kmem_cache_free(dnotify_mark_cache, dn_mark); } static const struct fsnotify_ops dnotify_fsnotify_ops = { .handle_inode_event = dnotify_handle_event, .free_mark = dnotify_free_mark, }; /* * Called every time a file is closed. Looks first for a dnotify mark on the * inode. If one is found run all of the ->dn structures attached to that * mark for one relevant to this process closing the file and remove that * dnotify_struct. If that was the last dnotify_struct also remove the * fsnotify_mark. 
*/ void dnotify_flush(struct file *filp, fl_owner_t id) { struct fsnotify_mark *fsn_mark; struct dnotify_mark *dn_mark; struct dnotify_struct *dn; struct dnotify_struct **prev; struct inode *inode; bool free = false; inode = file_inode(filp); if (!S_ISDIR(inode->i_mode)) return; fsn_mark = fsnotify_find_inode_mark(inode, dnotify_group); if (!fsn_mark) return; dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); fsnotify_group_lock(dnotify_group); spin_lock(&fsn_mark->lock); prev = &dn_mark->dn; while ((dn = *prev) != NULL) { if ((dn->dn_owner == id) && (dn->dn_filp == filp)) { *prev = dn->dn_next; kmem_cache_free(dnotify_struct_cache, dn); dnotify_recalc_inode_mask(fsn_mark); break; } prev = &dn->dn_next; } spin_unlock(&fsn_mark->lock); /* nothing else could have found us thanks to the dnotify_groups mark_mutex */ if (dn_mark->dn == NULL) { fsnotify_detach_mark(fsn_mark); free = true; } fsnotify_group_unlock(dnotify_group); if (free) fsnotify_free_mark(fsn_mark); fsnotify_put_mark(fsn_mark); } /* this conversion is done only at watch creation */ static __u32 convert_arg(unsigned int arg) { __u32 new_mask = FS_EVENT_ON_CHILD; if (arg & DN_MULTISHOT) new_mask |= FS_DN_MULTISHOT; if (arg & DN_DELETE) new_mask |= (FS_DELETE | FS_MOVED_FROM); if (arg & DN_MODIFY) new_mask |= FS_MODIFY; if (arg & DN_ACCESS) new_mask |= FS_ACCESS; if (arg & DN_ATTRIB) new_mask |= FS_ATTRIB; if (arg & DN_RENAME) new_mask |= FS_RENAME; if (arg & DN_CREATE) new_mask |= (FS_CREATE | FS_MOVED_TO); return new_mask; } /* * If multiple processes watch the same inode with dnotify there is only one * dnotify mark in inode->i_fsnotify_marks but we chain a dnotify_struct * onto that mark. This function either attaches the new dnotify_struct onto * that list, or it |= the mask onto an existing dnofiy_struct. */ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark *dn_mark, fl_owner_t id, int fd, struct file *filp, __u32 mask) { struct dnotify_struct *odn; odn = dn_mark->dn; while (odn != NULL) { /* adding more events to existing dnofiy_struct? */ if ((odn->dn_owner == id) && (odn->dn_filp == filp)) { odn->dn_fd = fd; odn->dn_mask |= mask; return -EEXIST; } odn = odn->dn_next; } dn->dn_mask = mask; dn->dn_fd = fd; dn->dn_filp = filp; dn->dn_owner = id; dn->dn_next = dn_mark->dn; dn_mark->dn = dn; return 0; } /* * When a process calls fcntl to attach a dnotify watch to a directory it ends * up here. Allocate both a mark for fsnotify to add and a dnotify_struct to be * attached to the fsnotify_mark. 
*/ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg) { struct dnotify_mark *new_dn_mark, *dn_mark; struct fsnotify_mark *new_fsn_mark, *fsn_mark; struct dnotify_struct *dn; struct inode *inode; fl_owner_t id = current->files; struct file *f = NULL; int destroy = 0, error = 0; __u32 mask; /* we use these to tell if we need to kfree */ new_fsn_mark = NULL; dn = NULL; if (!dir_notify_enable) { error = -EINVAL; goto out_err; } /* a 0 mask means we are explicitly removing the watch */ if ((arg & ~DN_MULTISHOT) == 0) { dnotify_flush(filp, id); error = 0; goto out_err; } /* dnotify only works on directories */ inode = file_inode(filp); if (!S_ISDIR(inode->i_mode)) { error = -ENOTDIR; goto out_err; } /* * convert the userspace DN_* "arg" to the internal FS_* * defined in fsnotify */ mask = convert_arg(arg); error = security_path_notify(&filp->f_path, mask, FSNOTIFY_OBJ_TYPE_INODE); if (error) goto out_err; /* expect most fcntl to add new rather than augment old */ dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL); if (!dn) { error = -ENOMEM; goto out_err; } /* new fsnotify mark, we expect most fcntl calls to add a new mark */ new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL); if (!new_dn_mark) { error = -ENOMEM; goto out_err; } error = file_f_owner_allocate(filp); if (error) goto out_err; /* set up the new_fsn_mark and new_dn_mark */ new_fsn_mark = &new_dn_mark->fsn_mark; fsnotify_init_mark(new_fsn_mark, dnotify_group); new_fsn_mark->mask = mask; new_dn_mark->dn = NULL; /* this is needed to prevent the fcntl/close race described below */ fsnotify_group_lock(dnotify_group); /* add the new_fsn_mark or find an old one. */ fsn_mark = fsnotify_find_inode_mark(inode, dnotify_group); if (fsn_mark) { dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); spin_lock(&fsn_mark->lock); } else { error = fsnotify_add_inode_mark_locked(new_fsn_mark, inode, 0); if (error) { fsnotify_group_unlock(dnotify_group); goto out_err; } spin_lock(&new_fsn_mark->lock); fsn_mark = new_fsn_mark; dn_mark = new_dn_mark; /* we used new_fsn_mark, so don't free it */ new_fsn_mark = NULL; } f = fget_raw(fd); /* if (f != filp) means that we lost a race and another task/thread * actually closed the fd we are still playing with before we grabbed * the dnotify_groups mark_mutex and fsn_mark->lock. Since closing the * fd is the only time we clean up the marks we need to get our mark * off the list. */ if (f != filp) { /* if we added ourselves, shoot ourselves, it's possible that * the flush actually did shoot this fsn_mark. That's fine too * since multiple calls to destroy_mark is perfectly safe, if * we found a dn_mark already attached to the inode, just sod * off silently as the flush at close time dealt with it. */ if (dn_mark == new_dn_mark) destroy = 1; error = 0; goto out; } __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0); error = attach_dn(dn, dn_mark, id, fd, filp, mask); /* !error means that we attached the dn to the dn_mark, so don't free it */ if (!error) dn = NULL; /* -EEXIST means that we didn't add this new dn and used an old one. 
* that isn't an error (and the unused dn should be freed) */ else if (error == -EEXIST) error = 0; dnotify_recalc_inode_mask(fsn_mark); out: spin_unlock(&fsn_mark->lock); if (destroy) fsnotify_detach_mark(fsn_mark); fsnotify_group_unlock(dnotify_group); if (destroy) fsnotify_free_mark(fsn_mark); fsnotify_put_mark(fsn_mark); out_err: if (new_fsn_mark) fsnotify_put_mark(new_fsn_mark); if (dn) kmem_cache_free(dnotify_struct_cache, dn); if (f) fput(f); return error; } static int __init dnotify_init(void) { dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC|SLAB_ACCOUNT); dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC|SLAB_ACCOUNT); dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops, 0); if (IS_ERR(dnotify_group)) panic("unable to allocate fsnotify group for dnotify\n"); dnotify_sysctl_init(); return 0; } module_init(dnotify_init) |
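/*
 * Hedged userspace sketch of the interface fcntl_dirnotify() above
 * implements (see fcntl(2)): arm a multishot dnotify watch on a
 * directory and catch the SIGIO that dnotify_handle_event() raises
 * via send_sigio() when a matching event occurs.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t hits;

static void on_sigio(int sig)
{
	(void)sig;
	hits++;
}

int main(void)
{
	int fd = open(".", O_RDONLY);	/* the directory to watch */

	if (fd < 0)
		return 1;
	signal(SIGIO, on_sigio);
	/* DN_MULTISHOT maps to FS_DN_MULTISHOT in convert_arg() above */
	if (fcntl(fd, F_NOTIFY, DN_CREATE | DN_RENAME | DN_MULTISHOT) < 0) {
		perror("F_NOTIFY");
		return 1;
	}
	pause();			/* wait for the first event */
	printf("directory events so far: %d\n", (int)hits);
	close(fd);
	return 0;
}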
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_BL_H
#define _LINUX_RCULIST_BL_H

/*
 * RCU-protected bl list version. See include/linux/list_bl.h.
 */
#include <linux/list_bl.h>
#include <linux/rcupdate.h>

static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
					struct hlist_bl_node *n)
{
	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
							LIST_BL_LOCKMASK);
	rcu_assign_pointer(h->first,
		(struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
}

static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
{
	return (struct hlist_bl_node *)
		((unsigned long)rcu_dereference_check(h->first,
						      hlist_bl_is_locked(h)) &
		 ~LIST_BL_LOCKMASK);
}

/**
 * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_bl_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry().
 */
static inline void hlist_bl_del_rcu(struct hlist_bl_node *n)
{
	__hlist_bl_del(n);
	n->pprev = LIST_POISON2;
}

/**
 * hlist_bl_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_bl,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
					struct hlist_bl_head *h)
{
	struct hlist_bl_node *first;

	/* don't need hlist_bl_first_rcu because we're under lock */
	first = hlist_bl_first(h);

	n->next = first;
	if (first)
		first->pprev = &n->next;
	n->pprev = &h->first;

	/* need _rcu because we can have concurrent lock free readers */
	hlist_bl_set_first_rcu(h, n);
}

/**
 * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_bl_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_bl_node within the struct.
 *
 */
#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member)		\
	for (pos = hlist_bl_first_rcu(head);				\
		pos &&							\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
		pos = rcu_dereference_raw(pos->next))

#endif
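/*
 * Usage sketch under an assumed kernel context (the struct and helpers
 * below are illustrative, not taken from a real user of this header):
 * writers serialise on the bit lock embedded in the head pointer,
 * readers walk the chain locklessly under rcu_read_lock().
 */
#include <linux/rculist_bl.h>

struct demo_obj {
	struct hlist_bl_node node;
	int key;
};

static void demo_insert(struct hlist_bl_head *head, struct demo_obj *obj)
{
	hlist_bl_lock(head);		/* takes bit 0 of head->first */
	hlist_bl_add_head_rcu(&obj->node, head);
	hlist_bl_unlock(head);
}

/*
 * Caller must hold rcu_read_lock(); the returned object is only
 * guaranteed to stay around until the read-side section ends.
 */
static struct demo_obj *demo_find(struct hlist_bl_head *head, int key)
{
	struct hlist_bl_node *pos;
	struct demo_obj *obj;

	hlist_bl_for_each_entry_rcu(obj, pos, head, node)
		if (obj->key == key)
			return obj;
	return NULL;
}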
30 30 3225 38 38 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 | // SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_address.h> #include <linux/of_iommu.h> #include <linux/of_reserved_mem.h> #include <linux/dma-direct.h> /* for bus_dma_region */ #include <linux/dma-map-ops.h> #include <linux/init.h> #include <linux/mod_devicetable.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <asm/errno.h> #include "of_private.h" /** * of_match_device - Tell if a struct device matches an of_device_id list * @matches: array of of device match structures to search in * @dev: the of device structure to match against * * Used by a driver to check whether an platform_device present in the * system is in its list of supported devices. */ const struct of_device_id *of_match_device(const struct of_device_id *matches, const struct device *dev) { if (!matches || !dev->of_node || dev->of_node_reused) return NULL; return of_match_node(matches, dev->of_node); } EXPORT_SYMBOL(of_match_device); static void of_dma_set_restricted_buffer(struct device *dev, struct device_node *np) { struct device_node *of_node = dev->of_node; struct of_phandle_iterator it; int rc, i = 0; if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL)) return; /* * If dev->of_node doesn't exist or doesn't contain memory-region, try * the OF node having DMA configuration. */ if (!of_property_present(of_node, "memory-region")) of_node = np; of_for_each_phandle(&it, rc, of_node, "memory-region", NULL, 0) { /* * There might be multiple memory regions, but only one * restricted-dma-pool region is allowed. */ if (of_device_is_compatible(it.node, "restricted-dma-pool") && of_device_is_available(it.node)) { if (of_reserved_mem_device_init_by_idx(dev, of_node, i)) dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n"); of_node_put(it.node); break; } i++; } } /** * of_dma_configure_id - Setup DMA configuration * @dev: Device to apply DMA configuration * @np: Pointer to OF node having DMA configuration * @force_dma: Whether device is to be set up by of_dma_configure() even if * DMA capability is not explicitly described by firmware. * @id: Optional const pointer value input id * * Try to get devices's DMA configuration from DT and update it * accordingly. 
* * If platform code needs to use its own special DMA configuration, it * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events * to fix up DMA configuration. */ int of_dma_configure_id(struct device *dev, struct device_node *np, bool force_dma, const u32 *id) { const struct bus_dma_region *map = NULL; struct device_node *bus_np; u64 mask, end = 0; bool coherent, set_map = false; int ret; if (dev->dma_range_map) { dev_dbg(dev, "dma_range_map already set\n"); goto skip_map; } if (np == dev->of_node) bus_np = __of_get_dma_parent(np); else bus_np = of_node_get(np); ret = of_dma_get_range(bus_np, &map); of_node_put(bus_np); if (ret < 0) { /* * For legacy reasons, we have to assume some devices need * DMA configuration regardless of whether "dma-ranges" is * correctly specified or not. */ if (!force_dma) return ret == -ENODEV ? 0 : ret; } else { /* Determine the overall bounds of all DMA regions */ end = dma_range_map_max(map); set_map = true; } skip_map: /* * If @dev is expected to be DMA-capable then the bus code that created * it should have initialised its dma_mask pointer by this point. For * now, we'll continue the legacy behaviour of coercing it to the * coherent mask if not, but we'll no longer do so quietly. */ if (!dev->dma_mask) { dev_warn(dev, "DMA mask not set\n"); dev->dma_mask = &dev->coherent_dma_mask; } if (!end && dev->coherent_dma_mask) end = dev->coherent_dma_mask; else if (!end) end = (1ULL << 32) - 1; /* * Limit coherent and dma mask based on size and default mask * set by the driver. */ mask = DMA_BIT_MASK(ilog2(end) + 1); dev->coherent_dma_mask &= mask; *dev->dma_mask &= mask; /* ...but only set bus limit and range map if we found valid dma-ranges earlier */ if (set_map) { dev->bus_dma_limit = end; dev->dma_range_map = map; } coherent = of_dma_is_coherent(np); dev_dbg(dev, "device is%sdma coherent\n", coherent ? " " : " not "); ret = of_iommu_configure(dev, np, id); if (ret == -EPROBE_DEFER) { /* Don't touch range map if it wasn't set from a valid dma-ranges */ if (set_map) dev->dma_range_map = NULL; kfree(map); return -EPROBE_DEFER; } /* Take all other IOMMU errors to mean we'll just carry on without it */ dev_dbg(dev, "device is%sbehind an iommu\n", !ret ? 
" " : " not "); arch_setup_dma_ops(dev, coherent); if (ret) of_dma_set_restricted_buffer(dev, np); return 0; } EXPORT_SYMBOL_GPL(of_dma_configure_id); const void *of_device_get_match_data(const struct device *dev) { const struct of_device_id *match; match = of_match_device(dev->driver->of_match_table, dev); if (!match) return NULL; return match->data; } EXPORT_SYMBOL(of_device_get_match_data); /** * of_device_modalias - Fill buffer with newline terminated modalias string * @dev: Calling device * @str: Modalias string * @len: Size of @str */ ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len) { ssize_t sl; if (!dev || !dev->of_node || dev->of_node_reused) return -ENODEV; sl = of_modalias(dev->of_node, str, len - 2); if (sl < 0) return sl; if (sl > len - 2) return -ENOMEM; str[sl++] = '\n'; str[sl] = 0; return sl; } EXPORT_SYMBOL_GPL(of_device_modalias); /** * of_device_uevent - Display OF related uevent information * @dev: Device to display the uevent information for * @env: Kernel object's userspace event reference to fill up */ void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env) { const char *compat, *type; struct alias_prop *app; struct property *p; int seen = 0; if ((!dev) || (!dev->of_node)) return; add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node); add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node); type = of_node_get_device_type(dev->of_node); if (type) add_uevent_var(env, "OF_TYPE=%s", type); /* Since the compatible field can contain pretty much anything * it's not really legal to split it out with commas. We split it * up using a number of environment variables instead. */ of_property_for_each_string(dev->of_node, "compatible", p, compat) { add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat); seen++; } add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen); seen = 0; mutex_lock(&of_mutex); list_for_each_entry(app, &aliases_lookup, link) { if (dev->of_node == app->np) { add_uevent_var(env, "OF_ALIAS_%d=%s", seen, app->alias); seen++; } } mutex_unlock(&of_mutex); } EXPORT_SYMBOL_GPL(of_device_uevent); int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env) { int sl; if ((!dev) || (!dev->of_node) || dev->of_node_reused) return -ENODEV; /* Devicetree modalias is tricky, we add it in 2 steps */ if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; sl = of_modalias(dev->of_node, &env->buf[env->buflen-1], sizeof(env->buf) - env->buflen); if (sl < 0) return sl; if (sl >= (sizeof(env->buf) - env->buflen)) return -ENOMEM; env->buflen += sl; return 0; } EXPORT_SYMBOL_GPL(of_device_uevent_modalias); /** * of_device_make_bus_id - Use the device node data to assign a unique name * @dev: pointer to device structure that is linked to a device tree node * * This routine will first try using the translated bus address to * derive a unique name. If it cannot, then it will prepend names from * parent nodes until a unique name can be derived. */ void of_device_make_bus_id(struct device *dev) { struct device_node *node = dev->of_node; const __be32 *reg; u64 addr; u32 mask; /* Construct the name, using parent nodes if necessary to ensure uniqueness */ while (node->parent) { /* * If the address can be translated, then that is as much * uniqueness as we need. Make it the first component and return */ reg = of_get_property(node, "reg", NULL); if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) { if (!of_property_read_u32(node, "mask", &mask)) dev_set_name(dev, dev_name(dev) ? 
"%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn", addr, ffs(mask) - 1, node, dev_name(dev)); else dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn", addr, node, dev_name(dev)); return; } /* format arguments only used if dev_name() resolves to NULL */ dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s", kbasename(node->full_name), dev_name(dev)); node = node->parent; } } EXPORT_SYMBOL_GPL(of_device_make_bus_id); |
913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 | // SPDX-License-Identifier: ISC /* Copyright (C) 2023 MediaTek Inc. */ #include <linux/module.h> #include <linux/firmware.h> #include "mt792x.h" #include "dma.h" static const struct ieee80211_iface_limit if_limits[] = { { .max = MT792x_MAX_INTERFACES, .types = BIT(NL80211_IFTYPE_STATION) }, { .max = 1, .types = BIT(NL80211_IFTYPE_AP) } }; static const struct ieee80211_iface_combination if_comb[] = { { .limits = if_limits, .n_limits = ARRAY_SIZE(if_limits), .max_interfaces = MT792x_MAX_INTERFACES, .num_different_channels = 1, .beacon_int_infra_match = true, }, }; static const struct ieee80211_iface_limit if_limits_chanctx[] = { { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) }, { .max = 1, .types = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO) }, { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) } }; static const struct ieee80211_iface_combination if_comb_chanctx[] = { { .limits = if_limits_chanctx, .n_limits = ARRAY_SIZE(if_limits_chanctx), .max_interfaces = 3, .num_different_channels = 2, .beacon_int_infra_match = false, } }; void mt792x_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt76_phy *mphy = hw->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; struct mt76_wcid *wcid = &dev->mt76.global_wcid; u8 link_id; int qid; if (control->sta) { struct mt792x_link_sta *mlink; struct mt792x_sta *sta; link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK); sta = (struct mt792x_sta *)control->sta->drv_priv; mlink = mt792x_sta_to_link(sta, link_id); wcid = &mlink->wcid; } if (vif && !control->sta) { struct mt792x_vif *mvif; mvif = (struct mt792x_vif *)vif->drv_priv; wcid = &mvif->sta.deflink.wcid; } if (vif && control->sta && ieee80211_vif_is_mld(vif)) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_link_sta *link_sta; struct ieee80211_bss_conf *conf; link_id = wcid->link_id; rcu_read_lock(); conf = rcu_dereference(vif->link_conf[link_id]); memcpy(hdr->addr2, conf->addr, ETH_ALEN); link_sta = rcu_dereference(control->sta->link[link_id]); memcpy(hdr->addr1, link_sta->addr, ETH_ALEN); if (vif->type == NL80211_IFTYPE_STATION) memcpy(hdr->addr3, conf->bssid, ETH_ALEN); rcu_read_unlock(); } if (mt76_connac_pm_ref(mphy, &dev->pm)) { mt76_tx(mphy, control->sta, wcid, skb); mt76_connac_pm_unref(mphy, &dev->pm); return; } qid = skb_get_queue_mapping(skb); if (qid >= MT_TXQ_PSD) { qid = IEEE80211_AC_BE; skb_set_queue_mapping(skb, qid); } mt76_connac_pm_queue_skb(hw, &dev->pm, wcid, skb); } EXPORT_SYMBOL_GPL(mt792x_tx); void mt792x_stop(struct ieee80211_hw *hw, bool suspend) { struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt792x_phy *phy = mt792x_hw_phy(hw); cancel_delayed_work_sync(&phy->mt76->mac_work); cancel_delayed_work_sync(&dev->pm.ps_work); cancel_work_sync(&dev->pm.wake_work); cancel_work_sync(&dev->reset_work); mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); if (is_mt7921(&dev->mt76)) { mt792x_mutex_acquire(dev); mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false); mt792x_mutex_release(dev); } clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); } EXPORT_SYMBOL_GPL(mt792x_stop); void mt792x_mac_link_bss_remove(struct mt792x_dev *dev, struct mt792x_bss_conf 
*mconf, struct mt792x_link_sta *mlink) { struct ieee80211_vif *vif = container_of((void *)mconf->vif, struct ieee80211_vif, drv_priv); struct ieee80211_bss_conf *link_conf; int idx = mlink->wcid.idx; link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id); mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid); mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mconf->mt76, &mlink->wcid, false); rcu_assign_pointer(dev->mt76.wcid[idx], NULL); dev->mt76.vif_mask &= ~BIT_ULL(mconf->mt76.idx); mconf->vif->phy->omac_mask &= ~BIT_ULL(mconf->mt76.omac_idx); spin_lock_bh(&dev->mt76.sta_poll_lock); if (!list_empty(&mlink->wcid.poll_list)) list_del_init(&mlink->wcid.poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); mt76_wcid_cleanup(&dev->mt76, &mlink->wcid); } EXPORT_SYMBOL_GPL(mt792x_mac_link_bss_remove); void mt792x_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt792x_bss_conf *mconf; mt792x_mutex_acquire(dev); mconf = mt792x_link_conf_to_mconf(&vif->bss_conf); mt792x_mac_link_bss_remove(dev, mconf, &mvif->sta.deflink); mt792x_mutex_release(dev); } EXPORT_SYMBOL_GPL(mt792x_remove_interface); int mt792x_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue, const struct ieee80211_tx_queue_params *params) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; /* no need to update right away, we'll get BSS_CHANGED_QOS */ queue = mt76_connac_lmac_mapping(queue); mvif->bss_conf.queue_params[queue] = *params; return 0; } EXPORT_SYMBOL_GPL(mt792x_conf_tx); int mt792x_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats) { struct mt792x_phy *phy = mt792x_hw_phy(hw); struct mt76_mib_stats *mib = &phy->mib; mt792x_mutex_acquire(phy->dev); stats->dot11RTSSuccessCount = mib->rts_cnt; stats->dot11RTSFailureCount = mib->rts_retries_cnt; stats->dot11FCSErrorCount = mib->fcs_err_cnt; stats->dot11ACKFailureCount = mib->ack_fail_cnt; mt792x_mutex_release(phy->dev); return 0; } EXPORT_SYMBOL_GPL(mt792x_get_stats); u64 mt792x_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); u8 omac_idx = mvif->bss_conf.mt76.omac_idx; union { u64 t64; u32 t32[2]; } tsf; u16 n; mt792x_mutex_acquire(dev); n = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx; /* TSF software read */ mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_MODE); tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(0)); tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(0)); mt792x_mutex_release(dev); return tsf.t64; } EXPORT_SYMBOL_GPL(mt792x_get_tsf); void mt792x_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 timestamp) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); u8 omac_idx = mvif->bss_conf.mt76.omac_idx; union { u64 t64; u32 t32[2]; } tsf = { .t64 = timestamp, }; u16 n; mt792x_mutex_acquire(dev); n = omac_idx > HW_BSSID_MAX ? 
HW_BSSID_0 : omac_idx; mt76_wr(dev, MT_LPON_UTTR0(0), tsf.t32[0]); mt76_wr(dev, MT_LPON_UTTR1(0), tsf.t32[1]); /* TSF software overwrite */ mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_WRITE); mt792x_mutex_release(dev); } EXPORT_SYMBOL_GPL(mt792x_set_tsf); void mt792x_tx_worker(struct mt76_worker *w) { struct mt792x_dev *dev = container_of(w, struct mt792x_dev, mt76.tx_worker); if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { queue_work(dev->mt76.wq, &dev->pm.wake_work); return; } mt76_txq_schedule_all(&dev->mphy); mt76_connac_pm_unref(&dev->mphy, &dev->pm); } EXPORT_SYMBOL_GPL(mt792x_tx_worker); void mt792x_roc_timer(struct timer_list *timer) { struct mt792x_phy *phy = timer_container_of(phy, timer, roc_timer); ieee80211_queue_work(phy->mt76->hw, &phy->roc_work); } EXPORT_SYMBOL_GPL(mt792x_roc_timer); void mt792x_csa_timer(struct timer_list *timer) { struct mt792x_vif *mvif = timer_container_of(mvif, timer, csa_timer); ieee80211_queue_work(mvif->phy->mt76->hw, &mvif->csa_work); } EXPORT_SYMBOL_GPL(mt792x_csa_timer); void mt792x_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { struct mt792x_dev *dev = mt792x_hw_dev(hw); wait_event_timeout(dev->mt76.tx_wait, !mt76_has_tx_pending(&dev->mphy), HZ / 2); } EXPORT_SYMBOL_GPL(mt792x_flush); int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); mutex_lock(&dev->mt76.mutex); mvif->bss_conf.mt76.ctx = ctx; mctx->bss_conf = &mvif->bss_conf; mutex_unlock(&dev->mt76.mutex); return 0; } EXPORT_SYMBOL_GPL(mt792x_assign_vif_chanctx); void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); mutex_lock(&dev->mt76.mutex); mctx->bss_conf = NULL; mvif->bss_conf.mt76.ctx = NULL; mutex_unlock(&dev->mt76.mutex); if (vif->bss_conf.csa_active) { timer_delete_sync(&mvif->csa_timer); cancel_work_sync(&mvif->csa_work); } } EXPORT_SYMBOL_GPL(mt792x_unassign_vif_chanctx); void mt792x_set_wakeup(struct ieee80211_hw *hw, bool enabled) { struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt76_dev *mdev = &dev->mt76; device_set_wakeup_enable(mdev->dev, enabled); } EXPORT_SYMBOL_GPL(mt792x_set_wakeup); static const char mt792x_gstrings_stats[][ETH_GSTRING_LEN] = { /* tx counters */ "tx_ampdu_cnt", "tx_mpdu_attempts", "tx_mpdu_success", "tx_pkt_ebf_cnt", "tx_pkt_ibf_cnt", "tx_ampdu_len:0-1", "tx_ampdu_len:2-10", "tx_ampdu_len:11-19", "tx_ampdu_len:20-28", "tx_ampdu_len:29-37", "tx_ampdu_len:38-46", "tx_ampdu_len:47-55", "tx_ampdu_len:56-79", "tx_ampdu_len:80-103", "tx_ampdu_len:104-127", "tx_ampdu_len:128-151", "tx_ampdu_len:152-175", "tx_ampdu_len:176-199", "tx_ampdu_len:200-223", "tx_ampdu_len:224-247", "ba_miss_count", "tx_beamformer_ppdu_iBF", "tx_beamformer_ppdu_eBF", "tx_beamformer_rx_feedback_all", "tx_beamformer_rx_feedback_he", "tx_beamformer_rx_feedback_vht", "tx_beamformer_rx_feedback_ht", "tx_msdu_pack_1", "tx_msdu_pack_2", "tx_msdu_pack_3", "tx_msdu_pack_4", "tx_msdu_pack_5", "tx_msdu_pack_6", "tx_msdu_pack_7", "tx_msdu_pack_8", /* rx counters */ "rx_mpdu_cnt", "rx_ampdu_cnt", 
"rx_ampdu_bytes_cnt", "rx_ba_cnt", /* per vif counters */ "v_tx_mode_cck", "v_tx_mode_ofdm", "v_tx_mode_ht", "v_tx_mode_ht_gf", "v_tx_mode_vht", "v_tx_mode_he_su", "v_tx_mode_he_ext_su", "v_tx_mode_he_tb", "v_tx_mode_he_mu", "v_tx_mode_eht_su", "v_tx_mode_eht_trig", "v_tx_mode_eht_mu", "v_tx_bw_20", "v_tx_bw_40", "v_tx_bw_80", "v_tx_bw_160", "v_tx_bw_320", "v_tx_mcs_0", "v_tx_mcs_1", "v_tx_mcs_2", "v_tx_mcs_3", "v_tx_mcs_4", "v_tx_mcs_5", "v_tx_mcs_6", "v_tx_mcs_7", "v_tx_mcs_8", "v_tx_mcs_9", "v_tx_mcs_10", "v_tx_mcs_11", "v_tx_mcs_12", "v_tx_mcs_13", "v_tx_nss_1", "v_tx_nss_2", "v_tx_nss_3", "v_tx_nss_4", }; void mt792x_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 sset, u8 *data) { if (sset != ETH_SS_STATS) return; memcpy(data, mt792x_gstrings_stats, sizeof(mt792x_gstrings_stats)); data += sizeof(mt792x_gstrings_stats); page_pool_ethtool_stats_get_strings(data); } EXPORT_SYMBOL_GPL(mt792x_get_et_strings); int mt792x_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int sset) { if (sset != ETH_SS_STATS) return 0; return ARRAY_SIZE(mt792x_gstrings_stats) + page_pool_ethtool_stats_get_count(); } EXPORT_SYMBOL_GPL(mt792x_get_et_sset_count); static void mt792x_ethtool_worker(void *wi_data, struct ieee80211_sta *sta) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; struct mt76_ethtool_worker_info *wi = wi_data; if (msta->vif->bss_conf.mt76.idx != wi->idx) return; mt76_ethtool_worker(wi, &msta->deflink.wcid.stats, true); } void mt792x_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ethtool_stats *stats, u64 *data) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; int stats_size = ARRAY_SIZE(mt792x_gstrings_stats); struct mt792x_phy *phy = mt792x_hw_phy(hw); struct mt792x_dev *dev = phy->dev; struct mt76_mib_stats *mib = &phy->mib; struct mt76_ethtool_worker_info wi = { .data = data, .idx = mvif->bss_conf.mt76.idx, }; int i, ei = 0; mt792x_mutex_acquire(dev); mt792x_mac_update_mib_stats(phy); data[ei++] = mib->tx_ampdu_cnt; data[ei++] = mib->tx_mpdu_attempts_cnt; data[ei++] = mib->tx_mpdu_success_cnt; data[ei++] = mib->tx_pkt_ebf_cnt; data[ei++] = mib->tx_pkt_ibf_cnt; /* Tx ampdu stat */ for (i = 0; i < 15; i++) data[ei++] = phy->mt76->aggr_stats[i]; data[ei++] = phy->mib.ba_miss_cnt; /* Tx Beamformer monitor */ data[ei++] = mib->tx_bf_ibf_ppdu_cnt; data[ei++] = mib->tx_bf_ebf_ppdu_cnt; /* Tx Beamformer Rx feedback monitor */ data[ei++] = mib->tx_bf_rx_fb_all_cnt; data[ei++] = mib->tx_bf_rx_fb_he_cnt; data[ei++] = mib->tx_bf_rx_fb_vht_cnt; data[ei++] = mib->tx_bf_rx_fb_ht_cnt; /* Tx amsdu info (pack-count histogram) */ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) data[ei++] = mib->tx_amsdu[i]; /* rx counters */ data[ei++] = mib->rx_mpdu_cnt; data[ei++] = mib->rx_ampdu_cnt; data[ei++] = mib->rx_ampdu_bytes_cnt; data[ei++] = mib->rx_ba_cnt; /* Add values for all stations owned by this vif */ wi.initial_stat_idx = ei; ieee80211_iterate_stations_atomic(hw, mt792x_ethtool_worker, &wi); mt792x_mutex_release(dev); if (!wi.sta_count) return; ei += wi.worker_stat_count; mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei); stats_size += page_pool_ethtool_stats_get_count(); if (ei != stats_size) dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %d", ei, stats_size); } EXPORT_SYMBOL_GPL(mt792x_get_et_stats); void mt792x_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct station_info *sinfo) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; struct 
rate_info *txrate = &msta->deflink.wcid.rate; if (!txrate->legacy && !txrate->flags) return; if (txrate->legacy) { sinfo->txrate.legacy = txrate->legacy; } else { sinfo->txrate.mcs = txrate->mcs; sinfo->txrate.nss = txrate->nss; sinfo->txrate.bw = txrate->bw; sinfo->txrate.he_gi = txrate->he_gi; sinfo->txrate.he_dcm = txrate->he_dcm; sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc; } sinfo->tx_failed = msta->deflink.wcid.stats.tx_failed; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); sinfo->tx_retries = msta->deflink.wcid.stats.tx_retries; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); sinfo->txrate.flags = txrate->flags; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); sinfo->ack_signal = (s8)msta->deflink.ack_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->deflink.avg_ack_signal); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); } EXPORT_SYMBOL_GPL(mt792x_sta_statistics); void mt792x_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct mt792x_phy *phy = mt792x_hw_phy(hw); struct mt792x_dev *dev = phy->dev; mt792x_mutex_acquire(dev); phy->coverage_class = max_t(s16, coverage_class, 0); mt792x_mac_set_timeing(phy); mt792x_mutex_release(dev); } EXPORT_SYMBOL_GPL(mt792x_set_coverage_class); int mt792x_init_wiphy(struct ieee80211_hw *hw) { struct mt792x_phy *phy = mt792x_hw_phy(hw); struct mt792x_dev *dev = phy->dev; struct wiphy *wiphy = hw->wiphy; hw->queues = 4; if (dev->has_eht) { hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT; hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT; } else { hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; } hw->netdev_features = NETIF_F_RXCSUM; hw->radiotap_timestamp.units_pos = IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; phy->slottime = 9; hw->sta_data_size = sizeof(struct mt792x_sta); hw->vif_data_size = sizeof(struct mt792x_vif); hw->chanctx_data_size = sizeof(struct mt792x_chanctx); if (dev->fw_features & MT792x_FW_CAP_CNM) { wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wiphy->iface_combinations = if_comb_chanctx; wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_chanctx); } else { wiphy->flags &= ~WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wiphy->iface_combinations = if_comb; wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); } wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP | WIPHY_FLAG_4ADDR_STATION); wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_DEVICE); wiphy->max_remain_on_channel_duration = 5000; wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN; wiphy->max_scan_ssids = 4; wiphy->max_sched_scan_plan_interval = MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL; wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN; wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID; wiphy->max_match_sets = MT76_CONNAC_MAX_SCAN_MATCH; wiphy->max_sched_scan_reqs = 1; wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH | WIPHY_FLAG_SPLIT_SCAN_6GHZ; wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT); 
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD); ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); ieee80211_hw_set(hw, WANT_MONITOR_VIF); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); ieee80211_hw_set(hw, CONNECTION_MONITOR); if (is_mt7921(&dev->mt76)) ieee80211_hw_set(hw, CHANCTX_STA_CSA); if (dev->pm.enable) ieee80211_hw_set(hw, CONNECTION_MONITOR); hw->max_tx_fragments = 4; return 0; } EXPORT_SYMBOL_GPL(mt792x_init_wiphy); static u8 mt792x_get_offload_capability(struct device *dev, const char *fw_wm) { const struct mt76_connac2_fw_trailer *hdr; struct mt792x_realease_info *rel_info; const struct firmware *fw; int ret, i, offset = 0; const u8 *data, *end; u8 offload_caps = 0; ret = request_firmware(&fw, fw_wm, dev); if (ret) return ret; if (!fw || !fw->data || fw->size < sizeof(*hdr)) { dev_err(dev, "Invalid firmware\n"); goto out; } data = fw->data; hdr = (const void *)(fw->data + fw->size - sizeof(*hdr)); for (i = 0; i < hdr->n_region; i++) { const struct mt76_connac2_fw_region *region; region = (const void *)((const u8 *)hdr - (hdr->n_region - i) * sizeof(*region)); offset += le32_to_cpu(region->len); } data += offset + 16; rel_info = (struct mt792x_realease_info *)data; data += sizeof(*rel_info); end = data + le16_to_cpu(rel_info->len); while (data < end) { rel_info = (struct mt792x_realease_info *)data; data += sizeof(*rel_info); if (rel_info->tag == MT792x_FW_TAG_FEATURE) { struct mt792x_fw_features *features; features = (struct mt792x_fw_features *)data; offload_caps = features->data; break; } data += le16_to_cpu(rel_info->len) + rel_info->pad_len; } out: release_firmware(fw); return offload_caps; } struct ieee80211_ops * mt792x_get_mac80211_ops(struct device *dev, const struct ieee80211_ops *mac80211_ops, void *drv_data, u8 *fw_features) { struct ieee80211_ops *ops; ops = devm_kmemdup(dev, mac80211_ops, sizeof(struct ieee80211_ops), GFP_KERNEL); if (!ops) return NULL; *fw_features = mt792x_get_offload_capability(dev, drv_data); if (!(*fw_features & MT792x_FW_CAP_CNM)) { ops->remain_on_channel = NULL; ops->cancel_remain_on_channel = NULL; ops->add_chanctx = ieee80211_emulate_add_chanctx; ops->remove_chanctx = ieee80211_emulate_remove_chanctx; ops->change_chanctx = ieee80211_emulate_change_chanctx; ops->switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx; ops->assign_vif_chanctx = NULL; ops->unassign_vif_chanctx = NULL; ops->mgd_prepare_tx = NULL; ops->mgd_complete_tx = NULL; } return ops; } EXPORT_SYMBOL_GPL(mt792x_get_mac80211_ops); int mt792x_init_wcid(struct mt792x_dev *dev) { int idx; /* Beacon and mgmt frames should occupy wcid 0 */ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1); if (idx) return -ENOSPC; dev->mt76.global_wcid.idx = idx; dev->mt76.global_wcid.hw_key_idx = -1; dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET; rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); return 0; } EXPORT_SYMBOL_GPL(mt792x_init_wcid); int mt792x_mcu_drv_pmctrl(struct mt792x_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; struct mt76_connac_pm *pm = &dev->pm; int err = 0; mutex_lock(&pm->mutex); if (!test_bit(MT76_STATE_PM, &mphy->state)) goto out; err = 
__mt792x_mcu_drv_pmctrl(dev); out: mutex_unlock(&pm->mutex); if (err) mt792x_reset(&dev->mt76); return err; } EXPORT_SYMBOL_GPL(mt792x_mcu_drv_pmctrl); int mt792x_mcu_fw_pmctrl(struct mt792x_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; struct mt76_connac_pm *pm = &dev->pm; int err = 0; mutex_lock(&pm->mutex); if (mt76_connac_skip_fw_pmctrl(mphy, pm)) goto out; err = __mt792x_mcu_fw_pmctrl(dev); out: mutex_unlock(&pm->mutex); if (err) mt792x_reset(&dev->mt76); return err; } EXPORT_SYMBOL_GPL(mt792x_mcu_fw_pmctrl); int __mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev) { int i, err = 0; for (i = 0; i < MT792x_DRV_OWN_RETRY_COUNT; i++) { mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN); if (dev->aspm_supported) usleep_range(2000, 3000); if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_OWN_SYNC, 0, 50, 1)) break; } if (i == MT792x_DRV_OWN_RETRY_COUNT) { dev_err(dev->mt76.dev, "driver own failed\n"); err = -EIO; } return err; } EXPORT_SYMBOL_GPL(__mt792xe_mcu_drv_pmctrl); int mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; struct mt76_connac_pm *pm = &dev->pm; int err; err = __mt792xe_mcu_drv_pmctrl(dev); if (err < 0) goto out; mt792x_wpdma_reinit_cond(dev); clear_bit(MT76_STATE_PM, &mphy->state); pm->stats.last_wake_event = jiffies; pm->stats.doze_time += pm->stats.last_wake_event - pm->stats.last_doze_event; out: return err; } EXPORT_SYMBOL_GPL(mt792xe_mcu_drv_pmctrl); int mt792xe_mcu_fw_pmctrl(struct mt792x_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; struct mt76_connac_pm *pm = &dev->pm; int i; for (i = 0; i < MT792x_DRV_OWN_RETRY_COUNT; i++) { mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN); if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_OWN_SYNC, 4, 50, 1)) break; } if (i == MT792x_DRV_OWN_RETRY_COUNT) { dev_err(dev->mt76.dev, "firmware own failed\n"); clear_bit(MT76_STATE_PM, &mphy->state); return -EIO; } pm->stats.last_doze_event = jiffies; pm->stats.awake_time += pm->stats.last_doze_event - pm->stats.last_wake_event; return 0; } EXPORT_SYMBOL_GPL(mt792xe_mcu_fw_pmctrl); int mt792x_load_firmware(struct mt792x_dev *dev) { int ret; ret = mt76_connac2_load_patch(&dev->mt76, mt792x_patch_name(dev)); if (ret) return ret; if (mt76_is_sdio(&dev->mt76)) { /* activate again */ ret = __mt792x_mcu_fw_pmctrl(dev); if (!ret) ret = __mt792x_mcu_drv_pmctrl(dev); } ret = mt76_connac2_load_ram(&dev->mt76, mt792x_ram_name(dev), NULL); if (ret) return ret; if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY, MT_TOP_MISC2_FW_N9_RDY, 1500)) { dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); return -EIO; } #ifdef CONFIG_PM dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support; #endif /* CONFIG_PM */ dev_dbg(dev->mt76.dev, "Firmware init done\n"); return 0; } EXPORT_SYMBOL_GPL(mt792x_load_firmware); void mt792x_config_mac_addr_list(struct mt792x_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); struct wiphy *wiphy = hw->wiphy; int i; for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) { u8 *addr = dev->macaddr_list[i].addr; memcpy(addr, dev->mphy.macaddr, ETH_ALEN); if (!i) continue; addr[0] |= BIT(1); addr[0] ^= ((i - 1) << 2); } wiphy->addresses = dev->macaddr_list; wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list); } EXPORT_SYMBOL_GPL(mt792x_config_mac_addr_list); MODULE_DESCRIPTION("MediaTek MT792x core driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
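The ethtool and station-statistics helpers exported above are meant to be plugged straight into a chip driver's mac80211 callback table. A minimal sketch, assuming a hypothetical mt792x-family driver (the ops-table name is illustrative; only the mt792x_* symbols come from this file):

/* Hedged sketch, not part of this file: wiring the exported mt792x
 * helpers into mac80211. "my_mt792x_ops" is a hypothetical name; a
 * real driver fills in many more callbacks (.tx, .start, ...).
 */
static const struct ieee80211_ops my_mt792x_ops = {
	.get_et_sset_count = mt792x_get_et_sset_count,
	.get_et_stats = mt792x_get_et_stats,
	.get_et_strings = mt792x_get_et_strings,
	.sta_statistics = mt792x_sta_statistics,
	.set_coverage_class = mt792x_set_coverage_class,
};

Such a template is then typically passed through mt792x_get_mac80211_ops() above, which duplicates it with devm_kmemdup() and strips the remain-on-channel and channel-context callbacks when the firmware lacks MT792x_FW_CAP_CNM.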
// SPDX-License-Identifier: GPL-2.0-only /* * Resizable, Scalable, Concurrent Hash Table * * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> * * Code partially derived from nft_hash * Rewritten with rehash code from br_multicast plus single list * pointer as suggested by Josh Triplett */ #include <linux/atomic.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/log2.h> #include <linux/sched.h> #include <linux/rculist.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/jhash.h> #include <linux/random.h> #include <linux/rhashtable.h> #include <linux/err.h> #include <linux/export.h> #define HASH_DEFAULT_SIZE 64UL #define HASH_MIN_SIZE 4U union nested_table { union nested_table __rcu *table; struct rhash_lock_head __rcu *bucket; }; static u32 head_hashfn(struct rhashtable *ht, const struct bucket_table *tbl, const struct rhash_head *he) { return rht_head_hashfn(ht, tbl, he, ht->p); } #ifdef CONFIG_PROVE_LOCKING #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) int lockdep_rht_mutex_is_held(struct rhashtable *ht) { return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; } EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) { if (!debug_locks) return 1; if (unlikely(tbl->nest)) return 1; return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]); } EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); #else #define ASSERT_RHT_MUTEX(HT) #endif static inline union nested_table *nested_table_top( const struct bucket_table *tbl) { /* The top-level bucket entry does not need RCU protection * because it's set at the same time as tbl->nest.
*/ return (void *)rcu_dereference_protected(tbl->buckets[0], 1); } static void nested_table_free(union nested_table *ntbl, unsigned int size) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); const unsigned int len = 1 << shift; unsigned int i; ntbl = rcu_dereference_protected(ntbl->table, 1); if (!ntbl) return; if (size > len) { size >>= shift; for (i = 0; i < len; i++) nested_table_free(ntbl + i, size); } kfree(ntbl); } static void nested_bucket_table_free(const struct bucket_table *tbl) { unsigned int size = tbl->size >> tbl->nest; unsigned int len = 1 << tbl->nest; union nested_table *ntbl; unsigned int i; ntbl = nested_table_top(tbl); for (i = 0; i < len; i++) nested_table_free(ntbl + i, size); kfree(ntbl); } static void bucket_table_free(const struct bucket_table *tbl) { if (tbl->nest) nested_bucket_table_free(tbl); kvfree(tbl); } static void bucket_table_free_rcu(struct rcu_head *head) { bucket_table_free(container_of(head, struct bucket_table, rcu)); } static union nested_table *nested_table_alloc(struct rhashtable *ht, union nested_table __rcu **prev, bool leaf) { union nested_table *ntbl; int i; ntbl = rcu_dereference(*prev); if (ntbl) return ntbl; ntbl = alloc_hooks_tag(ht->alloc_tag, kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO)); if (ntbl && leaf) { for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++) INIT_RHT_NULLS_HEAD(ntbl[i].bucket); } if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL) return ntbl; /* Raced with another thread. */ kfree(ntbl); return rcu_dereference(*prev); } static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, size_t nbuckets, gfp_t gfp) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); struct bucket_table *tbl; size_t size; if (nbuckets < (1 << (shift + 1))) return NULL; size = sizeof(*tbl) + sizeof(tbl->buckets[0]); tbl = alloc_hooks_tag(ht->alloc_tag, kmalloc_noprof(size, gfp|__GFP_ZERO)); if (!tbl) return NULL; if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, false)) { kfree(tbl); return NULL; } tbl->nest = (ilog2(nbuckets) - 1) % shift + 1; return tbl; } static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, size_t nbuckets, gfp_t gfp) { struct bucket_table *tbl = NULL; size_t size; int i; static struct lock_class_key __key; tbl = alloc_hooks_tag(ht->alloc_tag, kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets), gfp|__GFP_ZERO, NUMA_NO_NODE)); size = nbuckets; if (tbl == NULL && !gfpflags_allow_blocking(gfp)) { tbl = nested_bucket_table_alloc(ht, nbuckets, gfp); nbuckets = 0; } if (tbl == NULL) return NULL; lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0); tbl->size = size; rcu_head_init(&tbl->rcu); INIT_LIST_HEAD(&tbl->walkers); tbl->hash_rnd = get_random_u32(); for (i = 0; i < nbuckets; i++) INIT_RHT_NULLS_HEAD(tbl->buckets[i]); return tbl; } static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, struct bucket_table *tbl) { struct bucket_table *new_tbl; do { new_tbl = tbl; tbl = rht_dereference_rcu(tbl->future_tbl, ht); } while (tbl); return new_tbl; } static int rhashtable_rehash_one(struct rhashtable *ht, struct rhash_lock_head __rcu **bkt, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl); int err = -EAGAIN; struct rhash_head *head, *next, *entry; struct rhash_head __rcu **pprev = NULL; unsigned int new_hash; unsigned long flags; if (new_tbl->nest) goto out; err = -ENOENT; rht_for_each_from(entry, 
rht_ptr(bkt, old_tbl, old_hash), old_tbl, old_hash) { err = 0; next = rht_dereference_bucket(entry->next, old_tbl, old_hash); if (rht_is_a_nulls(next)) break; pprev = &entry->next; } if (err) goto out; new_hash = head_hashfn(ht, new_tbl, entry); flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING); head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash); RCU_INIT_POINTER(entry->next, head); rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags); if (pprev) rcu_assign_pointer(*pprev, next); else /* Need to preserve the bit lock. */ rht_assign_locked(bkt, next); out: return err; } static int rhashtable_rehash_chain(struct rhashtable *ht, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash); unsigned long flags; int err; if (!bkt) return 0; flags = rht_lock(old_tbl, bkt); while (!(err = rhashtable_rehash_one(ht, bkt, old_hash))) ; if (err == -ENOENT) err = 0; rht_unlock(old_tbl, bkt, flags); return err; } static int rhashtable_rehash_attach(struct rhashtable *ht, struct bucket_table *old_tbl, struct bucket_table *new_tbl) { /* Make insertions go into the new, empty table right away. Deletions * and lookups will be attempted in both tables until we synchronize. * As cmpxchg() provides strong barriers, we do not need * rcu_assign_pointer(). */ if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL, new_tbl) != NULL) return -EEXIST; return 0; } static int rhashtable_rehash_table(struct rhashtable *ht) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); struct bucket_table *new_tbl; struct rhashtable_walker *walker; unsigned int old_hash; int err; new_tbl = rht_dereference(old_tbl->future_tbl, ht); if (!new_tbl) return 0; for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { err = rhashtable_rehash_chain(ht, old_hash); if (err) return err; cond_resched(); } /* Publish the new table pointer. */ rcu_assign_pointer(ht->tbl, new_tbl); spin_lock(&ht->lock); list_for_each_entry(walker, &old_tbl->walkers, list) walker->tbl = NULL; /* Wait for readers. All new readers will see the new * table, and thus no references to the old table will * remain. * We do this inside the locked region so that * rhashtable_walk_stop() can use rcu_head_after_call_rcu() * to check if it should not re-link the table. */ call_rcu(&old_tbl->rcu, bucket_table_free_rcu); spin_unlock(&ht->lock); return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0; } static int rhashtable_rehash_alloc(struct rhashtable *ht, struct bucket_table *old_tbl, unsigned int size) { struct bucket_table *new_tbl; int err; ASSERT_RHT_MUTEX(ht); new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); if (new_tbl == NULL) return -ENOMEM; err = rhashtable_rehash_attach(ht, old_tbl, new_tbl); if (err) bucket_table_free(new_tbl); return err; } /** * rhashtable_shrink - Shrink hash table while allowing concurrent lookups * @ht: the hash table to shrink * * This function shrinks the hash table to fit, i.e., the smallest * size would not cause it to expand right away automatically. * * The caller must ensure that no concurrent resizing occurs by holding * ht->mutex. * * The caller must ensure that no concurrent table mutations take place. * It is however valid to have concurrent lookups if they are RCU protected. * * It is valid to have concurrent insertions and deletions protected by per * bucket locks or concurrent RCU protected lookups and traversals.
*/ static int rhashtable_shrink(struct rhashtable *ht) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); unsigned int nelems = atomic_read(&ht->nelems); unsigned int size = 0; if (nelems) size = roundup_pow_of_two(nelems * 3 / 2); if (size < ht->p.min_size) size = ht->p.min_size; if (old_tbl->size <= size) return 0; if (rht_dereference(old_tbl->future_tbl, ht)) return -EEXIST; return rhashtable_rehash_alloc(ht, old_tbl, size); } static void rht_deferred_worker(struct work_struct *work) { struct rhashtable *ht; struct bucket_table *tbl; int err = 0; ht = container_of(work, struct rhashtable, run_work); mutex_lock(&ht->mutex); tbl = rht_dereference(ht->tbl, ht); tbl = rhashtable_last_table(ht, tbl); if (rht_grow_above_75(ht, tbl)) err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2); else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) err = rhashtable_shrink(ht); else if (tbl->nest) err = rhashtable_rehash_alloc(ht, tbl, tbl->size); if (!err || err == -EEXIST) { int nerr; nerr = rhashtable_rehash_table(ht); err = err ?: nerr; } mutex_unlock(&ht->mutex); if (err) schedule_work(&ht->run_work); } static int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl) { struct bucket_table *old_tbl; struct bucket_table *new_tbl; unsigned int size; int err; old_tbl = rht_dereference_rcu(ht->tbl, ht); size = tbl->size; err = -EBUSY; if (rht_grow_above_75(ht, tbl)) size *= 2; /* Do not schedule more than one rehash */ else if (old_tbl != tbl) goto fail; err = -ENOMEM; new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN); if (new_tbl == NULL) goto fail; err = rhashtable_rehash_attach(ht, tbl, new_tbl); if (err) { bucket_table_free(new_tbl); if (err == -EEXIST) err = 0; } else schedule_work(&ht->run_work); return err; fail: /* Do not fail the insert if someone else did a rehash. */ if (likely(rcu_access_pointer(tbl->future_tbl))) return 0; /* Schedule async rehash to retry allocation in process context. */ if (err == -ENOMEM) schedule_work(&ht->run_work); return err; } static void *rhashtable_lookup_one(struct rhashtable *ht, struct rhash_lock_head __rcu **bkt, struct bucket_table *tbl, unsigned int hash, const void *key, struct rhash_head *obj) { struct rhashtable_compare_arg arg = { .ht = ht, .key = key, }; struct rhash_head __rcu **pprev = NULL; struct rhash_head *head; int elasticity; elasticity = RHT_ELASTICITY; rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) { struct rhlist_head *list; struct rhlist_head *plist; elasticity--; if (!key || (ht->p.obj_cmpfn ? 
ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) : rhashtable_compare(&arg, rht_obj(ht, head)))) { pprev = &head->next; continue; } if (!ht->rhlist) return rht_obj(ht, head); list = container_of(obj, struct rhlist_head, rhead); plist = container_of(head, struct rhlist_head, rhead); RCU_INIT_POINTER(list->next, plist); head = rht_dereference_bucket(head->next, tbl, hash); RCU_INIT_POINTER(list->rhead.next, head); if (pprev) rcu_assign_pointer(*pprev, obj); else /* Need to preserve the bit lock */ rht_assign_locked(bkt, obj); return NULL; } if (elasticity <= 0) return ERR_PTR(-EAGAIN); return ERR_PTR(-ENOENT); } static struct bucket_table *rhashtable_insert_one( struct rhashtable *ht, struct rhash_lock_head __rcu **bkt, struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj, void *data) { struct bucket_table *new_tbl; struct rhash_head *head; if (!IS_ERR_OR_NULL(data)) return ERR_PTR(-EEXIST); if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT) return ERR_CAST(data); new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (new_tbl) return new_tbl; if (PTR_ERR(data) != -ENOENT) return ERR_CAST(data); if (unlikely(rht_grow_above_max(ht, tbl))) return ERR_PTR(-E2BIG); if (unlikely(rht_grow_above_100(ht, tbl))) return ERR_PTR(-EAGAIN); head = rht_ptr(bkt, tbl, hash); RCU_INIT_POINTER(obj->next, head); if (ht->rhlist) { struct rhlist_head *list; list = container_of(obj, struct rhlist_head, rhead); RCU_INIT_POINTER(list->next, NULL); } /* bkt is always the head of the list, so it holds * the lock, which we need to preserve */ rht_assign_locked(bkt, obj); return NULL; } static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, struct rhash_head *obj) { struct bucket_table *new_tbl; struct bucket_table *tbl; struct rhash_lock_head __rcu **bkt; unsigned long flags; unsigned int hash; void *data; new_tbl = rcu_dereference(ht->tbl); do { tbl = new_tbl; hash = rht_head_hashfn(ht, tbl, obj, ht->p); if (rcu_access_pointer(tbl->future_tbl)) /* Failure is OK */ bkt = rht_bucket_var(tbl, hash); else bkt = rht_bucket_insert(ht, tbl, hash); if (bkt == NULL) { new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); data = ERR_PTR(-EAGAIN); } else { bool inserted; flags = rht_lock(tbl, bkt); data = rhashtable_lookup_one(ht, bkt, tbl, hash, key, obj); new_tbl = rhashtable_insert_one(ht, bkt, tbl, hash, obj, data); inserted = data && !new_tbl; if (inserted) atomic_inc(&ht->nelems); if (PTR_ERR(new_tbl) != -EEXIST) data = ERR_CAST(new_tbl); rht_unlock(tbl, bkt, flags); if (inserted && rht_grow_above_75(ht, tbl)) schedule_work(&ht->run_work); } } while (!IS_ERR_OR_NULL(new_tbl)); if (PTR_ERR(data) == -EAGAIN) data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?: -EAGAIN); return data; } void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, struct rhash_head *obj) { void *data; do { rcu_read_lock(); data = rhashtable_try_insert(ht, key, obj); rcu_read_unlock(); } while (PTR_ERR(data) == -EAGAIN); return data; } EXPORT_SYMBOL_GPL(rhashtable_insert_slow); /** * rhashtable_walk_enter - Initialise an iterator * @ht: Table to walk over * @iter: Hash table Iterator * * This function prepares a hash table walk. * * Note that if you restart a walk after rhashtable_walk_stop you * may see the same object twice. Also, you may miss objects if * there are removals in between rhashtable_walk_stop and the next * call to rhashtable_walk_start. * * For a completely stable walk you should construct your own data * structure outside the hash table. 
* * This function may be called from any process context, including * non-preemptible context, but cannot be called from softirq or * hardirq context. * * You must call rhashtable_walk_exit after this function returns. */ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter) { iter->ht = ht; iter->p = NULL; iter->slot = 0; iter->skip = 0; iter->end_of_table = 0; spin_lock(&ht->lock); iter->walker.tbl = rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); list_add(&iter->walker.list, &iter->walker.tbl->walkers); spin_unlock(&ht->lock); } EXPORT_SYMBOL_GPL(rhashtable_walk_enter); /** * rhashtable_walk_exit - Free an iterator * @iter: Hash table Iterator * * This function frees resources allocated by rhashtable_walk_enter. */ void rhashtable_walk_exit(struct rhashtable_iter *iter) { spin_lock(&iter->ht->lock); if (iter->walker.tbl) list_del(&iter->walker.list); spin_unlock(&iter->ht->lock); } EXPORT_SYMBOL_GPL(rhashtable_walk_exit); /** * rhashtable_walk_start_check - Start a hash table walk * @iter: Hash table iterator * * Start a hash table walk at the current iterator position. Note that we take * the RCU lock in all cases including when we return an error. So you must * always call rhashtable_walk_stop to clean up. * * Returns zero if successful. * * Returns -EAGAIN if resize event occurred. Note that the iterator * will rewind back to the beginning and you may use it immediately * by calling rhashtable_walk_next. * * rhashtable_walk_start is defined as an inline variant that returns * void. This is preferred in cases where the caller would ignore * resize events and always continue. */ int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU) { struct rhashtable *ht = iter->ht; bool rhlist = ht->rhlist; rcu_read_lock(); spin_lock(&ht->lock); if (iter->walker.tbl) list_del(&iter->walker.list); spin_unlock(&ht->lock); if (iter->end_of_table) return 0; if (!iter->walker.tbl) { iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); iter->slot = 0; iter->skip = 0; return -EAGAIN; } if (iter->p && !rhlist) { /* * We need to validate that 'p' is still in the table, and * if so, update 'skip' */ struct rhash_head *p; int skip = 0; rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { skip++; if (p == iter->p) { iter->skip = skip; goto found; } } iter->p = NULL; } else if (iter->p && rhlist) { /* Need to validate that 'list' is still in the table, and * if so, update 'skip' and 'p'. */ struct rhash_head *p; struct rhlist_head *list; int skip = 0; rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { for (list = container_of(p, struct rhlist_head, rhead); list; list = rcu_dereference(list->next)) { skip++; if (list == iter->list) { iter->p = p; iter->skip = skip; goto found; } } } iter->p = NULL; } found: return 0; } EXPORT_SYMBOL_GPL(rhashtable_walk_start_check); /** * __rhashtable_walk_find_next - Find the next element in a table (or the first * one in case of a new walk). * * @iter: Hash table iterator * * Returns the found object or NULL when the end of the table is reached. * * Returns -EAGAIN if resize event occurred. 
*/ static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter) { struct bucket_table *tbl = iter->walker.tbl; struct rhlist_head *list = iter->list; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; bool rhlist = ht->rhlist; if (!tbl) return NULL; for (; iter->slot < tbl->size; iter->slot++) { int skip = iter->skip; rht_for_each_rcu(p, tbl, iter->slot) { if (rhlist) { list = container_of(p, struct rhlist_head, rhead); do { if (!skip) goto next; skip--; list = rcu_dereference(list->next); } while (list); continue; } if (!skip) break; skip--; } next: if (!rht_is_a_nulls(p)) { iter->skip++; iter->p = p; iter->list = list; return rht_obj(ht, rhlist ? &list->rhead : p); } iter->skip = 0; } iter->p = NULL; /* Ensure we see any new tables. */ smp_rmb(); iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (iter->walker.tbl) { iter->slot = 0; iter->skip = 0; return ERR_PTR(-EAGAIN); } else { iter->end_of_table = true; } return NULL; } /** * rhashtable_walk_next - Return the next object and advance the iterator * @iter: Hash table iterator * * Note that you must call rhashtable_walk_stop when you are finished * with the walk. * * Returns the next object or NULL when the end of the table is reached. * * Returns -EAGAIN if resize event occurred. Note that the iterator * will rewind back to the beginning and you may continue to use it. */ void *rhashtable_walk_next(struct rhashtable_iter *iter) { struct rhlist_head *list = iter->list; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; bool rhlist = ht->rhlist; if (p) { if (!rhlist || !(list = rcu_dereference(list->next))) { p = rcu_dereference(p->next); list = container_of(p, struct rhlist_head, rhead); } if (!rht_is_a_nulls(p)) { iter->skip++; iter->p = p; iter->list = list; return rht_obj(ht, rhlist ? &list->rhead : p); } /* At the end of this slot, switch to next one and then find * next entry from that point. */ iter->skip = 0; iter->slot++; } return __rhashtable_walk_find_next(iter); } EXPORT_SYMBOL_GPL(rhashtable_walk_next); /** * rhashtable_walk_peek - Return the next object but don't advance the iterator * @iter: Hash table iterator * * Returns the next object or NULL when the end of the table is reached. * * Returns -EAGAIN if resize event occurred. Note that the iterator * will rewind back to the beginning and you may continue to use it. */ void *rhashtable_walk_peek(struct rhashtable_iter *iter) { struct rhlist_head *list = iter->list; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; if (p) return rht_obj(ht, ht->rhlist ? &list->rhead : p); /* No object found in current iter, find next one in the table. */ if (iter->skip) { /* A nonzero skip value points to the next entry in the table * beyond that last one that was found. Decrement skip so * we find the current value. __rhashtable_walk_find_next * will restore the original value of skip assuming that * the table hasn't changed. */ iter->skip--; } return __rhashtable_walk_find_next(iter); } EXPORT_SYMBOL_GPL(rhashtable_walk_peek); /** * rhashtable_walk_stop - Finish a hash table walk * @iter: Hash table iterator * * Finish a hash table walk. Does not reset the iterator to the start of the * hash table. */ void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU) { struct rhashtable *ht; struct bucket_table *tbl = iter->walker.tbl; if (!tbl) goto out; ht = iter->ht; spin_lock(&ht->lock); if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu)) /* This bucket table is being freed, don't re-link it. 
*/ iter->walker.tbl = NULL; else list_add(&iter->walker.list, &tbl->walkers); spin_unlock(&ht->lock); out: rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rhashtable_walk_stop); static size_t rounded_hashtable_size(const struct rhashtable_params *params) { size_t retsize; if (params->nelem_hint) retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3), (unsigned long)params->min_size); else retsize = max(HASH_DEFAULT_SIZE, (unsigned long)params->min_size); return retsize; } static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) { return jhash2(key, length, seed); } /** * rhashtable_init - initialize a new hash table * @ht: hash table to be initialized * @params: configuration parameters * * Initializes a new hash table based on the provided configuration * parameters. A table can be configured either with a variable or * fixed length key: * * Configuration Example 1: Fixed length keys * struct test_obj { * int key; * void * my_member; * struct rhash_head node; * }; * * struct rhashtable_params params = { * .head_offset = offsetof(struct test_obj, node), * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), * .hashfn = jhash, * }; * * Configuration Example 2: Variable length keys * struct test_obj { * [...] * struct rhash_head node; * }; * * u32 my_hash_fn(const void *data, u32 len, u32 seed) * { * struct test_obj *obj = data; * * return [... hash ...]; * } * * struct rhashtable_params params = { * .head_offset = offsetof(struct test_obj, node), * .hashfn = jhash, * .obj_hashfn = my_hash_fn, * }; */ int rhashtable_init_noprof(struct rhashtable *ht, const struct rhashtable_params *params) { struct bucket_table *tbl; size_t size; if ((!params->key_len && !params->obj_hashfn) || (params->obj_hashfn && !params->obj_cmpfn)) return -EINVAL; memset(ht, 0, sizeof(*ht)); mutex_init(&ht->mutex); spin_lock_init(&ht->lock); memcpy(&ht->p, params, sizeof(*params)); alloc_tag_record(ht->alloc_tag); if (params->min_size) ht->p.min_size = roundup_pow_of_two(params->min_size); /* Cap total entries at 2^31 to avoid nelems overflow. */ ht->max_elems = 1u << 31; if (params->max_size) { ht->p.max_size = rounddown_pow_of_two(params->max_size); if (ht->p.max_size < ht->max_elems / 2) ht->max_elems = ht->p.max_size * 2; } ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); size = rounded_hashtable_size(&ht->p); ht->key_len = ht->p.key_len; if (!params->hashfn) { ht->p.hashfn = jhash; if (!(ht->key_len & (sizeof(u32) - 1))) { ht->key_len /= sizeof(u32); ht->p.hashfn = rhashtable_jhash2; } } /* * This is api initialization and thus we need to guarantee the * initial rhashtable allocation. Upon failure, retry with the * smallest possible size with __GFP_NOFAIL semantics. */ tbl = bucket_table_alloc(ht, size, GFP_KERNEL); if (unlikely(tbl == NULL)) { size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL); } atomic_set(&ht->nelems, 0); RCU_INIT_POINTER(ht->tbl, tbl); INIT_WORK(&ht->run_work, rht_deferred_worker); return 0; } EXPORT_SYMBOL_GPL(rhashtable_init_noprof); /** * rhltable_init - initialize a new hash list table * @hlt: hash list table to be initialized * @params: configuration parameters * * Initializes a new hash list table. * * See documentation for rhashtable_init. 
*/ int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params) { int err; err = rhashtable_init_noprof(&hlt->ht, params); hlt->ht.rhlist = true; return err; } EXPORT_SYMBOL_GPL(rhltable_init_noprof); static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj, void (*free_fn)(void *ptr, void *arg), void *arg) { struct rhlist_head *list; if (!ht->rhlist) { free_fn(rht_obj(ht, obj), arg); return; } list = container_of(obj, struct rhlist_head, rhead); do { obj = &list->rhead; list = rht_dereference(list->next, ht); free_fn(rht_obj(ht, obj), arg); } while (list); } /** * rhashtable_free_and_destroy - free elements and destroy hash table * @ht: the hash table to destroy * @free_fn: callback to release resources of element * @arg: pointer passed to free_fn * * Stops an eventual async resize. If defined, invokes free_fn for each * element to release resources. Please note that RCU protected * readers may still be accessing the elements. Releasing of resources * must occur in a compatible manner. Then frees the bucket array. * * This function will eventually sleep to wait for an async resize * to complete. The caller is responsible that no further write operations * occur in parallel. */ void rhashtable_free_and_destroy(struct rhashtable *ht, void (*free_fn)(void *ptr, void *arg), void *arg) { struct bucket_table *tbl, *next_tbl; unsigned int i; cancel_work_sync(&ht->run_work); mutex_lock(&ht->mutex); tbl = rht_dereference(ht->tbl, ht); restart: if (free_fn) { for (i = 0; i < tbl->size; i++) { struct rhash_head *pos, *next; cond_resched(); for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)), next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL; !rht_is_a_nulls(pos); pos = next, next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL) rhashtable_free_one(ht, pos, free_fn, arg); } } next_tbl = rht_dereference(tbl->future_tbl, ht); bucket_table_free(tbl); if (next_tbl) { tbl = next_tbl; goto restart; } mutex_unlock(&ht->mutex); } EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy); void rhashtable_destroy(struct rhashtable *ht) { return rhashtable_free_and_destroy(ht, NULL, NULL); } EXPORT_SYMBOL_GPL(rhashtable_destroy); struct rhash_lock_head __rcu **__rht_bucket_nested( const struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); unsigned int index = hash & ((1 << tbl->nest) - 1); unsigned int size = tbl->size >> tbl->nest; unsigned int subhash = hash; union nested_table *ntbl; ntbl = nested_table_top(tbl); ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash); subhash >>= tbl->nest; while (ntbl && size > (1 << shift)) { index = subhash & ((1 << shift) - 1); ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash); size >>= shift; subhash >>= shift; } if (!ntbl) return NULL; return &ntbl[subhash].bucket; } EXPORT_SYMBOL_GPL(__rht_bucket_nested); struct rhash_lock_head __rcu **rht_bucket_nested( const struct bucket_table *tbl, unsigned int hash) { static struct rhash_lock_head __rcu *rhnull; if (!rhnull) INIT_RHT_NULLS_HEAD(rhnull); return __rht_bucket_nested(tbl, hash) ?: &rhnull; } EXPORT_SYMBOL_GPL(rht_bucket_nested); struct rhash_lock_head __rcu **rht_bucket_nested_insert( struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); unsigned int index = hash & ((1 << tbl->nest) - 1); unsigned int size = tbl->size >> tbl->nest; union nested_table *ntbl; ntbl = nested_table_top(tbl); hash >>= tbl->nest; ntbl = nested_table_alloc(ht, &ntbl[index].table, size <= (1 << shift)); while (ntbl && size > (1 << shift)) { index = hash & ((1 << shift) - 1); size >>= shift; hash >>= shift; ntbl = nested_table_alloc(ht, &ntbl[index].table, size <= (1 << shift)); } if (!ntbl) return NULL; return &ntbl[hash].bucket; } EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
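To make the API above concrete, here is a short usage sketch built around the fixed-length-key configuration shown in the rhashtable_init() comment. The demo_* names are illustrative, not part of rhashtable.c, and error handling is kept minimal:

#include <linux/rhashtable.h>
#include <linux/slab.h>

struct demo_obj {
	int key;
	struct rhash_head node;	/* embedded linkage, as documented above */
};

static const struct rhashtable_params demo_params = {
	.head_offset = offsetof(struct demo_obj, node),
	.key_offset = offsetof(struct demo_obj, key),
	.key_len = sizeof(int),
	.automatic_shrinking = true,
};

static int demo_rhashtable(void)
{
	struct rhashtable ht;
	struct demo_obj *obj;
	int key = 42, err;

	err = rhashtable_init(&ht, &demo_params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		err = -ENOMEM;
		goto out;
	}
	obj->key = key;

	/* May kick off a deferred grow via rht_deferred_worker(). */
	err = rhashtable_insert_fast(&ht, &obj->node, demo_params);

	/* Lookups are lock-free but must run under RCU. */
	rcu_read_lock();
	WARN_ON(!err && rhashtable_lookup_fast(&ht, &key, demo_params) != obj);
	rcu_read_unlock();

	if (!err)
		rhashtable_remove_fast(&ht, &obj->node, demo_params);
	kfree(obj);
out:
	rhashtable_destroy(&ht);
	return err;
}

Note the pattern: objects embed a struct rhash_head, one params table is shared by all fast-path calls, and the table never copies or owns the objects, which is why rhashtable_free_and_destroy() takes a free_fn callback.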
// SPDX-License-Identifier: GPL-2.0-only /* * mm/percpu-vm.c - vmalloc area based chunk allocation * * Copyright (C) 2010 SUSE Linux Products GmbH * Copyright (C) 2010 Tejun Heo <tj@kernel.org> * * Chunks are mapped into vmalloc areas and populated page by page. * This is the default chunk allocator. */ #include "internal.h" static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, unsigned int cpu, int page_idx) { /* must not be used on pre-mapped chunk */ WARN_ON(chunk->immutable); return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); } /** * pcpu_get_pages - get temp pages array * * Returns pointer to array of pointers to struct page which can be indexed * with pcpu_page_idx(). Note that there is only one array and accesses * should be serialized by pcpu_alloc_mutex. * * RETURNS: * Pointer to temp pages array on success. */ static struct page **pcpu_get_pages(void) { static struct page **pages; size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); lockdep_assert_held(&pcpu_alloc_mutex); if (!pages) pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL); return pages; } /** * pcpu_free_pages - free pages which were allocated for @chunk * @chunk: chunk pages were allocated for * @pages: array of pages to be freed, indexed by pcpu_page_idx() * @page_start: page index of the first page to be freed * @page_end: page index of the last page to be freed + 1 * * Free pages [@page_start,@page_end) in @pages for all units. * The pages were allocated for @chunk.
*/ static void pcpu_free_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) { unsigned int cpu; int i; for_each_possible_cpu(cpu) { for (i = page_start; i < page_end; i++) { struct page *page = pages[pcpu_page_idx(cpu, i)]; if (page) __free_page(page); } } } /** * pcpu_alloc_pages - allocates pages for @chunk * @chunk: target chunk * @pages: array to put the allocated pages into, indexed by pcpu_page_idx() * @page_start: page index of the first page to be allocated * @page_end: page index of the last page to be allocated + 1 * @gfp: allocation flags passed to the underlying allocator * * Allocate pages [@page_start,@page_end) into @pages for all units. * The allocation is for @chunk. Percpu core doesn't care about the * content of @pages and will pass it verbatim to pcpu_map_pages(). */ static int pcpu_alloc_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end, gfp_t gfp) { unsigned int cpu, tcpu; int i; gfp |= __GFP_HIGHMEM; for_each_possible_cpu(cpu) { for (i = page_start; i < page_end; i++) { struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); if (!*pagep) goto err; } } return 0; err: while (--i >= page_start) __free_page(pages[pcpu_page_idx(cpu, i)]); for_each_possible_cpu(tcpu) { if (tcpu == cpu) break; for (i = page_start; i < page_end; i++) __free_page(pages[pcpu_page_idx(tcpu, i)]); } return -ENOMEM; } /** * pcpu_pre_unmap_flush - flush cache prior to unmapping * @chunk: chunk the regions to be flushed belongs to * @page_start: page index of the first page to be flushed * @page_end: page index of the last page to be flushed + 1 * * Pages in [@page_start,@page_end) of @chunk are about to be * unmapped. Flush cache. As each flushing trial can be very * expensive, issue flush on the whole region at once rather than * doing it for each cpu. This could be an overkill but is more * scalable. */ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, int page_start, int page_end) { flush_cache_vunmap( pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); } static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) { vunmap_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT)); } /** * pcpu_unmap_pages - unmap pages out of a pcpu_chunk * @chunk: chunk of interest * @pages: pages array which can be used to pass information to free * @page_start: page index of the first page to unmap * @page_end: page index of the last page to unmap + 1 * * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. * Corresponding elements in @pages were cleared by the caller and can * be used to carry information to pcpu_free_pages() which will be * called after all unmaps are finished. The caller should call * proper pre/post flush functions. 
*/ static void pcpu_unmap_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) { unsigned int cpu; int i; for_each_possible_cpu(cpu) { for (i = page_start; i < page_end; i++) { struct page *page; page = pcpu_chunk_page(chunk, cpu, i); WARN_ON(!page); pages[pcpu_page_idx(cpu, i)] = page; } __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start), page_end - page_start); } } /** * pcpu_post_unmap_tlb_flush - flush TLB after unmapping * @chunk: pcpu_chunk the regions to be flushed belong to * @page_start: page index of the first page to be flushed * @page_end: page index of the last page to be flushed + 1 * * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush * TLB for the regions. This can be skipped if the area is to be * returned to vmalloc as vmalloc will handle TLB flushing lazily. * * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once * for the whole region. */ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, int page_start, int page_end) { flush_tlb_kernel_range( pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); } static int __pcpu_map_pages(unsigned long addr, struct page **pages, int nr_pages) { return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT), PAGE_KERNEL, pages, PAGE_SHIFT); } /** * pcpu_map_pages - map pages into a pcpu_chunk * @chunk: chunk of interest * @pages: pages array containing pages to be mapped * @page_start: page index of the first page to map * @page_end: page index of the last page to map + 1 * * For each cpu, map pages [@page_start,@page_end) into @chunk. The * caller is responsible for calling pcpu_post_map_flush() after all * mappings are complete. * * This function is responsible for setting up whatever is necessary for * reverse lookup (addr -> chunk). */ static int pcpu_map_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) { unsigned int cpu, tcpu; int i, err; for_each_possible_cpu(cpu) { err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start), &pages[pcpu_page_idx(cpu, page_start)], page_end - page_start); if (err < 0) goto err; for (i = page_start; i < page_end; i++) pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)], chunk); } return 0; err: for_each_possible_cpu(tcpu) { __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start), page_end - page_start); if (tcpu == cpu) break; } pcpu_post_unmap_tlb_flush(chunk, page_start, page_end); return err; } /** * pcpu_post_map_flush - flush cache after mapping * @chunk: pcpu_chunk the regions to be flushed belong to * @page_start: page index of the first page to be flushed * @page_end: page index of the last page to be flushed + 1 * * Pages [@page_start,@page_end) of @chunk have been mapped. Flush * cache. * * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once * for the whole region. */ static void pcpu_post_map_flush(struct pcpu_chunk *chunk, int page_start, int page_end) { flush_cache_vmap( pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); } /** * pcpu_populate_chunk - populate and map an area of a pcpu_chunk * @chunk: chunk of interest * @page_start: the start page * @page_end: the end page * @gfp: allocation flags passed to the underlying memory allocator * * For each cpu, populate and map pages [@page_start,@page_end) into * @chunk. * * CONTEXT: * pcpu_alloc_mutex, does GFP_KERNEL allocation. 
*/ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end, gfp_t gfp) { struct page **pages; pages = pcpu_get_pages(); if (!pages) return -ENOMEM; if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp)) return -ENOMEM; if (pcpu_map_pages(chunk, pages, page_start, page_end)) { pcpu_free_pages(chunk, pages, page_start, page_end); return -ENOMEM; } pcpu_post_map_flush(chunk, page_start, page_end); return 0; } /** * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk * @chunk: chunk to depopulate * @page_start: the start page * @page_end: the end page * * For each cpu, depopulate and unmap pages [@page_start,@page_end) * from @chunk. * * Caller is required to call pcpu_post_unmap_tlb_flush() if not returning the * region back to vmalloc() which will lazily flush the tlb. * * CONTEXT: * pcpu_alloc_mutex. */ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end) { struct page **pages; /* * If control reaches here, there must have been at least one * successful population attempt so the temp pages array must * be available now. */ pages = pcpu_get_pages(); BUG_ON(!pages); /* unmap and free */ pcpu_pre_unmap_flush(chunk, page_start, page_end); pcpu_unmap_pages(chunk, pages, page_start, page_end); pcpu_free_pages(chunk, pages, page_start, page_end); } static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp) { struct pcpu_chunk *chunk; struct vm_struct **vms; chunk = pcpu_alloc_chunk(gfp); if (!chunk) return NULL; vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes, pcpu_nr_groups, pcpu_atom_size); if (!vms) { pcpu_free_chunk(chunk); return NULL; } chunk->data = vms; chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0]; pcpu_stats_chunk_alloc(); trace_percpu_create_chunk(chunk->base_addr); return chunk; } static void pcpu_destroy_chunk(struct pcpu_chunk *chunk) { if (!chunk) return; pcpu_stats_chunk_dealloc(); trace_percpu_destroy_chunk(chunk->base_addr); if (chunk->data) pcpu_free_vm_areas(chunk->data, pcpu_nr_groups); pcpu_free_chunk(chunk); } static struct page *pcpu_addr_to_page(void *addr) { return vmalloc_to_page(addr); } static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai) { /* no extra restriction */ return 0; } /** * pcpu_should_reclaim_chunk - determine if a chunk should go into reclaim * @chunk: chunk of interest * * This is the entry point for percpu reclaim. If a chunk qualifies, it is then * isolated and managed in separate lists at the back of pcpu_slot: sidelined * and to_depopulate respectively. The to_depopulate list holds chunks slated * for depopulation. They no longer contribute to pcpu_nr_empty_pop_pages once * they are on this list. Once depopulated, they are moved onto the sidelined * list which enables them to be pulled back in for allocation if no other chunk * can satisfy the allocation. */ static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk) { /* do not reclaim either the first chunk or reserved chunk */ if (chunk == pcpu_first_chunk || chunk == pcpu_reserved_chunk) return false; /* * If it is isolated, it may be on the sidelined list so move it back to * the to_depopulate list. If at least 1/4 of its pages are empty AND * there is no system-wide shortage of empty pages aside from this * chunk, move it to the to_depopulate list. */ return ((chunk->isolated && chunk->nr_empty_pop_pages) || (pcpu_nr_empty_pop_pages > (PCPU_EMPTY_POP_PAGES_HIGH + chunk->nr_empty_pop_pages) && chunk->nr_empty_pop_pages >= chunk->nr_pages / 4)); }
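None of the functions above are called directly by users; they back the public percpu allocator, whose first-touch allocations land in pcpu_populate_chunk() on this default vmalloc-based backend. A hedged usage sketch (the demo_* names are illustrative, not part of mm/percpu-vm.c):

#include <linux/percpu.h>
#include <linux/printk.h>

static u64 __percpu *demo_counters;

static int demo_percpu(void)
{
	int cpu;
	u64 total = 0;

	/* First-time allocation may populate chunk pages via
	 * pcpu_populate_chunk() on the vmalloc-based backend. */
	demo_counters = alloc_percpu(u64);
	if (!demo_counters)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(demo_counters, cpu) = cpu;

	for_each_possible_cpu(cpu)
		total += *per_cpu_ptr(demo_counters, cpu);

	pr_info("percpu demo sum: %llu\n", total);

	/* Freed regions may later be depopulated; see
	 * pcpu_should_reclaim_chunk() above. */
	free_percpu(demo_counters);
	return 0;
}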
// SPDX-License-Identifier: GPL-2.0+ /* * Nano River Technologies viperboard GPIO lib driver * * (C) 2012 by Lemonage GmbH * Author: Lars Poeschel <poeschel@lemonage.de> * All rights reserved.
*/ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/usb.h> #include <linux/gpio/driver.h> #include <linux/mfd/viperboard.h> #define VPRBRD_GPIOA_CLK_1MHZ 0 #define VPRBRD_GPIOA_CLK_100KHZ 1 #define VPRBRD_GPIOA_CLK_10KHZ 2 #define VPRBRD_GPIOA_CLK_1KHZ 3 #define VPRBRD_GPIOA_CLK_100HZ 4 #define VPRBRD_GPIOA_CLK_10HZ 5 #define VPRBRD_GPIOA_FREQ_DEFAULT 1000 #define VPRBRD_GPIOA_CMD_CONT 0x00 #define VPRBRD_GPIOA_CMD_PULSE 0x01 #define VPRBRD_GPIOA_CMD_PWM 0x02 #define VPRBRD_GPIOA_CMD_SETOUT 0x03 #define VPRBRD_GPIOA_CMD_SETIN 0x04 #define VPRBRD_GPIOA_CMD_SETINT 0x05 #define VPRBRD_GPIOA_CMD_GETIN 0x06 #define VPRBRD_GPIOB_CMD_SETDIR 0x00 #define VPRBRD_GPIOB_CMD_SETVAL 0x01 struct vprbrd_gpioa_msg { u8 cmd; u8 clk; u8 offset; u8 t1; u8 t2; u8 invert; u8 pwmlevel; u8 outval; u8 risefall; u8 answer; u8 __fill; } __packed; struct vprbrd_gpiob_msg { u8 cmd; u16 val; u16 mask; } __packed; struct vprbrd_gpio { struct gpio_chip gpioa; /* gpio a related things */ u32 gpioa_out; u32 gpioa_val; struct gpio_chip gpiob; /* gpio b related things */ u32 gpiob_out; u32 gpiob_val; struct vprbrd *vb; }; /* gpioa sampling clock module parameter */ static unsigned char gpioa_clk; static unsigned int gpioa_freq = VPRBRD_GPIOA_FREQ_DEFAULT; module_param(gpioa_freq, uint, 0); MODULE_PARM_DESC(gpioa_freq, "gpio-a sampling freq in Hz (default is 1000Hz) valid values: 10, 100, 1000, 10000, 100000, 1000000"); /* ----- begin of gpio a chip -------------------------------------------- */ static int vprbrd_gpioa_get(struct gpio_chip *chip, unsigned int offset) { int ret, answer, error = 0; struct vprbrd_gpio *gpio = gpiochip_get_data(chip); struct vprbrd *vb = gpio->vb; struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf; /* if io is set to output, just return the saved value */ if (gpio->gpioa_out & (1 << offset)) return !!(gpio->gpioa_val & (1 << offset)); mutex_lock(&vb->lock); gamsg->cmd = VPRBRD_GPIOA_CMD_GETIN; gamsg->clk = 0x00; gamsg->offset = offset; gamsg->t1 = 0x00; gamsg->t2 = 0x00; gamsg->invert = 0x00; gamsg->pwmlevel = 0x00; gamsg->outval = 0x00; gamsg->risefall = 0x00; gamsg->answer = 0x00; gamsg->__fill = 0x00; ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS); if (ret != sizeof(struct vprbrd_gpioa_msg)) error = -EREMOTEIO; ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS); answer = gamsg->answer & 0x01; mutex_unlock(&vb->lock); if (ret != sizeof(struct vprbrd_gpioa_msg)) error = -EREMOTEIO; if (error) return error; return answer; } static void vprbrd_gpioa_set(struct gpio_chip *chip, unsigned int offset, int value) { int ret; struct vprbrd_gpio *gpio = gpiochip_get_data(chip); struct vprbrd *vb = gpio->vb; struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf; if (gpio->gpioa_out & (1 << offset)) { if (value) gpio->gpioa_val |= (1 << offset); else gpio->gpioa_val &= ~(1 << offset); mutex_lock(&vb->lock); gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT; gamsg->clk = 0x00; gamsg->offset = offset; gamsg->t1 = 0x00; gamsg->t2 = 0x00; gamsg->invert = 0x00; gamsg->pwmlevel = 0x00; gamsg->outval = value; gamsg->risefall = 0x00; gamsg->answer = 0x00;
gamsg->__fill = 0x00; ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS); mutex_unlock(&vb->lock); if (ret != sizeof(struct vprbrd_gpioa_msg)) dev_err(chip->parent, "usb error setting pin value\n"); } } static int vprbrd_gpioa_direction_input(struct gpio_chip *chip, unsigned int offset) { int ret; struct vprbrd_gpio *gpio = gpiochip_get_data(chip); struct vprbrd *vb = gpio->vb; struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf; gpio->gpioa_out &= ~(1 << offset); mutex_lock(&vb->lock); gamsg->cmd = VPRBRD_GPIOA_CMD_SETIN; gamsg->clk = gpioa_clk; gamsg->offset = offset; gamsg->t1 = 0x00; gamsg->t2 = 0x00; gamsg->invert = 0x00; gamsg->pwmlevel = 0x00; gamsg->outval = 0x00; gamsg->risefall = 0x00; gamsg->answer = 0x00; gamsg->__fill = 0x00; ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS); mutex_unlock(&vb->lock); if (ret != sizeof(struct vprbrd_gpioa_msg)) return -EREMOTEIO; return 0; } static int vprbrd_gpioa_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { int ret; struct vprbrd_gpio *gpio = gpiochip_get_data(chip); struct vprbrd *vb = gpio->vb; struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf; gpio->gpioa_out |= (1 << offset); if (value) gpio->gpioa_val |= (1 << offset); else gpio->gpioa_val &= ~(1 << offset); mutex_lock(&vb->lock); gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT; gamsg->clk = 0x00; gamsg->offset = offset; gamsg->t1 = 0x00; gamsg->t2 = 0x00; gamsg->invert = 0x00; gamsg->pwmlevel = 0x00; gamsg->outval = value; gamsg->risefall = 0x00; gamsg->answer = 0x00; gamsg->__fill = 0x00; ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS); mutex_unlock(&vb->lock); if (ret != sizeof(struct vprbrd_gpioa_msg)) return -EREMOTEIO; return 0; } /* ----- end of gpio a chip ---------------------------------------------- */ /* ----- begin of gipo b chip -------------------------------------------- */ static int vprbrd_gpiob_setdir(struct vprbrd *vb, unsigned int offset, unsigned int dir) { struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf; int ret; gbmsg->cmd = VPRBRD_GPIOB_CMD_SETDIR; gbmsg->val = cpu_to_be16(dir << offset); gbmsg->mask = cpu_to_be16(0x0001 << offset); ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, gbmsg, sizeof(struct vprbrd_gpiob_msg), VPRBRD_USB_TIMEOUT_MS); if (ret != sizeof(struct vprbrd_gpiob_msg)) return -EREMOTEIO; return 0; } static int vprbrd_gpiob_get(struct gpio_chip *chip, unsigned int offset) { int ret; u16 val; struct vprbrd_gpio *gpio = gpiochip_get_data(chip); struct vprbrd *vb = gpio->vb; struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf; /* if io is set to output, just return the saved value */ if (gpio->gpiob_out & (1 << offset)) return gpio->gpiob_val & (1 << offset); mutex_lock(&vb->lock); ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, gbmsg, sizeof(struct vprbrd_gpiob_msg), VPRBRD_USB_TIMEOUT_MS); val = gbmsg->val; mutex_unlock(&vb->lock); if (ret != sizeof(struct vprbrd_gpiob_msg)) 
return ret; /* cache the read values */ gpio->gpiob_val = be16_to_cpu(val); return (gpio->gpiob_val >> offset) & 0x1; } static void vprbrd_gpiob_set(struct gpio_chip *chip, unsigned int offset, int value) { int ret; struct vprbrd_gpio *gpio = gpiochip_get_data(chip); struct vprbrd *vb = gpio->vb; struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf; if (gpio->gpiob_out & (1 << offset)) { if (value) gpio->gpiob_val |= (1 << offset); else gpio->gpiob_val &= ~(1 << offset); mutex_lock(&vb->lock); gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL; gbmsg->val = cpu_to_be16(value << offset); gbmsg->mask = cpu_to_be16(0x0001 << offset); ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, gbmsg, sizeof(struct vprbrd_gpiob_msg), VPRBRD_USB_TIMEOUT_MS); mutex_unlock(&vb->lock); if (ret != sizeof(struct vprbrd_gpiob_msg)) dev_err(chip->parent, "usb error setting pin value\n"); } } static int vprbrd_gpiob_direction_input(struct gpio_chip *chip, unsigned int offset) { int ret; struct vprbrd_gpio *gpio = gpiochip_get_data(chip); struct vprbrd *vb = gpio->vb; gpio->gpiob_out &= ~(1 << offset); mutex_lock(&vb->lock); ret = vprbrd_gpiob_setdir(vb, offset, 0); mutex_unlock(&vb->lock); if (ret) dev_err(chip->parent, "usb error setting pin to input\n"); return ret; } static int vprbrd_gpiob_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { int ret; struct vprbrd_gpio *gpio = gpiochip_get_data(chip); struct vprbrd *vb = gpio->vb; gpio->gpiob_out |= (1 << offset); mutex_lock(&vb->lock); ret = vprbrd_gpiob_setdir(vb, offset, 1); if (ret) dev_err(chip->parent, "usb error setting pin to output\n"); mutex_unlock(&vb->lock); vprbrd_gpiob_set(chip, offset, value); return ret; } /* ----- end of gpio b chip ---------------------------------------------- */ static int vprbrd_gpio_probe(struct platform_device *pdev) { struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent); struct vprbrd_gpio *vb_gpio; int ret; vb_gpio = devm_kzalloc(&pdev->dev, sizeof(*vb_gpio), GFP_KERNEL); if (vb_gpio == NULL) return -ENOMEM; vb_gpio->vb = vb; /* registering gpio a */ vb_gpio->gpioa.label = "viperboard gpio a"; vb_gpio->gpioa.parent = &pdev->dev; vb_gpio->gpioa.owner = THIS_MODULE; vb_gpio->gpioa.base = -1; vb_gpio->gpioa.ngpio = 16; vb_gpio->gpioa.can_sleep = true; vb_gpio->gpioa.set = vprbrd_gpioa_set; vb_gpio->gpioa.get = vprbrd_gpioa_get; vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input; vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output; ret = devm_gpiochip_add_data(&pdev->dev, &vb_gpio->gpioa, vb_gpio); if (ret < 0) return ret; /* registering gpio b */ vb_gpio->gpiob.label = "viperboard gpio b"; vb_gpio->gpiob.parent = &pdev->dev; vb_gpio->gpiob.owner = THIS_MODULE; vb_gpio->gpiob.base = -1; vb_gpio->gpiob.ngpio = 16; vb_gpio->gpiob.can_sleep = true; vb_gpio->gpiob.set = vprbrd_gpiob_set; vb_gpio->gpiob.get = vprbrd_gpiob_get; vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input; vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output; return devm_gpiochip_add_data(&pdev->dev, &vb_gpio->gpiob, vb_gpio); } static struct platform_driver vprbrd_gpio_driver = { .driver.name = "viperboard-gpio", .probe = vprbrd_gpio_probe, }; static int __init vprbrd_gpio_init(void) { switch (gpioa_freq) { case 1000000: gpioa_clk = VPRBRD_GPIOA_CLK_1MHZ; break; case 100000: gpioa_clk = VPRBRD_GPIOA_CLK_100KHZ; break; case 10000: gpioa_clk = VPRBRD_GPIOA_CLK_10KHZ; break; case 1000: gpioa_clk = 
VPRBRD_GPIOA_CLK_1KHZ; break; case 100: gpioa_clk = VPRBRD_GPIOA_CLK_100HZ; break; case 10: gpioa_clk = VPRBRD_GPIOA_CLK_10HZ; break; default: pr_warn("invalid gpioa_freq (%u)\n", gpioa_freq); gpioa_clk = VPRBRD_GPIOA_CLK_1KHZ; } return platform_driver_register(&vprbrd_gpio_driver); } subsys_initcall(vprbrd_gpio_init); static void __exit vprbrd_gpio_exit(void) { platform_driver_unregister(&vprbrd_gpio_driver); } module_exit(vprbrd_gpio_exit); MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>"); MODULE_DESCRIPTION("GPIO driver for Nano River Techs Viperboard"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:viperboard-gpio");
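/*
 * Editor's illustrative sketch (not part of the driver above): both chips
 * register with ->can_sleep = true because every access goes out over USB,
 * so in-kernel consumers must use the *_cansleep variants of the gpiod
 * consumer API. The device, the "reset" con_id and the probe function here
 * are hypothetical.
 */
#include <linux/gpio/consumer.h>

static int example_consumer_probe(struct device *dev)
{
	struct gpio_desc *reset;

	/* look up a line routed to one of the viperboard gpio chips */
	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* may sleep: ends up in vprbrd_gpioa_set()/vprbrd_gpiob_set() */
	gpiod_set_value_cansleep(reset, 1);
	return 0;
}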
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* audit.h -- Auditing support * * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. * All Rights Reserved. * * Written by Rickard E.
(Rik) Faith <faith@redhat.com> */ #ifndef _LINUX_AUDIT_H_ #define _LINUX_AUDIT_H_ #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/audit_arch.h> #include <uapi/linux/audit.h> #include <uapi/linux/netfilter/nf_tables.h> #include <uapi/linux/fanotify.h> #define AUDIT_INO_UNSET ((unsigned long)-1) #define AUDIT_DEV_UNSET ((dev_t)-1) struct audit_sig_info { uid_t uid; pid_t pid; char ctx[]; }; struct audit_buffer; struct audit_context; struct inode; struct netlink_skb_parms; struct path; struct linux_binprm; struct mq_attr; struct mqstat; struct audit_watch; struct audit_tree; struct sk_buff; struct kern_ipc_perm; struct audit_krule { u32 pflags; u32 flags; u32 listnr; u32 action; u32 mask[AUDIT_BITMASK_SIZE]; u32 buflen; /* for data alloc on list rules */ u32 field_count; char *filterkey; /* ties events to rules */ struct audit_field *fields; struct audit_field *arch_f; /* quick access to arch field */ struct audit_field *inode_f; /* quick access to an inode field */ struct audit_watch *watch; /* associated watch */ struct audit_tree *tree; /* associated watched tree */ struct audit_fsnotify_mark *exe; struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ struct list_head list; /* for AUDIT_LIST* purposes only */ u64 prio; }; /* Flag to indicate legacy AUDIT_LOGINUID unset usage */ #define AUDIT_LOGINUID_LEGACY 0x1 struct audit_field { u32 type; union { u32 val; kuid_t uid; kgid_t gid; struct { char *lsm_str; void *lsm_rule; }; }; u32 op; }; enum audit_ntp_type { AUDIT_NTP_OFFSET, AUDIT_NTP_FREQ, AUDIT_NTP_STATUS, AUDIT_NTP_TAI, AUDIT_NTP_TICK, AUDIT_NTP_ADJUST, AUDIT_NTP_NVALS /* count */ }; #ifdef CONFIG_AUDITSYSCALL struct audit_ntp_val { long long oldval, newval; }; struct audit_ntp_data { struct audit_ntp_val vals[AUDIT_NTP_NVALS]; }; #else struct audit_ntp_data {}; #endif enum audit_nfcfgop { AUDIT_XT_OP_REGISTER, AUDIT_XT_OP_REPLACE, AUDIT_XT_OP_UNREGISTER, AUDIT_NFT_OP_TABLE_REGISTER, AUDIT_NFT_OP_TABLE_UNREGISTER, AUDIT_NFT_OP_CHAIN_REGISTER, AUDIT_NFT_OP_CHAIN_UNREGISTER, AUDIT_NFT_OP_RULE_REGISTER, AUDIT_NFT_OP_RULE_UNREGISTER, AUDIT_NFT_OP_SET_REGISTER, AUDIT_NFT_OP_SET_UNREGISTER, AUDIT_NFT_OP_SETELEM_REGISTER, AUDIT_NFT_OP_SETELEM_UNREGISTER, AUDIT_NFT_OP_GEN_REGISTER, AUDIT_NFT_OP_OBJ_REGISTER, AUDIT_NFT_OP_OBJ_UNREGISTER, AUDIT_NFT_OP_OBJ_RESET, AUDIT_NFT_OP_FLOWTABLE_REGISTER, AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, AUDIT_NFT_OP_SETELEM_RESET, AUDIT_NFT_OP_RULE_RESET, AUDIT_NFT_OP_INVALID, }; extern int __init audit_register_class(int class, unsigned *list); extern int audit_classify_syscall(int abi, unsigned syscall); extern int audit_classify_arch(int arch); /* only for compat system calls */ extern unsigned compat_write_class[]; extern unsigned compat_read_class[]; extern unsigned compat_dir_class[]; extern unsigned compat_chattr_class[]; extern unsigned compat_signal_class[]; /* audit_names->type values */ #define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */ #define AUDIT_TYPE_NORMAL 1 /* a "normal" audit record */ #define AUDIT_TYPE_PARENT 2 /* a parent audit record */ #define AUDIT_TYPE_CHILD_DELETE 3 /* a child being deleted */ #define AUDIT_TYPE_CHILD_CREATE 4 /* a child being created */ /* maximized args number that audit_socketcall can process */ #define AUDITSC_ARGS 6 /* bit values for ->signal->audit_tty */ #define AUDIT_TTY_ENABLE BIT(0) #define AUDIT_TTY_LOG_PASSWD BIT(1) struct filename; #define AUDIT_OFF 0 #define AUDIT_ON 1 #define AUDIT_LOCKED 2 #ifdef CONFIG_AUDIT /* These are defined in audit.c */ /* Public API */ extern 
__printf(4, 5) void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, const char *fmt, ...); extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type); extern __printf(2, 3) void audit_log_format(struct audit_buffer *ab, const char *fmt, ...); extern void audit_log_end(struct audit_buffer *ab); extern bool audit_string_contains_control(const char *string, size_t len); extern void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf, size_t len); extern void audit_log_n_string(struct audit_buffer *ab, const char *buf, size_t n); extern void audit_log_n_untrustedstring(struct audit_buffer *ab, const char *string, size_t n); extern void audit_log_untrustedstring(struct audit_buffer *ab, const char *string); extern void audit_log_d_path(struct audit_buffer *ab, const char *prefix, const struct path *path); extern void audit_log_key(struct audit_buffer *ab, char *key); extern void audit_log_path_denied(int type, const char *operation); extern void audit_log_lost(const char *message); extern int audit_log_task_context(struct audit_buffer *ab); extern void audit_log_task_info(struct audit_buffer *ab); extern int audit_update_lsm_rules(void); /* Private API (for audit.c only) */ extern int audit_rule_change(int type, int seq, void *data, size_t datasz); extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); extern int audit_set_loginuid(kuid_t loginuid); static inline kuid_t audit_get_loginuid(struct task_struct *tsk) { return tsk->loginuid; } static inline unsigned int audit_get_sessionid(struct task_struct *tsk) { return tsk->sessionid; } extern u32 audit_enabled; extern int audit_signal_info(int sig, struct task_struct *t); #else /* CONFIG_AUDIT */ static inline __printf(4, 5) void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, const char *fmt, ...) { } static inline struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type) { return NULL; } static inline __printf(2, 3) void audit_log_format(struct audit_buffer *ab, const char *fmt, ...) 
{ } static inline void audit_log_end(struct audit_buffer *ab) { } static inline void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf, size_t len) { } static inline void audit_log_n_string(struct audit_buffer *ab, const char *buf, size_t n) { } static inline void audit_log_n_untrustedstring(struct audit_buffer *ab, const char *string, size_t n) { } static inline void audit_log_untrustedstring(struct audit_buffer *ab, const char *string) { } static inline void audit_log_d_path(struct audit_buffer *ab, const char *prefix, const struct path *path) { } static inline void audit_log_key(struct audit_buffer *ab, char *key) { } static inline void audit_log_path_denied(int type, const char *operation) { } static inline int audit_log_task_context(struct audit_buffer *ab) { return 0; } static inline void audit_log_task_info(struct audit_buffer *ab) { } static inline kuid_t audit_get_loginuid(struct task_struct *tsk) { return INVALID_UID; } static inline unsigned int audit_get_sessionid(struct task_struct *tsk) { return AUDIT_SID_UNSET; } #define audit_enabled AUDIT_OFF static inline int audit_signal_info(int sig, struct task_struct *t) { return 0; } #endif /* CONFIG_AUDIT */ #ifdef CONFIG_AUDIT_COMPAT_GENERIC #define audit_is_compat(arch) (!((arch) & __AUDIT_ARCH_64BIT)) #else #define audit_is_compat(arch) false #endif #define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ #define AUDIT_INODE_NOEVAL 4 /* audit record incomplete */ #ifdef CONFIG_AUDITSYSCALL #include <asm/syscall.h> /* for syscall_get_arch() */ /* These are defined in auditsc.c */ /* Public API */ extern int audit_alloc(struct task_struct *task); extern void __audit_free(struct task_struct *task); extern void __audit_uring_entry(u8 op); extern void __audit_uring_exit(int success, long code); extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3); extern void __audit_syscall_exit(int ret_success, long ret_value); extern struct filename *__audit_reusename(const __user char *uptr); extern void __audit_getname(struct filename *name); extern void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags); extern void __audit_file(const struct file *); extern void __audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type); extern void audit_seccomp(unsigned long syscall, long signr, int code); extern void audit_seccomp_actions_logged(const char *names, const char *old_names, int res); extern void __audit_ptrace(struct task_struct *t); static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx) { task->audit_context = ctx; } static inline struct audit_context *audit_context(void) { return current->audit_context; } static inline bool audit_dummy_context(void) { void *p = audit_context(); return !p || *(int *)p; } static inline void audit_free(struct task_struct *task) { if (unlikely(task->audit_context)) __audit_free(task); } static inline void audit_uring_entry(u8 op) { /* * We intentionally check audit_context() before audit_enabled as most * Linux systems (as of ~2021) rely on systemd which forces audit to * be enabled regardless of the user's audit configuration. 
*/ if (unlikely(audit_context() && audit_enabled)) __audit_uring_entry(op); } static inline void audit_uring_exit(int success, long code) { if (unlikely(audit_context())) __audit_uring_exit(success, code); } static inline void audit_syscall_entry(int major, unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3) { if (unlikely(audit_context())) __audit_syscall_entry(major, a0, a1, a2, a3); } static inline void audit_syscall_exit(void *pt_regs) { if (unlikely(audit_context())) { int success = is_syscall_success(pt_regs); long return_code = regs_return_value(pt_regs); __audit_syscall_exit(success, return_code); } } static inline struct filename *audit_reusename(const __user char *name) { if (unlikely(!audit_dummy_context())) return __audit_reusename(name); return NULL; } static inline void audit_getname(struct filename *name) { if (unlikely(!audit_dummy_context())) __audit_getname(name); } static inline void audit_inode(struct filename *name, const struct dentry *dentry, unsigned int aflags) { if (unlikely(!audit_dummy_context())) __audit_inode(name, dentry, aflags); } static inline void audit_file(struct file *file) { if (unlikely(!audit_dummy_context())) __audit_file(file); } static inline void audit_inode_parent_hidden(struct filename *name, const struct dentry *dentry) { if (unlikely(!audit_dummy_context())) __audit_inode(name, dentry, AUDIT_INODE_PARENT | AUDIT_INODE_HIDDEN); } static inline void audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type) { if (unlikely(!audit_dummy_context())) __audit_inode_child(parent, dentry, type); } void audit_core_dumps(long signr); static inline void audit_ptrace(struct task_struct *t) { if (unlikely(!audit_dummy_context())) __audit_ptrace(t); } /* Private API (for audit.c only) */ extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); extern void __audit_bprm(struct linux_binprm *bprm); extern int __audit_socketcall(int nargs, unsigned long *args); extern int __audit_sockaddr(int len, void *addr); extern void __audit_fd_pair(int fd1, int fd2); extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr); extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout); extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification); extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, const struct cred *new, const struct cred *old); extern void __audit_log_capset(const struct cred *new, const struct cred *old); extern void __audit_mmap_fd(int fd, int flags); extern void __audit_openat2_how(struct open_how *how); extern void __audit_log_kern_module(char *name); extern void __audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar); extern void __audit_tk_injoffset(struct timespec64 offset); extern void __audit_ntp_log(const struct audit_ntp_data *ad); extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, enum audit_nfcfgop op, gfp_t gfp); static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { if (unlikely(!audit_dummy_context())) __audit_ipc_obj(ipcp); } static inline void audit_fd_pair(int fd1, int fd2) { if (unlikely(!audit_dummy_context())) __audit_fd_pair(fd1, fd2); } static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) { if 
(unlikely(!audit_dummy_context())) __audit_ipc_set_perm(qbytes, uid, gid, mode); } static inline void audit_bprm(struct linux_binprm *bprm) { if (unlikely(!audit_dummy_context())) __audit_bprm(bprm); } static inline int audit_socketcall(int nargs, unsigned long *args) { if (unlikely(!audit_dummy_context())) return __audit_socketcall(nargs, args); return 0; } static inline int audit_socketcall_compat(int nargs, u32 *args) { unsigned long a[AUDITSC_ARGS]; int i; if (audit_dummy_context()) return 0; for (i = 0; i < nargs; i++) a[i] = (unsigned long)args[i]; return __audit_socketcall(nargs, a); } static inline int audit_sockaddr(int len, void *addr) { if (unlikely(!audit_dummy_context())) return __audit_sockaddr(len, addr); return 0; } static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) { if (unlikely(!audit_dummy_context())) __audit_mq_open(oflag, mode, attr); } static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout) { if (unlikely(!audit_dummy_context())) __audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout); } static inline void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) { if (unlikely(!audit_dummy_context())) __audit_mq_notify(mqdes, notification); } static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) { if (unlikely(!audit_dummy_context())) __audit_mq_getsetattr(mqdes, mqstat); } static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, const struct cred *new, const struct cred *old) { if (unlikely(!audit_dummy_context())) return __audit_log_bprm_fcaps(bprm, new, old); return 0; } static inline void audit_log_capset(const struct cred *new, const struct cred *old) { if (unlikely(!audit_dummy_context())) __audit_log_capset(new, old); } static inline void audit_mmap_fd(int fd, int flags) { if (unlikely(!audit_dummy_context())) __audit_mmap_fd(fd, flags); } static inline void audit_openat2_how(struct open_how *how) { if (unlikely(!audit_dummy_context())) __audit_openat2_how(how); } static inline void audit_log_kern_module(char *name) { if (!audit_dummy_context()) __audit_log_kern_module(name); } static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar) { if (!audit_dummy_context()) __audit_fanotify(response, friar); } static inline void audit_tk_injoffset(struct timespec64 offset) { /* ignore no-op events */ if (offset.tv_sec == 0 && offset.tv_nsec == 0) return; if (!audit_dummy_context()) __audit_tk_injoffset(offset); } static inline void audit_ntp_init(struct audit_ntp_data *ad) { memset(ad, 0, sizeof(*ad)); } static inline void audit_ntp_set_old(struct audit_ntp_data *ad, enum audit_ntp_type type, long long val) { ad->vals[type].oldval = val; } static inline void audit_ntp_set_new(struct audit_ntp_data *ad, enum audit_ntp_type type, long long val) { ad->vals[type].newval = val; } static inline void audit_ntp_log(const struct audit_ntp_data *ad) { if (!audit_dummy_context()) __audit_ntp_log(ad); } static inline void audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, enum audit_nfcfgop op, gfp_t gfp) { if (audit_enabled) __audit_log_nfcfg(name, af, nentries, op, gfp); } extern int audit_n_rules; extern int audit_signals; #else /* CONFIG_AUDITSYSCALL */ static inline int audit_alloc(struct task_struct *task) { return 0; } static inline void audit_free(struct task_struct *task) { } static inline void audit_uring_entry(u8 op) { } static inline void audit_uring_exit(int success, long code) 
{ } static inline void audit_syscall_entry(int major, unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3) { } static inline void audit_syscall_exit(void *pt_regs) { } static inline bool audit_dummy_context(void) { return true; } static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx) { } static inline struct audit_context *audit_context(void) { return NULL; } static inline struct filename *audit_reusename(const __user char *name) { return NULL; } static inline void audit_getname(struct filename *name) { } static inline void audit_inode(struct filename *name, const struct dentry *dentry, unsigned int aflags) { } static inline void audit_file(struct file *file) { } static inline void audit_inode_parent_hidden(struct filename *name, const struct dentry *dentry) { } static inline void audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type) { } static inline void audit_core_dumps(long signr) { } static inline void audit_seccomp(unsigned long syscall, long signr, int code) { } static inline void audit_seccomp_actions_logged(const char *names, const char *old_names, int res) { } static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { } static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) { } static inline void audit_bprm(struct linux_binprm *bprm) { } static inline int audit_socketcall(int nargs, unsigned long *args) { return 0; } static inline int audit_socketcall_compat(int nargs, u32 *args) { return 0; } static inline void audit_fd_pair(int fd1, int fd2) { } static inline int audit_sockaddr(int len, void *addr) { return 0; } static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) { } static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout) { } static inline void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) { } static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) { } static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, const struct cred *new, const struct cred *old) { return 0; } static inline void audit_log_capset(const struct cred *new, const struct cred *old) { } static inline void audit_mmap_fd(int fd, int flags) { } static inline void audit_openat2_how(struct open_how *how) { } static inline void audit_log_kern_module(char *name) { } static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar) { } static inline void audit_tk_injoffset(struct timespec64 offset) { } static inline void audit_ntp_init(struct audit_ntp_data *ad) { } static inline void audit_ntp_set_old(struct audit_ntp_data *ad, enum audit_ntp_type type, long long val) { } static inline void audit_ntp_set_new(struct audit_ntp_data *ad, enum audit_ntp_type type, long long val) { } static inline void audit_ntp_log(const struct audit_ntp_data *ad) { } static inline void audit_ptrace(struct task_struct *t) { } static inline void audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, enum audit_nfcfgop op, gfp_t gfp) { } #define audit_n_rules 0 #define audit_signals 0 #endif /* CONFIG_AUDITSYSCALL */ static inline bool audit_loginuid_set(struct task_struct *tsk) { return uid_valid(audit_get_loginuid(tsk)); } #endif
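/*
 * Editor's illustrative sketch (not part of the header): the pattern this
 * file applies to every syscall hook. The public hook is a cheap inline
 * that tests audit_dummy_context() and only then calls an out-of-line
 * __audit_*() collector, so unaudited tasks pay a single branch per hook.
 * __audit_example_op() and its argument are hypothetical.
 */
extern void __audit_example_op(int arg);	/* would live in auditsc.c */

static inline void audit_example_op(int arg)
{
	if (unlikely(!audit_dummy_context()))
		__audit_example_op(arg);
}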
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for ELECOM devices: * - BM084 Bluetooth Mouse * - EX-G Trackballs (M-XT3DRBK, M-XT3URBK, M-XT4DRBK) * - DEFT Trackballs (M-DT1DRBK, M-DT1URBK, M-DT2DRBK, M-DT2URBK) * - HUGE Trackballs (M-HT1DRBK, M-HT1URBK) * * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> * Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org> * Copyright (c) 2017 Tomasz Kramkowski <tk@the-tk.com> * Copyright (c) 2020 YOSHIOKA Takuma <lo48576@hard-wi.red> * Copyright (c) 2022 Takahiro Fujii <fujii@xaxxi.net> */ /* */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" /* * Certain ELECOM mice misreport their button count, meaning that they only work * correctly with the ELECOM mouse assistant software, which is unavailable for * Linux. Four extra INPUT reports and a FEATURE report are described by the * report descriptor, but it does not appear that these enable software to * control what the extra buttons map to. The only simple and straightforward * solution seems to involve fixing up the report descriptor. */ #define MOUSE_BUTTONS_MAX 8 static void mouse_button_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int rsize, unsigned int button_bit_count, unsigned int padding_bit, unsigned int button_report_size, unsigned int button_usage_maximum, int nbuttons) { if (rsize < 32 || rdesc[button_bit_count] != 0x95 || rdesc[button_report_size] != 0x75 || rdesc[button_report_size + 1] != 0x01 || rdesc[button_usage_maximum] != 0x29 || rdesc[padding_bit] != 0x75) return; hid_info(hdev, "Fixing up Elecom mouse button count\n"); nbuttons = clamp(nbuttons, 0, MOUSE_BUTTONS_MAX); rdesc[button_bit_count + 1] = nbuttons; rdesc[button_usage_maximum + 1] = nbuttons; rdesc[padding_bit + 1] = MOUSE_BUTTONS_MAX - nbuttons; } static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { switch (hdev->product) { case USB_DEVICE_ID_ELECOM_BM084: /* The BM084 Bluetooth mouse includes a non-existing horizontal * wheel in the HID descriptor.
*/ if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) { hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n"); rdesc[47] = 0x00; } break; case USB_DEVICE_ID_ELECOM_M_XGL20DLBK: /* * Report descriptor format: * 20: button bit count * 28: padding bit count * 22: button report size * 14: button usage maximum */ mouse_button_fixup(hdev, rdesc, *rsize, 20, 28, 22, 14, 8); break; case USB_DEVICE_ID_ELECOM_M_XT3URBK: case USB_DEVICE_ID_ELECOM_M_XT3DRBK: case USB_DEVICE_ID_ELECOM_M_XT4DRBK: /* * Report descriptor format: * 12: button bit count * 30: padding bit count * 14: button report size * 20: button usage maximum */ mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 6); break; case USB_DEVICE_ID_ELECOM_M_DT1URBK: case USB_DEVICE_ID_ELECOM_M_DT1DRBK: case USB_DEVICE_ID_ELECOM_M_HT1URBK: case USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D: /* * Report descriptor format: * 12: button bit count * 30: padding bit count * 14: button report size * 20: button usage maximum */ mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 8); break; case USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C: /* * Report descriptor format: * 22: button bit count * 30: padding bit count * 24: button report size * 16: button usage maximum */ mouse_button_fixup(hdev, rdesc, *rsize, 22, 30, 24, 16, 8); break; } return rdesc; } static const struct hid_device_id elecom_devices[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XGL20DLBK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) }, { } }; MODULE_DEVICE_TABLE(hid, elecom_devices); static struct hid_driver elecom_driver = { .name = "elecom", .id_table = elecom_devices, .report_fixup = elecom_report_fixup }; module_hid_driver(elecom_driver); MODULE_DESCRIPTION("HID driver for ELECOM devices"); MODULE_LICENSE("GPL");
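/*
 * Editor's illustrative sketch (not from a real device): the shape of the
 * button region that mouse_button_fixup() patches. The offsets and counts
 * are invented; only the item prefixes (0x95 Report Count, 0x75 Report
 * Size, 0x29 Usage Maximum) match what the fixup verifies before writing.
 */
static const __u8 example_button_region[] = {
	0x05, 0x09,	/* Usage Page (Button) */
	0x19, 0x01,	/* Usage Minimum (1) */
	0x29, 0x05,	/* Usage Maximum (5)  <- raised to nbuttons */
	0x15, 0x00,	/* Logical Minimum (0) */
	0x25, 0x01,	/* Logical Maximum (1) */
	0x75, 0x01,	/* Report Size (1) */
	0x95, 0x05,	/* Report Count (5)   <- raised to nbuttons */
	0x81, 0x02,	/* Input (Data,Var,Abs) */
	0x75, 0x03,	/* Report Size (3)    <- padding, shrunk to 8 - nbuttons */
	0x95, 0x01,	/* Report Count (1) */
	0x81, 0x03,	/* Input (Const,Var,Abs), pads the byte */
};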
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for some a4tech "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2008 Jiri Slaby */ /* */ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include "hid-ids.h" #define A4_2WHEEL_MOUSE_HACK_7 0x01 #define A4_2WHEEL_MOUSE_HACK_B8 0x02 #define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8) struct a4tech_sc { unsigned long quirks; unsigned int hw_wheel; __s32 delayed_value; }; static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct a4tech_sc *a4 = hid_get_drvdata(hdev); if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 && usage->hid == A4_WHEEL_ORIENTATION) { /* * We do not want to have this usage mapped to anything as it's * nonstandard and doesn't really behave like an HID report. * It's only selecting the orientation (vertical/horizontal) of * the previous mouse wheel report. The input_events will be * generated once both reports are recorded in a4_event(). */ return -1; } return 0; } static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct a4tech_sc *a4 = hid_get_drvdata(hdev); if (usage->type == EV_REL && usage->code == REL_WHEEL_HI_RES) { set_bit(REL_HWHEEL, *bit); set_bit(REL_HWHEEL_HI_RES, *bit); } if ((a4->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007) return -1; return 0; } static int a4_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct a4tech_sc *a4 = hid_get_drvdata(hdev); struct input_dev *input; if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput) return 0; input = field->hidinput->input; if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8) { if (usage->type == EV_REL && usage->code == REL_WHEEL_HI_RES) { a4->delayed_value = value; return 1; } if (usage->hid == A4_WHEEL_ORIENTATION) { input_event(input, EV_REL, value ? REL_HWHEEL : REL_WHEEL, a4->delayed_value); input_event(input, EV_REL, value ?
REL_HWHEEL_HI_RES : REL_WHEEL_HI_RES, a4->delayed_value * 120); return 1; } } if ((a4->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007) { a4->hw_wheel = !!value; return 1; } if (usage->code == REL_WHEEL_HI_RES && a4->hw_wheel) { input_event(input, usage->type, REL_HWHEEL, value); input_event(input, usage->type, REL_HWHEEL_HI_RES, value * 120); return 1; } return 0; } static int a4_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct a4tech_sc *a4; int ret; a4 = devm_kzalloc(&hdev->dev, sizeof(*a4), GFP_KERNEL); if (a4 == NULL) { hid_err(hdev, "can't alloc device descriptor\n"); return -ENOMEM; } a4->quirks = id->driver_data; hid_set_drvdata(hdev, a4); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); return ret; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); return ret; } return 0; } static const struct hid_device_id a4_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU), .driver_data = A4_2WHEEL_MOUSE_HACK_7 }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D), .driver_data = A4_2WHEEL_MOUSE_HACK_B8 }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649), .driver_data = A4_2WHEEL_MOUSE_HACK_B8 }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95), .driver_data = A4_2WHEEL_MOUSE_HACK_B8 }, { } }; MODULE_DEVICE_TABLE(hid, a4_devices); static struct hid_driver a4_driver = { .name = "a4tech", .id_table = a4_devices, .input_mapping = a4_input_mapping, .input_mapped = a4_input_mapped, .event = a4_event, .probe = a4_probe, }; module_hid_driver(a4_driver); MODULE_DESCRIPTION("HID driver for some a4tech \"special\" devices"); MODULE_LICENSE("GPL");
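/*
 * Editor's illustrative note (not part of the driver): REL_*_HI_RES events
 * are expressed in units of 1/120th of a wheel detent, which is why
 * a4_event() multiplies the cached low-resolution value by 120 when it
 * re-emits it on the horizontal axis. A hypothetical driver reporting
 * whole detents would do:
 */
static void example_emit_hwheel(struct input_dev *input, int detents)
{
	input_report_rel(input, REL_HWHEEL, detents);
	input_report_rel(input, REL_HWHEEL_HI_RES, detents * 120);
	input_sync(input);
}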
// SPDX-License-Identifier: GPL-2.0 OR MIT /* * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. * * This is an implementation of the BLAKE2s hash and PRF functions. * * Information: https://blake2.net/ * */ #include <crypto/internal/blake2s.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/bug.h> #include <linux/unaligned.h> static const u8 blake2s_sigma[10][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, }; static inline void blake2s_increment_counter(struct blake2s_state *state, const u32 inc) { state->t[0] += inc; state->t[1] += (state->t[0] < inc); } void blake2s_compress(struct blake2s_state *state, const u8 *block, size_t nblocks, const u32 inc) __weak __alias(blake2s_compress_generic); void blake2s_compress_generic(struct blake2s_state *state, const u8 *block, size_t nblocks, const u32 inc) { u32 m[16]; u32 v[16]; int i; WARN_ON(IS_ENABLED(DEBUG) && (nblocks > 1 && inc != BLAKE2S_BLOCK_SIZE)); while (nblocks > 0) { blake2s_increment_counter(state, inc); memcpy(m, block, BLAKE2S_BLOCK_SIZE); le32_to_cpu_array(m, ARRAY_SIZE(m)); memcpy(v, state->h, 32); v[ 8] = BLAKE2S_IV0; v[ 9] = BLAKE2S_IV1; v[10] = BLAKE2S_IV2; v[11] = BLAKE2S_IV3; v[12] = BLAKE2S_IV4 ^ state->t[0]; v[13] = BLAKE2S_IV5 ^ state->t[1]; v[14] = BLAKE2S_IV6 ^ state->f[0]; v[15] = BLAKE2S_IV7 ^ state->f[1]; #define G(r, i, a, b, c, d) do { \ a += b + m[blake2s_sigma[r][2 * i + 0]]; \ d = ror32(d ^ a, 16); \ c += d; \ b = ror32(b ^ c, 12); \ a += b + m[blake2s_sigma[r][2 * i + 1]]; \ d = ror32(d ^ a, 8); \ c += d; \ b = ror32(b ^ c, 7); \ } while (0) #define ROUND(r) do { \ G(r, 0, v[0], v[ 4], v[ 8], v[12]); \ G(r, 1, v[1], v[ 5], v[ 9], v[13]); \ G(r, 2, v[2], v[ 6], v[10], v[14]); \ G(r, 3, v[3], v[ 7], v[11], v[15]); \ G(r, 4, v[0], v[ 5], v[10], v[15]); \ G(r, 5, v[1], v[ 6], v[11], v[12]); \ G(r, 6, v[2], v[ 7], v[ 8], v[13]); \ G(r, 7, v[3], v[ 4], v[ 9], v[14]); \ } while (0) ROUND(0); ROUND(1); ROUND(2); ROUND(3); ROUND(4); ROUND(5); ROUND(6); ROUND(7); ROUND(8); ROUND(9); #undef G #undef ROUND for (i = 0; i < 8; ++i) state->h[i] ^= v[i] ^ v[i + 8]; block += BLAKE2S_BLOCK_SIZE; --nblocks; } } EXPORT_SYMBOL(blake2s_compress_generic);
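/*
 * Editor's illustrative sketch: driving the compression function above
 * through the library interface in <crypto/blake2s.h> to produce a keyed
 * 32-byte digest. The function and buffer names here are hypothetical;
 * blake2s_init_key()/blake2s_update()/blake2s_final() are the in-kernel
 * library API that ends up calling blake2s_compress().
 */
#include <crypto/blake2s.h>

static void example_keyed_digest(const u8 *msg, size_t len,
				 const u8 key[BLAKE2S_KEY_SIZE])
{
	struct blake2s_state state;
	u8 digest[BLAKE2S_HASH_SIZE];

	blake2s_init_key(&state, BLAKE2S_HASH_SIZE, key, BLAKE2S_KEY_SIZE);
	blake2s_update(&state, msg, len);
	blake2s_final(&state, digest);	/* digest now holds the 32-byte MAC */
}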
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TIMEKEEPING_H #define _LINUX_TIMEKEEPING_H #include <linux/errno.h> #include <linux/clocksource_ids.h> #include <linux/ktime.h> /* Included from linux/ktime.h */ void timekeeping_init(void); extern int timekeeping_suspended; /* Architecture timer tick functions: */ extern void legacy_timer_tick(unsigned long ticks); /* * Get and set timeofday */ extern int do_settimeofday64(const struct timespec64 *ts); extern int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz); /* * ktime_get() family - read the current time in a multitude of ways. * * The default time reference is CLOCK_MONOTONIC, starting at * boot time but not counting the time spent in suspend. * For other references, use the functions with "real", "clocktai", * "boottime" and "raw" suffixes. * * To get the time in a different format, use the ones with * "ns", "ts64" and "seconds" suffix. * * See Documentation/core-api/timekeeping.rst for more details.
*/ /* * timespec64 based interfaces */ extern void ktime_get_raw_ts64(struct timespec64 *ts); extern void ktime_get_ts64(struct timespec64 *ts); extern void ktime_get_real_ts64(struct timespec64 *tv); extern void ktime_get_coarse_ts64(struct timespec64 *ts); extern void ktime_get_coarse_real_ts64(struct timespec64 *ts); /* Multigrain timestamp interfaces */ extern void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts); extern void ktime_get_real_ts64_mg(struct timespec64 *ts); extern unsigned long timekeeping_get_mg_floor_swaps(void); void getboottime64(struct timespec64 *ts); /* * time64_t base interfaces */ extern time64_t ktime_get_seconds(void); extern time64_t __ktime_get_real_seconds(void); extern time64_t ktime_get_real_seconds(void); /* * ktime_t based interfaces */ enum tk_offsets { TK_OFFS_REAL, TK_OFFS_BOOT, TK_OFFS_TAI, TK_OFFS_MAX, }; extern ktime_t ktime_get(void); extern ktime_t ktime_get_with_offset(enum tk_offsets offs); extern ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs); extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs); extern ktime_t ktime_get_raw(void); extern u32 ktime_get_resolution_ns(void); /** * ktime_get_real - get the real (wall-) time in ktime_t format * * Returns: real (wall) time in ktime_t format */ static inline ktime_t ktime_get_real(void) { return ktime_get_with_offset(TK_OFFS_REAL); } static inline ktime_t ktime_get_coarse_real(void) { return ktime_get_coarse_with_offset(TK_OFFS_REAL); } /** * ktime_get_boottime - Get monotonic time since boot in ktime_t format * * This is similar to CLOCK_MONOTONIC/ktime_get, but also includes the * time spent in suspend. * * Returns: monotonic time since boot in ktime_t format */ static inline ktime_t ktime_get_boottime(void) { return ktime_get_with_offset(TK_OFFS_BOOT); } static inline ktime_t ktime_get_coarse_boottime(void) { return ktime_get_coarse_with_offset(TK_OFFS_BOOT); } /** * ktime_get_clocktai - Get the TAI time of day in ktime_t format * * Returns: the TAI time of day in ktime_t format */ static inline ktime_t ktime_get_clocktai(void) { return ktime_get_with_offset(TK_OFFS_TAI); } static inline ktime_t ktime_get_coarse_clocktai(void) { return ktime_get_coarse_with_offset(TK_OFFS_TAI); } static inline ktime_t ktime_get_coarse(void) { struct timespec64 ts; ktime_get_coarse_ts64(&ts); return timespec64_to_ktime(ts); } static inline u64 ktime_get_coarse_ns(void) { return ktime_to_ns(ktime_get_coarse()); } static inline u64 ktime_get_coarse_real_ns(void) { return ktime_to_ns(ktime_get_coarse_real()); } static inline u64 ktime_get_coarse_boottime_ns(void) { return ktime_to_ns(ktime_get_coarse_boottime()); } static inline u64 ktime_get_coarse_clocktai_ns(void) { return ktime_to_ns(ktime_get_coarse_clocktai()); } /** * ktime_mono_to_real - Convert monotonic time to clock realtime * @mono: monotonic time to convert * * Returns: time converted to realtime clock */ static inline ktime_t ktime_mono_to_real(ktime_t mono) { return ktime_mono_to_any(mono, TK_OFFS_REAL); } /** * ktime_get_ns - Get the current time in nanoseconds * * Returns: current time converted to nanoseconds */ static inline u64 ktime_get_ns(void) { return ktime_to_ns(ktime_get()); } /** * ktime_get_real_ns - Get the current real/wall time in nanoseconds * * Returns: current real time converted to nanoseconds */ static inline u64 ktime_get_real_ns(void) { return ktime_to_ns(ktime_get_real()); } /** * ktime_get_boottime_ns - Get the monotonic time since boot in nanoseconds * * Returns: current boottime converted to
nanoseconds */ static inline u64 ktime_get_boottime_ns(void) { return ktime_to_ns(ktime_get_boottime()); } /** * ktime_get_clocktai_ns - Get the current TAI time of day in nanoseconds * * Returns: current TAI time converted to nanoseconds */ static inline u64 ktime_get_clocktai_ns(void) { return ktime_to_ns(ktime_get_clocktai()); } /** * ktime_get_raw_ns - Get the raw monotonic time in nanoseconds * * Returns: current raw monotonic time converted to nanoseconds */ static inline u64 ktime_get_raw_ns(void) { return ktime_to_ns(ktime_get_raw()); } extern u64 ktime_get_mono_fast_ns(void); extern u64 ktime_get_raw_fast_ns(void); extern u64 ktime_get_boot_fast_ns(void); extern u64 ktime_get_tai_fast_ns(void); extern u64 ktime_get_real_fast_ns(void); /* * timespec64/time64_t interfaces utilizing the ktime based ones * for API completeness, these could be implemented more efficiently * if needed. */ static inline void ktime_get_boottime_ts64(struct timespec64 *ts) { *ts = ktime_to_timespec64(ktime_get_boottime()); } static inline void ktime_get_coarse_boottime_ts64(struct timespec64 *ts) { *ts = ktime_to_timespec64(ktime_get_coarse_boottime()); } static inline time64_t ktime_get_boottime_seconds(void) { return ktime_divns(ktime_get_coarse_boottime(), NSEC_PER_SEC); } static inline void ktime_get_clocktai_ts64(struct timespec64 *ts) { *ts = ktime_to_timespec64(ktime_get_clocktai()); } static inline void ktime_get_coarse_clocktai_ts64(struct timespec64 *ts) { *ts = ktime_to_timespec64(ktime_get_coarse_clocktai()); } static inline time64_t ktime_get_clocktai_seconds(void) { return ktime_divns(ktime_get_coarse_clocktai(), NSEC_PER_SEC); } /* * RTC specific */ extern bool timekeeping_rtc_skipsuspend(void); extern bool timekeeping_rtc_skipresume(void); extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta); /** * struct system_time_snapshot - simultaneous raw/real time capture with * counter value * @cycles: Clocksource counter value to produce the system times * @real: Realtime system time * @boot: Boot time * @raw: Monotonic raw system time * @cs_id: Clocksource ID * @clock_was_set_seq: The sequence number of clock-was-set events * @cs_was_changed_seq: The sequence number of clocksource change events */ struct system_time_snapshot { u64 cycles; ktime_t real; ktime_t boot; ktime_t raw; enum clocksource_ids cs_id; unsigned int clock_was_set_seq; u8 cs_was_changed_seq; }; /** * struct system_device_crosststamp - system/device cross-timestamp * (synchronized capture) * @device: Device time * @sys_realtime: Realtime simultaneous with device time * @sys_monoraw: Monotonic raw simultaneous with device time */ struct system_device_crosststamp { ktime_t device; ktime_t sys_realtime; ktime_t sys_monoraw; }; /** * struct system_counterval_t - system counter value with the ID of the * corresponding clocksource * @cycles: System counter value * @cs_id: Clocksource ID corresponding to system counter value. Used by * timekeeping code to verify comparability of two cycle values. * The default ID, CSID_GENERIC, does not identify a specific * clocksource. * @use_nsecs: @cycles is in nanoseconds. 
*/ struct system_counterval_t { u64 cycles; enum clocksource_ids cs_id; bool use_nsecs; }; extern bool ktime_real_to_base_clock(ktime_t treal, enum clocksource_ids base_id, u64 *cycles); extern bool timekeeping_clocksource_has_base(enum clocksource_ids id); /* * Get cross timestamp between system clock and device clock */ extern int get_device_system_crosststamp( int (*get_time_fn)(ktime_t *device_time, struct system_counterval_t *system_counterval, void *ctx), void *ctx, struct system_time_snapshot *history, struct system_device_crosststamp *xtstamp); /* * Simultaneously snapshot realtime and monotonic raw clocks */ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot); /* * Persistent clock related interfaces */ extern int persistent_clock_is_local; extern void read_persistent_clock64(struct timespec64 *ts); void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock, struct timespec64 *boot_offset); #ifdef CONFIG_GENERIC_CMOS_UPDATE extern int update_persistent_clock64(struct timespec64 now); #endif #endif
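/*
 * Editor's illustrative sketch: the usual pattern for timing a short
 * interval with the interfaces declared above. ktime_get() (monotonic) is
 * immune to settimeofday()/NTP jumps; ktime_get_boottime() would be used
 * instead if time spent in suspend should count. example_work() is
 * hypothetical.
 */
static inline s64 example_elapsed_ns(void (*example_work)(void))
{
	ktime_t start = ktime_get();

	example_work();
	return ktime_to_ns(ktime_sub(ktime_get(), start));
}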
1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_USB_H #define __LINUX_USB_H #include <linux/mod_devicetable.h> #include <linux/usb/ch9.h> #define USB_MAJOR 180 #define USB_DEVICE_MAJOR 189 #ifdef __KERNEL__ #include <linux/errno.h> /* for -ENODEV */ #include <linux/delay.h> /* for mdelay() */ #include <linux/interrupt.h> /* for in_interrupt() */ #include <linux/list.h> /* for struct list_head */ #include <linux/kref.h> /* for struct kref */ #include <linux/device.h> /* for struct device */ #include <linux/fs.h> /* for struct file_operations */ #include <linux/completion.h> /* for struct completion */ #include <linux/sched.h> /* for current && schedule_timeout */ #include <linux/mutex.h> /* for struct mutex */ #include <linux/pm_runtime.h> /* for runtime PM */ struct usb_device; struct usb_driver; /*-------------------------------------------------------------------------*/ /* * Host-side wrappers for standard USB descriptors ... these are parsed * from the data provided by devices. Parsing turns them from a flat * sequence of descriptors into a hierarchy: * * - devices have one (usually) or more configs; * - configs have one (often) or more interfaces; * - interfaces have one (usually) or more settings; * - each interface setting has zero or (usually) more endpoints. 
* - a SuperSpeed endpoint has a companion descriptor * * And there might be other descriptors mixed in with those. * * Devices may also have class-specific or vendor-specific descriptors. */ struct ep_device; /** * struct usb_host_endpoint - host-side endpoint descriptor and queue * @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder * @ss_ep_comp: SuperSpeed companion descriptor for this endpoint * @ssp_isoc_ep_comp: SuperSpeedPlus isoc companion descriptor for this endpoint * @eusb2_isoc_ep_comp: eUSB2 isoc companion descriptor for this endpoint * @urb_list: urbs queued to this endpoint; maintained by usbcore * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH) * with one or more transfer descriptors (TDs) per urb * @ep_dev: ep_device for sysfs info * @extra: descriptors following this endpoint in the configuration * @extralen: how many bytes of "extra" are valid * @enabled: URBs may be submitted to this endpoint * @streams: number of USB-3 streams allocated on the endpoint * * USB requests are always queued to a given endpoint, identified by a * descriptor within an active interface in a given USB configuration. */ struct usb_host_endpoint { struct usb_endpoint_descriptor desc; struct usb_ss_ep_comp_descriptor ss_ep_comp; struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp; struct usb_eusb2_isoc_ep_comp_descriptor eusb2_isoc_ep_comp; struct list_head urb_list; void *hcpriv; struct ep_device *ep_dev; /* For sysfs info */ unsigned char *extra; /* Extra descriptors */ int extralen; int enabled; int streams; }; /* host-side wrapper for one interface setting's parsed descriptors */ struct usb_host_interface { struct usb_interface_descriptor desc; int extralen; unsigned char *extra; /* Extra descriptors */ /* array of desc.bNumEndpoints endpoints associated with this * interface setting. these will be in no particular order. 
*/ struct usb_host_endpoint *endpoint; char *string; /* iInterface string, if present */ }; enum usb_interface_condition { USB_INTERFACE_UNBOUND = 0, USB_INTERFACE_BINDING, USB_INTERFACE_BOUND, USB_INTERFACE_UNBINDING, }; int __must_check usb_find_common_endpoints(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_in, struct usb_endpoint_descriptor **bulk_out, struct usb_endpoint_descriptor **int_in, struct usb_endpoint_descriptor **int_out); int __must_check usb_find_common_endpoints_reverse(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_in, struct usb_endpoint_descriptor **bulk_out, struct usb_endpoint_descriptor **int_in, struct usb_endpoint_descriptor **int_out); static inline int __must_check usb_find_bulk_in_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_in) { return usb_find_common_endpoints(alt, bulk_in, NULL, NULL, NULL); } static inline int __must_check usb_find_bulk_out_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_out) { return usb_find_common_endpoints(alt, NULL, bulk_out, NULL, NULL); } static inline int __must_check usb_find_int_in_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **int_in) { return usb_find_common_endpoints(alt, NULL, NULL, int_in, NULL); } static inline int __must_check usb_find_int_out_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **int_out) { return usb_find_common_endpoints(alt, NULL, NULL, NULL, int_out); } static inline int __must_check usb_find_last_bulk_in_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_in) { return usb_find_common_endpoints_reverse(alt, bulk_in, NULL, NULL, NULL); } static inline int __must_check usb_find_last_bulk_out_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_out) { return usb_find_common_endpoints_reverse(alt, NULL, bulk_out, NULL, NULL); } static inline int __must_check usb_find_last_int_in_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **int_in) { return usb_find_common_endpoints_reverse(alt, NULL, NULL, int_in, NULL); } static inline int __must_check usb_find_last_int_out_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **int_out) { return usb_find_common_endpoints_reverse(alt, NULL, NULL, NULL, int_out); } enum usb_wireless_status { USB_WIRELESS_STATUS_NA = 0, USB_WIRELESS_STATUS_DISCONNECTED, USB_WIRELESS_STATUS_CONNECTED, }; /** * struct usb_interface - what usb device drivers talk to * @altsetting: array of interface structures, one for each alternate * setting that may be selected. Each one includes a set of * endpoint configurations. They will be in no particular order. * @cur_altsetting: the current altsetting. * @num_altsetting: number of altsettings defined. * @intf_assoc: interface association descriptor * @minor: the minor number assigned to this interface, if this * interface is bound to a driver that uses the USB major number. * If this interface does not use the USB major, this field should * be unused. The driver should set this value in the probe() * function of the driver, after it has been assigned a minor * number from the USB core by calling usb_register_dev(). 
 * @condition: binding state of the interface: not bound, binding
 *	(in probe()), bound to a driver, or unbinding (in disconnect())
 * @sysfs_files_created: sysfs attributes exist
 * @ep_devs_created: endpoint child pseudo-devices exist
 * @unregistering: flag set when the interface is being unregistered
 * @needs_remote_wakeup: flag set when the driver requires remote-wakeup
 *	capability during autosuspend.
 * @needs_altsetting0: flag set when a set-interface request for altsetting 0
 *	has been deferred.
 * @needs_binding: flag set when the driver should be re-probed or unbound
 *	following a reset or suspend operation it doesn't support.
 * @authorized: This allows individual interfaces to be (de)authorized,
 *	in contrast to authorization of the device as a whole.
 * @wireless_status: if the USB device uses a receiver/emitter combo, whether
 *	the emitter is connected.
 * @wireless_status_work: Used for scheduling wireless status changes
 *	from atomic context.
 * @dev: driver model's view of this device
 * @usb_dev: if an interface is bound to the USB major, this will point
 *	to the sysfs representation for that device.
 * @reset_ws: Used for scheduling resets from atomic context.
 * @resetting_device: USB core reset the device, so use alt setting 0 as
 *	current; needs bandwidth alloc after reset.
 *
 * USB device drivers attach to interfaces on a physical device.  Each
 * interface encapsulates a single high level function, such as feeding
 * an audio stream to a speaker or reporting a change in a volume control.
 * Many USB devices only have one interface.  The protocol used to talk to
 * an interface's endpoints can be defined in a usb "class" specification,
 * or by a product's vendor.  The (default) control endpoint is part of
 * every interface, but is never listed among the interface's descriptors.
 *
 * The driver that is bound to the interface can use standard driver model
 * calls such as dev_get_drvdata() on the dev member of this structure.
 *
 * Each interface may have alternate settings.  The initial configuration
 * of a device sets altsetting 0, but the device driver can change
 * that setting using usb_set_interface().  Alternate settings are often
 * used to control the use of periodic endpoints, such as by having
 * different endpoints use different amounts of reserved USB bandwidth.
 * All standards-conformant USB devices that use isochronous endpoints
 * will use them in non-default settings.
 *
 * The USB specification says that alternate setting numbers must run from
 * 0 to one less than the total number of alternate settings.  But some
 * devices manage to mess this up, and the structures aren't necessarily
 * stored in numerical order anyhow.  Use usb_altnum_to_altsetting() to
 * look up an alternate setting in the altsetting array based on its number.
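 *
 * A minimal probe() sketch (illustrative only; my_probe() and its local
 * variables are hypothetical, not part of this header) using the
 * endpoint-finder helpers declared earlier in this file:
 *
 *	static int my_probe(struct usb_interface *intf,
 *			    const struct usb_device_id *id)
 *	{
 *		struct usb_endpoint_descriptor *bulk_in, *bulk_out;
 *		int err;
 *
 *		err = usb_find_common_endpoints(intf->cur_altsetting,
 *						&bulk_in, &bulk_out,
 *						NULL, NULL);
 *		if (err)
 *			return err;	// required endpoints not present
 *		return 0;		// driver setup would continue here
 *	}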
*/ struct usb_interface { /* array of alternate settings for this interface, * stored in no particular order */ struct usb_host_interface *altsetting; struct usb_host_interface *cur_altsetting; /* the currently * active alternate setting */ unsigned num_altsetting; /* number of alternate settings */ /* If there is an interface association descriptor then it will list * the associated interfaces */ struct usb_interface_assoc_descriptor *intf_assoc; int minor; /* minor number this interface is * bound to */ enum usb_interface_condition condition; /* state of binding */ unsigned sysfs_files_created:1; /* the sysfs attributes exist */ unsigned ep_devs_created:1; /* endpoint "devices" exist */ unsigned unregistering:1; /* unregistration is in progress */ unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */ unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ unsigned needs_binding:1; /* needs delayed unbind/rebind */ unsigned resetting_device:1; /* true: bandwidth alloc after reset */ unsigned authorized:1; /* used for interface authorization */ enum usb_wireless_status wireless_status; struct work_struct wireless_status_work; struct device dev; /* interface specific device info */ struct device *usb_dev; struct work_struct reset_ws; /* for resets in atomic context */ }; #define to_usb_interface(__dev) container_of_const(__dev, struct usb_interface, dev) static inline void *usb_get_intfdata(struct usb_interface *intf) { return dev_get_drvdata(&intf->dev); } /** * usb_set_intfdata() - associate driver-specific data with an interface * @intf: USB interface * @data: driver data * * Drivers can use this function in their probe() callbacks to associate * driver-specific data with an interface. * * Note that there is generally no need to clear the driver-data pointer even * if some drivers do so for historical or implementation-specific reasons. */ static inline void usb_set_intfdata(struct usb_interface *intf, void *data) { dev_set_drvdata(&intf->dev, data); } struct usb_interface *usb_get_intf(struct usb_interface *intf); void usb_put_intf(struct usb_interface *intf); /* Hard limit */ #define USB_MAXENDPOINTS 30 /* this maximum is arbitrary */ #define USB_MAXINTERFACES 32 #define USB_MAXIADS (USB_MAXINTERFACES/2) bool usb_check_bulk_endpoints( const struct usb_interface *intf, const u8 *ep_addrs); bool usb_check_int_endpoints( const struct usb_interface *intf, const u8 *ep_addrs); /* * USB Resume Timer: Every Host controller driver should drive the resume * signalling on the bus for the amount of time defined by this macro. * * That way we will have a 'stable' behavior among all HCDs supported by Linux. * * Note that the USB Specification states we should drive resume for *at least* * 20 ms, but it doesn't give an upper bound. This creates two possible * situations which we want to avoid: * * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes * us to fail USB Electrical Tests, thus failing Certification * * (b) Some (many) devices actually need more than 20 ms of resume signalling, * and while we can argue that's against the USB Specification, we don't have * control over which devices a certification laboratory will be using for * certification. If CertLab uses a device which was tested against Windows and * that happens to have relaxed resume signalling rules, we might fall into * situations where we fail interoperability and electrical tests. 
* * In order to avoid both conditions, we're using a 40 ms resume timeout, which * should cope with both LPJ calibration errors and devices not following every * detail of the USB Specification. */ #define USB_RESUME_TIMEOUT 40 /* ms */ /** * struct usb_interface_cache - long-term representation of a device interface * @num_altsetting: number of altsettings defined. * @ref: reference counter. * @altsetting: variable-length array of interface structures, one for * each alternate setting that may be selected. Each one includes a * set of endpoint configurations. They will be in no particular order. * * These structures persist for the lifetime of a usb_device, unlike * struct usb_interface (which persists only as long as its configuration * is installed). The altsetting arrays can be accessed through these * structures at any time, permitting comparison of configurations and * providing support for the /sys/kernel/debug/usb/devices pseudo-file. */ struct usb_interface_cache { unsigned num_altsetting; /* number of alternate settings */ struct kref ref; /* reference counter */ /* variable-length array of alternate settings for this interface, * stored in no particular order */ struct usb_host_interface altsetting[]; }; #define ref_to_usb_interface_cache(r) \ container_of(r, struct usb_interface_cache, ref) #define altsetting_to_usb_interface_cache(a) \ container_of(a, struct usb_interface_cache, altsetting[0]) /** * struct usb_host_config - representation of a device's configuration * @desc: the device's configuration descriptor. * @string: pointer to the cached version of the iConfiguration string, if * present for this configuration. * @intf_assoc: list of any interface association descriptors in this config * @interface: array of pointers to usb_interface structures, one for each * interface in the configuration. The number of interfaces is stored * in desc.bNumInterfaces. These pointers are valid only while the * configuration is active. * @intf_cache: array of pointers to usb_interface_cache structures, one * for each interface in the configuration. These structures exist * for the entire life of the device. * @extra: pointer to buffer containing all extra descriptors associated * with this configuration (those preceding the first interface * descriptor). * @extralen: length of the extra descriptors buffer. * * USB devices may have multiple configurations, but only one can be active * at any time. Each encapsulates a different operational environment; * for example, a dual-speed device would have separate configurations for * full-speed and high-speed operation. The number of configurations * available is stored in the device descriptor as bNumConfigurations. * * A configuration can contain multiple interfaces. Each corresponds to * a different function of the USB device, and all are available whenever * the configuration is active. The USB standard says that interfaces * are supposed to be numbered from 0 to desc.bNumInterfaces-1, but a lot * of devices get this wrong. In addition, the interface array is not * guaranteed to be sorted in numerical order. Use usb_ifnum_to_if() to * look up an interface entry based on its number. * * Device drivers should not attempt to activate configurations. The choice * of which configuration to install is a policy decision based on such * considerations as available power, functionality provided, and the user's * desires (expressed through userspace tools). 
However, drivers can call * usb_reset_configuration() to reinitialize the current configuration and * all its interfaces. */ struct usb_host_config { struct usb_config_descriptor desc; char *string; /* iConfiguration string, if present */ /* List of any Interface Association Descriptors in this * configuration. */ struct usb_interface_assoc_descriptor *intf_assoc[USB_MAXIADS]; /* the interfaces associated with this configuration, * stored in no particular order */ struct usb_interface *interface[USB_MAXINTERFACES]; /* Interface information available even when this is not the * active configuration */ struct usb_interface_cache *intf_cache[USB_MAXINTERFACES]; unsigned char *extra; /* Extra descriptors */ int extralen; }; /* USB2.0 and USB3.0 device BOS descriptor set */ struct usb_host_bos { struct usb_bos_descriptor *desc; struct usb_ext_cap_descriptor *ext_cap; struct usb_ss_cap_descriptor *ss_cap; struct usb_ssp_cap_descriptor *ssp_cap; struct usb_ss_container_id_descriptor *ss_id; struct usb_ptm_cap_descriptor *ptm_cap; }; int __usb_get_extra_descriptor(char *buffer, unsigned size, unsigned char type, void **ptr, size_t min); #define usb_get_extra_descriptor(ifpoint, type, ptr) \ __usb_get_extra_descriptor((ifpoint)->extra, \ (ifpoint)->extralen, \ type, (void **)ptr, sizeof(**(ptr))) /* ----------------------------------------------------------------------- */ /* * Allocated per bus (tree of devices) we have: */ struct usb_bus { struct device *controller; /* host side hardware */ struct device *sysdev; /* as seen from firmware or bus */ int busnum; /* Bus number (in order of reg) */ const char *bus_name; /* stable id (PCI slot_name etc) */ u8 uses_pio_for_control; /* * Does the host controller use PIO * for control transfers? */ u8 otg_port; /* 0, or number of OTG/HNP port */ unsigned is_b_host:1; /* true during some HNP roleswitches */ unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */ unsigned no_stop_on_short:1; /* * Quirk: some controllers don't stop * the ep queue on a short transfer * with the URB_SHORT_NOT_OK flag set. */ unsigned no_sg_constraint:1; /* no sg constraint */ unsigned sg_tablesize; /* 0 or largest number of sg list entries */ int devnum_next; /* Next open device number in * round-robin allocation */ struct mutex devnum_next_mutex; /* devnum_next mutex */ DECLARE_BITMAP(devmap, 128); /* USB device number allocation bitmap */ struct usb_device *root_hub; /* Root hub */ struct usb_bus *hs_companion; /* Companion EHCI bus, if any */ int bandwidth_allocated; /* on this bus: how much of the time * reserved for periodic (intr/iso) * requests is used, on average? * Units: microseconds/frame. * Limits: Full/low speed reserve 90%, * while high speed reserves 80%. */ int bandwidth_int_reqs; /* number of Interrupt requests */ int bandwidth_isoc_reqs; /* number of Isoc. requests */ unsigned resuming_ports; /* bit array: resuming root-hub ports */ #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) struct mon_bus *mon_bus; /* non-null when associated */ int monitored; /* non-zero when monitored */ #endif }; struct usb_dev_state; /* ----------------------------------------------------------------------- */ struct usb_tt; enum usb_link_tunnel_mode { USB_LINK_UNKNOWN = 0, USB_LINK_NATIVE, USB_LINK_TUNNELED, }; enum usb_port_connect_type { USB_PORT_CONNECT_TYPE_UNKNOWN = 0, USB_PORT_CONNECT_TYPE_HOT_PLUG, USB_PORT_CONNECT_TYPE_HARD_WIRED, USB_PORT_NOT_USED, }; /* * USB port quirks. */ /* For the given port, prefer the old (faster) enumeration scheme. 
 */
#define USB_PORT_QUIRK_OLD_SCHEME	BIT(0)

/* Decrease TRSTRCY to 10ms during device enumeration. */
#define USB_PORT_QUIRK_FAST_ENUM	BIT(1)

/*
 * USB 2.0 Link Power Management (LPM) parameters.
 */
struct usb2_lpm_parameters {
	/* Best effort service latency indicates how long the host will drive
	 * resume on an exit from L1.
	 */
	unsigned int besl;

	/* Timeout value in microseconds for the L1 inactivity (LPM) timer.
	 * When the timer counts to zero, the parent hub will initiate an LPM
	 * transition to L1.
	 */
	int timeout;
};

/*
 * USB 3.0 Link Power Management (LPM) parameters.
 *
 * PEL and SEL are USB 3.0 Link PM latencies for device-initiated LPM exit.
 * MEL is the USB 3.0 Link PM latency for host-initiated LPM exit.
 * All three are stored in nanoseconds.
 */
struct usb3_lpm_parameters {
	/*
	 * Maximum exit latency (MEL) for the host to send a packet to the
	 * device (either a Ping for isoc endpoints, or a data packet for
	 * interrupt endpoints), the hubs to decode the packet, and for all
	 * hubs in the path to transition the links to U0.
	 */
	unsigned int mel;
	/*
	 * Maximum exit latency for a device-initiated LPM transition to bring
	 * all links into U0.  Abbreviated as "PEL" in section 9.4.12 of the
	 * USB 3.0 spec, with no explanation of what "P" stands for.  "Path"?
	 */
	unsigned int pel;
	/*
	 * The System Exit Latency (SEL) includes PEL, and three other
	 * latencies.  After a device initiates a U0 transition, it will take
	 * some time from when the device sends the ERDY to when it will
	 * finally receive the data packet.  Basically, SEL should be the
	 * worst-case latency from when a device starts initiating a U0
	 * transition to when it will get data.
	 */
	unsigned int sel;
	/*
	 * The idle timeout value that is currently programmed into the parent
	 * hub for this device.  When the timer counts to zero, the parent hub
	 * will initiate an LPM transition to either U1 or U2.
	 */
	int timeout;
};

/**
 * struct usb_device - kernel's representation of a USB device
 * @devnum: device number; address on a USB bus
 * @devpath: device ID string for use in messages (e.g., /port/...)
 * @route: tree topology hex string for use with xHCI
 * @state: device state: configured, not attached, etc.
 * @speed: device speed: high/full/low (or error)
 * @rx_lanes: number of rx lanes in use, USB 3.2 adds dual-lane support
 * @tx_lanes: number of tx lanes in use, USB 3.2 adds dual-lane support
 * @ssp_rate: SuperSpeed Plus phy signaling rate and lane count
 * @tt: Transaction Translator info; used with low/full speed dev, highspeed hub
 * @ttport: device port on that tt hub
 * @toggle: one bit for each endpoint, with ([0] = IN, [1] = OUT) endpoints
 * @parent: our hub, unless we're the root
 * @bus: bus we're part of
 * @ep0: endpoint 0 data (default control pipe)
 * @dev: generic device interface
 * @descriptor: USB device descriptor
 * @bos: USB device BOS descriptor set
 * @config: all of the device's configs
 * @actconfig: the active configuration
 * @ep_in: array of IN endpoints
 * @ep_out: array of OUT endpoints
 * @rawdescriptors: raw descriptors for each config
 * @bus_mA: Current available from the bus
 * @portnum: parent port number (origin 1)
 * @level: number of USB hub ancestors
 * @devaddr: device address, XHCI: assigned by HW, others: same as devnum
 * @can_submit: URBs may be submitted
 * @persist_enabled: USB_PERSIST enabled for this device
 * @reset_in_progress: the device is being reset
 * @have_langid: whether string_langid is valid
 * @authorized: policy has said we can use it;
 *	(user space) policy determines if we authorize this device to be
 *	used or not.  By default, wired USB devices are authorized.
 *	WUSB devices are not, until we authorize them from user space.
 *	FIXME -- complete doc
 * @authenticated: Crypto authentication passed
 * @tunnel_mode: Connection native or tunneled over USB4
 * @lpm_capable: device supports LPM
 * @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range
 * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
 * @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM
 * @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled
 * @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled
 * @usb3_lpm_u1_enabled: USB3 hardware U1 LPM enabled
 * @usb3_lpm_u2_enabled: USB3 hardware U2 LPM enabled
 * @string_langid: language ID for strings
 * @product: iProduct string, if present (static)
 * @manufacturer: iManufacturer string, if present (static)
 * @serial: iSerialNumber string, if present (static)
 * @filelist: usbfs files that are open to this device
 * @maxchild: number of ports if hub
 * @quirks: quirks of the whole device
 * @urbnum: number of URBs submitted for the whole device
 * @active_duration: total time device is not suspended
 * @connect_time: time device was first connected
 * @do_remote_wakeup: remote wakeup should be enabled
 * @reset_resume: needs reset instead of resume
 * @port_is_suspended: the upstream port is suspended (L2 or U3)
 * @slot_id: Slot ID assigned by xHCI
 * @l1_params: best effort service latency for USB2 L1 LPM state, and L1 timeout.
 * @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout.
 * @u2_params: exit latencies for USB3 U2 LPM state, and hub-initiated timeout.
 * @lpm_disable_count: Ref count used by usb_disable_lpm() and usb_enable_lpm()
 *	to keep track of the number of functions that require USB 3.0 Link
 *	Power Management to be disabled for this usb_device.  This count should
 *	only be manipulated by those functions, while the bandwidth_mutex is
 *	held.
 * @hub_delay: cached value consisting of:
 *	parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns)
 *	Will be used as wValue for SetIsochDelay requests.
 * @use_generic_driver: ask driver core to reprobe using the generic driver.
 *
 * Notes:
 * Usbcore drivers should not set usbdev->state directly.  Instead use
 * usb_set_device_state().
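 *
 * As a usage sketch (the intf variable is hypothetical), an interface
 * driver reaches its usb_device through interface_to_usbdev() and can,
 * for example, key behavior off the negotiated speed:
 *
 *	struct usb_device *udev = interface_to_usbdev(intf);
 *
 *	if (udev->speed >= USB_SPEED_SUPER)
 *		dev_info(&udev->dev, "running at SuperSpeed or better\n");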
*/ struct usb_device { int devnum; char devpath[16]; u32 route; enum usb_device_state state; enum usb_device_speed speed; unsigned int rx_lanes; unsigned int tx_lanes; enum usb_ssp_rate ssp_rate; struct usb_tt *tt; int ttport; unsigned int toggle[2]; struct usb_device *parent; struct usb_bus *bus; struct usb_host_endpoint ep0; struct device dev; struct usb_device_descriptor descriptor; struct usb_host_bos *bos; struct usb_host_config *config; struct usb_host_config *actconfig; struct usb_host_endpoint *ep_in[16]; struct usb_host_endpoint *ep_out[16]; char **rawdescriptors; unsigned short bus_mA; u8 portnum; u8 level; u8 devaddr; unsigned can_submit:1; unsigned persist_enabled:1; unsigned reset_in_progress:1; unsigned have_langid:1; unsigned authorized:1; unsigned authenticated:1; unsigned lpm_capable:1; unsigned lpm_devinit_allow:1; unsigned usb2_hw_lpm_capable:1; unsigned usb2_hw_lpm_besl_capable:1; unsigned usb2_hw_lpm_enabled:1; unsigned usb2_hw_lpm_allowed:1; unsigned usb3_lpm_u1_enabled:1; unsigned usb3_lpm_u2_enabled:1; int string_langid; /* static strings from the device */ char *product; char *manufacturer; char *serial; struct list_head filelist; int maxchild; u32 quirks; atomic_t urbnum; unsigned long active_duration; unsigned long connect_time; unsigned do_remote_wakeup:1; unsigned reset_resume:1; unsigned port_is_suspended:1; enum usb_link_tunnel_mode tunnel_mode; int slot_id; struct usb2_lpm_parameters l1_params; struct usb3_lpm_parameters u1_params; struct usb3_lpm_parameters u2_params; unsigned lpm_disable_count; u16 hub_delay; unsigned use_generic_driver:1; }; #define to_usb_device(__dev) container_of_const(__dev, struct usb_device, dev) static inline struct usb_device *__intf_to_usbdev(struct usb_interface *intf) { return to_usb_device(intf->dev.parent); } static inline const struct usb_device *__intf_to_usbdev_const(const struct usb_interface *intf) { return to_usb_device((const struct device *)intf->dev.parent); } #define interface_to_usbdev(intf) \ _Generic((intf), \ const struct usb_interface *: __intf_to_usbdev_const, \ struct usb_interface *: __intf_to_usbdev)(intf) extern struct usb_device *usb_get_dev(struct usb_device *dev); extern void usb_put_dev(struct usb_device *dev); extern struct usb_device *usb_hub_find_child(struct usb_device *hdev, int port1); /** * usb_hub_for_each_child - iterate over all child devices on the hub * @hdev: USB device belonging to the usb hub * @port1: portnum associated with child device * @child: child device pointer */ #define usb_hub_for_each_child(hdev, port1, child) \ for (port1 = 1, child = usb_hub_find_child(hdev, port1); \ port1 <= hdev->maxchild; \ child = usb_hub_find_child(hdev, ++port1)) \ if (!child) continue; else /* USB device locking */ #define usb_lock_device(udev) device_lock(&(udev)->dev) #define usb_unlock_device(udev) device_unlock(&(udev)->dev) #define usb_lock_device_interruptible(udev) device_lock_interruptible(&(udev)->dev) #define usb_trylock_device(udev) device_trylock(&(udev)->dev) extern int usb_lock_device_for_reset(struct usb_device *udev, const struct usb_interface *iface); /* USB port reset for device reinitialization */ extern int usb_reset_device(struct usb_device *dev); extern void usb_queue_reset_device(struct usb_interface *dev); extern struct device *usb_intf_get_dma_device(struct usb_interface *intf); #ifdef CONFIG_ACPI extern int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable); extern bool usb_acpi_power_manageable(struct usb_device *hdev, int index); extern int 
usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index);
#else
static inline int usb_acpi_set_power_state(struct usb_device *hdev, int index,
	bool enable) { return 0; }
static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
	{ return true; }
static inline int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
	{ return 0; }
#endif

/* USB autosuspend and autoresume */
#ifdef CONFIG_PM
extern void usb_enable_autosuspend(struct usb_device *udev);
extern void usb_disable_autosuspend(struct usb_device *udev);

extern int usb_autopm_get_interface(struct usb_interface *intf);
extern void usb_autopm_put_interface(struct usb_interface *intf);
extern int usb_autopm_get_interface_async(struct usb_interface *intf);
extern void usb_autopm_put_interface_async(struct usb_interface *intf);
extern void usb_autopm_get_interface_no_resume(struct usb_interface *intf);
extern void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);

static inline void usb_mark_last_busy(struct usb_device *udev)
{
	pm_runtime_mark_last_busy(&udev->dev);
}

#else

static inline void usb_enable_autosuspend(struct usb_device *udev)
{ }
static inline void usb_disable_autosuspend(struct usb_device *udev)
{ }

static inline int usb_autopm_get_interface(struct usb_interface *intf)
{ return 0; }
static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
{ return 0; }

static inline void usb_autopm_put_interface(struct usb_interface *intf)
{ }
static inline void usb_autopm_put_interface_async(struct usb_interface *intf)
{ }
static inline void usb_autopm_get_interface_no_resume(
		struct usb_interface *intf)
{ }
static inline void usb_autopm_put_interface_no_suspend(
		struct usb_interface *intf)
{ }
static inline void usb_mark_last_busy(struct usb_device *udev)
{ }
#endif

extern int usb_disable_lpm(struct usb_device *udev);
extern void usb_enable_lpm(struct usb_device *udev);
/* Same as above, but these functions lock/unlock the bandwidth_mutex. */
extern int usb_unlocked_disable_lpm(struct usb_device *udev);
extern void usb_unlocked_enable_lpm(struct usb_device *udev);

extern int usb_disable_ltm(struct usb_device *udev);
extern void usb_enable_ltm(struct usb_device *udev);

static inline bool usb_device_supports_ltm(struct usb_device *udev)
{
	if (udev->speed < USB_SPEED_SUPER || !udev->bos || !udev->bos->ss_cap)
		return false;
	return udev->bos->ss_cap->bmAttributes & USB_LTM_SUPPORT;
}

static inline bool usb_device_no_sg_constraint(struct usb_device *udev)
{
	return udev && udev->bus && udev->bus->no_sg_constraint;
}

/*-------------------------------------------------------------------------*/

/* for drivers using iso endpoints */
extern int usb_get_current_frame_number(struct usb_device *usb_dev);

/* Sets up a group of bulk endpoints to support multiple stream IDs. */
extern int usb_alloc_streams(struct usb_interface *interface,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags);

/* Reverts a group of bulk endpoints back to not using stream IDs. */
extern int usb_free_streams(struct usb_interface *interface,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags);

/* use these for multi-interface device registration */
extern int usb_driver_claim_interface(struct usb_driver *driver,
			struct usb_interface *iface, void *data);

/**
 * usb_interface_claimed - returns true iff an interface is claimed
 * @iface: the interface being checked
 *
 * Return: %true (nonzero) iff the interface is claimed, else %false
 * (zero).
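 *
 * Illustrative sketch (my_driver and the interface number are hypothetical):
 * a driver bound to one interface of a multi-interface device may claim a
 * sibling interface if nobody else has:
 *
 *	struct usb_interface *sibling =
 *		usb_ifnum_to_if(interface_to_usbdev(intf), 1);
 *
 *	if (sibling && !usb_interface_claimed(sibling))
 *		usb_driver_claim_interface(&my_driver, sibling, NULL);
 *		// check the return value in real code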
* * Note: * Callers must own the driver model's usb bus readlock. So driver * probe() entries don't need extra locking, but other call contexts * may need to explicitly claim that lock. * */ static inline int usb_interface_claimed(struct usb_interface *iface) { return (iface->dev.driver != NULL); } extern void usb_driver_release_interface(struct usb_driver *driver, struct usb_interface *iface); int usb_set_wireless_status(struct usb_interface *iface, enum usb_wireless_status status); const struct usb_device_id *usb_match_id(struct usb_interface *interface, const struct usb_device_id *id); extern int usb_match_one_id(struct usb_interface *interface, const struct usb_device_id *id); extern int usb_for_each_dev(void *data, int (*fn)(struct usb_device *, void *)); extern struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor); extern struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev, unsigned ifnum); extern struct usb_host_interface *usb_altnum_to_altsetting( const struct usb_interface *intf, unsigned int altnum); extern struct usb_host_interface *usb_find_alt_setting( struct usb_host_config *config, unsigned int iface_num, unsigned int alt_num); /* port claiming functions */ int usb_hub_claim_port(struct usb_device *hdev, unsigned port1, struct usb_dev_state *owner); int usb_hub_release_port(struct usb_device *hdev, unsigned port1, struct usb_dev_state *owner); /** * usb_make_path - returns stable device path in the usb tree * @dev: the device whose path is being constructed * @buf: where to put the string * @size: how big is "buf"? * * Return: Length of the string (> 0) or negative if size was too small. * * Note: * This identifier is intended to be "stable", reflecting physical paths in * hardware such as physical bus addresses for host controllers or ports on * USB hubs. That makes it stay the same until systems are physically * reconfigured, by re-cabling a tree of USB devices or by moving USB host * controllers. Adding and removing devices, including virtual root hubs * in host controller driver modules, does not change these path identifiers; * neither does rebooting or re-enumerating. These are more useful identifiers * than changeable ("unstable") ones like bus numbers or device addresses. * * With a partial exception for devices connected to USB 2.0 root hubs, these * identifiers are also predictable. So long as the device tree isn't changed, * plugging any USB device into a given hub port always gives it the same path. * Because of the use of "companion" controllers, devices connected to ports on * USB 2.0 root hubs (EHCI host controllers) will get one path ID if they are * high speed, and a different one if they are full or low speed. */ static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size) { int actual; actual = snprintf(buf, size, "usb-%s-%s", dev->bus->bus_name, dev->devpath); return (actual >= (int)size) ? 
-1 : actual; } /*-------------------------------------------------------------------------*/ #define USB_DEVICE_ID_MATCH_DEVICE \ (USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT) #define USB_DEVICE_ID_MATCH_DEV_RANGE \ (USB_DEVICE_ID_MATCH_DEV_LO | USB_DEVICE_ID_MATCH_DEV_HI) #define USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION \ (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_RANGE) #define USB_DEVICE_ID_MATCH_DEV_INFO \ (USB_DEVICE_ID_MATCH_DEV_CLASS | \ USB_DEVICE_ID_MATCH_DEV_SUBCLASS | \ USB_DEVICE_ID_MATCH_DEV_PROTOCOL) #define USB_DEVICE_ID_MATCH_INT_INFO \ (USB_DEVICE_ID_MATCH_INT_CLASS | \ USB_DEVICE_ID_MATCH_INT_SUBCLASS | \ USB_DEVICE_ID_MATCH_INT_PROTOCOL) /** * USB_DEVICE - macro used to describe a specific usb device * @vend: the 16 bit USB Vendor ID * @prod: the 16 bit USB Product ID * * This macro is used to create a struct usb_device_id that matches a * specific device. */ #define USB_DEVICE(vend, prod) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \ .idVendor = (vend), \ .idProduct = (prod) /** * USB_DEVICE_VER - describe a specific usb device with a version range * @vend: the 16 bit USB Vendor ID * @prod: the 16 bit USB Product ID * @lo: the bcdDevice_lo value * @hi: the bcdDevice_hi value * * This macro is used to create a struct usb_device_id that matches a * specific device, with a version range. */ #define USB_DEVICE_VER(vend, prod, lo, hi) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, \ .idVendor = (vend), \ .idProduct = (prod), \ .bcdDevice_lo = (lo), \ .bcdDevice_hi = (hi) /** * USB_DEVICE_INTERFACE_CLASS - describe a usb device with a specific interface class * @vend: the 16 bit USB Vendor ID * @prod: the 16 bit USB Product ID * @cl: bInterfaceClass value * * This macro is used to create a struct usb_device_id that matches a * specific interface class of devices. */ #define USB_DEVICE_INTERFACE_CLASS(vend, prod, cl) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_CLASS, \ .idVendor = (vend), \ .idProduct = (prod), \ .bInterfaceClass = (cl) /** * USB_DEVICE_INTERFACE_PROTOCOL - describe a usb device with a specific interface protocol * @vend: the 16 bit USB Vendor ID * @prod: the 16 bit USB Product ID * @pr: bInterfaceProtocol value * * This macro is used to create a struct usb_device_id that matches a * specific interface protocol of devices. */ #define USB_DEVICE_INTERFACE_PROTOCOL(vend, prod, pr) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_PROTOCOL, \ .idVendor = (vend), \ .idProduct = (prod), \ .bInterfaceProtocol = (pr) /** * USB_DEVICE_INTERFACE_NUMBER - describe a usb device with a specific interface number * @vend: the 16 bit USB Vendor ID * @prod: the 16 bit USB Product ID * @num: bInterfaceNumber value * * This macro is used to create a struct usb_device_id that matches a * specific interface number of devices. */ #define USB_DEVICE_INTERFACE_NUMBER(vend, prod, num) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_NUMBER, \ .idVendor = (vend), \ .idProduct = (prod), \ .bInterfaceNumber = (num) /** * USB_DEVICE_INFO - macro used to describe a class of usb devices * @cl: bDeviceClass value * @sc: bDeviceSubClass value * @pr: bDeviceProtocol value * * This macro is used to create a struct usb_device_id that matches a * specific class of devices. 
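 *
 * For instance (a sketch; the vendor/product values and my_table are
 * hypothetical), an id table can mix the macros in this family:
 *
 *	static const struct usb_device_id my_table[] = {
 *		{ USB_DEVICE(0x1234, 0x5678) },		// one specific device
 *		{ USB_DEVICE_INFO(USB_CLASS_HID, 0, 0) },	// a device class
 *		{ }					// terminating entry
 *	};
 *	MODULE_DEVICE_TABLE(usb, my_table);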
 */
#define USB_DEVICE_INFO(cl, sc, pr) \
	.match_flags = USB_DEVICE_ID_MATCH_DEV_INFO, \
	.bDeviceClass = (cl), \
	.bDeviceSubClass = (sc), \
	.bDeviceProtocol = (pr)

/**
 * USB_INTERFACE_INFO - macro used to describe a class of usb interfaces
 * @cl: bInterfaceClass value
 * @sc: bInterfaceSubClass value
 * @pr: bInterfaceProtocol value
 *
 * This macro is used to create a struct usb_device_id that matches a
 * specific class of interfaces.
 */
#define USB_INTERFACE_INFO(cl, sc, pr) \
	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO, \
	.bInterfaceClass = (cl), \
	.bInterfaceSubClass = (sc), \
	.bInterfaceProtocol = (pr)

/**
 * USB_DEVICE_AND_INTERFACE_INFO - describe a specific usb device with a class of usb interfaces
 * @vend: the 16 bit USB Vendor ID
 * @prod: the 16 bit USB Product ID
 * @cl: bInterfaceClass value
 * @sc: bInterfaceSubClass value
 * @pr: bInterfaceProtocol value
 *
 * This macro is used to create a struct usb_device_id that matches a
 * specific device with a specific class of interfaces.
 *
 * This is especially useful when explicitly matching devices that have
 * vendor specific bDeviceClass values, but standards-compliant interfaces.
 */
#define USB_DEVICE_AND_INTERFACE_INFO(vend, prod, cl, sc, pr) \
	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
		| USB_DEVICE_ID_MATCH_DEVICE, \
	.idVendor = (vend), \
	.idProduct = (prod), \
	.bInterfaceClass = (cl), \
	.bInterfaceSubClass = (sc), \
	.bInterfaceProtocol = (pr)

/**
 * USB_VENDOR_AND_INTERFACE_INFO - describe a specific usb vendor with a class of usb interfaces
 * @vend: the 16 bit USB Vendor ID
 * @cl: bInterfaceClass value
 * @sc: bInterfaceSubClass value
 * @pr: bInterfaceProtocol value
 *
 * This macro is used to create a struct usb_device_id that matches a
 * specific vendor with a specific class of interfaces.
 *
 * This is especially useful when explicitly matching devices that have
 * vendor specific bDeviceClass values, but standards-compliant interfaces.
 */
#define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \
	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
		| USB_DEVICE_ID_MATCH_VENDOR, \
	.idVendor = (vend), \
	.bInterfaceClass = (cl), \
	.bInterfaceSubClass = (sc), \
	.bInterfaceProtocol = (pr)

/* ----------------------------------------------------------------------- */

/* Stuff for dynamic usb ids */
extern struct mutex usb_dynids_lock;
struct usb_dynids {
	struct list_head list;
};

struct usb_dynid {
	struct list_head node;
	struct usb_device_id id;
};

extern ssize_t usb_store_new_id(struct usb_dynids *dynids,
				const struct usb_device_id *id_table,
				struct device_driver *driver,
				const char *buf, size_t count);

extern ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf);

/**
 * struct usb_driver - identifies USB interface driver to usbcore
 * @name: The driver name should be unique among USB drivers,
 *	and should normally be the same as the module name.
 * @probe: Called to see if the driver is willing to manage a particular
 *	interface on a device.  If it is, probe returns zero and uses
 *	usb_set_intfdata() to associate driver-specific data with the
 *	interface.  It may also use usb_set_interface() to specify the
 *	appropriate altsetting.  If unwilling to manage the interface,
 *	return -ENODEV; if genuine IO errors occur, return an appropriate
 *	negative errno value.
 * @disconnect: Called when the interface is no longer accessible, usually
 *	because its device has been (or is being) disconnected or the
 *	driver module is being unloaded.
 * @unlocked_ioctl: Used for drivers that want to talk to userspace through
 *	the "usbfs" filesystem.  This lets devices provide ways to
 *	expose information to user space regardless of where they
 *	do (or don't) show up otherwise in the filesystem.
 * @suspend: Called when the device is going to be suspended by the
 *	system either from system sleep or runtime suspend context.  The
 *	return value will be ignored in system sleep context, so do NOT
 *	try to continue using the device if suspend fails in this case.
 *	Instead, let the resume or reset-resume routine recover from
 *	the failure.
 * @resume: Called when the device is being resumed by the system.
 * @reset_resume: Called when the suspended device has been reset instead
 *	of being resumed.
 * @pre_reset: Called by usb_reset_device() when the device is about to be
 *	reset.  This routine must not return until the driver has no active
 *	URBs for the device, and no more URBs may be submitted until the
 *	post_reset method is called.
 * @post_reset: Called by usb_reset_device() after the device
 *	has been reset
 * @shutdown: Called at shut-down time to quiesce the device.
 * @id_table: USB drivers use an ID table to support hotplugging.
 *	Export this with MODULE_DEVICE_TABLE(usb,...).  This must be set
 *	or your driver's probe function will never get called.
 * @dev_groups: Attributes attached to the device that will be created once it
 *	is bound to the driver.
 * @dynids: used internally to hold the list of dynamically added device
 *	ids for this driver.
 * @driver: The driver-model core driver structure.
 * @no_dynamic_id: if set to 1, the USB core will not allow dynamic ids to be
 *	added to this driver by preventing the sysfs file from being created.
 * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
 *	for interfaces bound to this driver.
 * @soft_unbind: if set to 1, the USB core will not kill URBs and disable
 *	endpoints before calling the driver's disconnect method.
 * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs
 *	to initiate lower power link state transitions when an idle timeout
 *	occurs.  Device-initiated USB 3.0 link PM will still be allowed.
 *
 * USB interface drivers must provide a name, probe() and disconnect()
 * methods, and an id_table.  Other driver fields are optional.
 *
 * The id_table is used in hotplugging.  It holds a set of descriptors,
 * and specialized data may be associated with each entry.  That table
 * is used by both user and kernel mode hotplugging support.
 *
 * The probe() and disconnect() methods are called in a context where
 * they can sleep, but they should avoid abusing the privilege.  Most
 * work to connect to a device should be done when the device is opened,
 * and undone at the last close.  The disconnect code needs to address
 * concurrency issues with respect to open() and close() methods, as
 * well as forcing all pending I/O requests to complete (by unlinking
 * them as necessary, and blocking until the unlinks complete).
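 *
 * A minimal skeleton (hypothetical names; my_table as sketched for the
 * device-id macros above, my_probe() and my_disconnect() supplied by the
 * driver):
 *
 *	static struct usb_driver my_driver = {
 *		.name       = "my_driver",
 *		.probe      = my_probe,
 *		.disconnect = my_disconnect,
 *		.id_table   = my_table,
 *	};
 *
 *	module_usb_driver(my_driver);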
*/ struct usb_driver { const char *name; int (*probe) (struct usb_interface *intf, const struct usb_device_id *id); void (*disconnect) (struct usb_interface *intf); int (*unlocked_ioctl) (struct usb_interface *intf, unsigned int code, void *buf); int (*suspend) (struct usb_interface *intf, pm_message_t message); int (*resume) (struct usb_interface *intf); int (*reset_resume)(struct usb_interface *intf); int (*pre_reset)(struct usb_interface *intf); int (*post_reset)(struct usb_interface *intf); void (*shutdown)(struct usb_interface *intf); const struct usb_device_id *id_table; const struct attribute_group **dev_groups; struct usb_dynids dynids; struct device_driver driver; unsigned int no_dynamic_id:1; unsigned int supports_autosuspend:1; unsigned int disable_hub_initiated_lpm:1; unsigned int soft_unbind:1; }; #define to_usb_driver(d) container_of_const(d, struct usb_driver, driver) /** * struct usb_device_driver - identifies USB device driver to usbcore * @name: The driver name should be unique among USB drivers, * and should normally be the same as the module name. * @match: If set, used for better device/driver matching. * @probe: Called to see if the driver is willing to manage a particular * device. If it is, probe returns zero and uses dev_set_drvdata() * to associate driver-specific data with the device. If unwilling * to manage the device, return a negative errno value. * @disconnect: Called when the device is no longer accessible, usually * because it has been (or is being) disconnected or the driver's * module is being unloaded. * @suspend: Called when the device is going to be suspended by the system. * @resume: Called when the device is being resumed by the system. * @choose_configuration: If non-NULL, called instead of the default * usb_choose_configuration(). If this returns an error then we'll go * on to call the normal usb_choose_configuration(). * @dev_groups: Attributes attached to the device that will be created once it * is bound to the driver. * @driver: The driver-model core driver structure. * @id_table: used with @match() to select better matching driver at * probe() time. * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend * for devices bound to this driver. * @generic_subclass: if set to 1, the generic USB driver's probe, disconnect, * resume and suspend functions will be called in addition to the driver's * own, so this part of the setup does not need to be replicated. * * USB drivers must provide all the fields listed above except driver, * match, and id_table. */ struct usb_device_driver { const char *name; bool (*match) (struct usb_device *udev); int (*probe) (struct usb_device *udev); void (*disconnect) (struct usb_device *udev); int (*suspend) (struct usb_device *udev, pm_message_t message); int (*resume) (struct usb_device *udev, pm_message_t message); int (*choose_configuration) (struct usb_device *udev); const struct attribute_group **dev_groups; struct device_driver driver; const struct usb_device_id *id_table; unsigned int supports_autosuspend:1; unsigned int generic_subclass:1; }; #define to_usb_device_driver(d) container_of_const(d, struct usb_device_driver, driver) /** * struct usb_class_driver - identifies a USB driver that wants to use the USB major number * @name: the usb class device name for this driver. Will show up in sysfs. * @devnode: Callback to provide a naming hint for a possible * device node to create. * @fops: pointer to the struct file_operations of this driver. 
* @minor_base: the start of the minor range for this driver. * * This structure is used for the usb_register_dev() and * usb_deregister_dev() functions, to consolidate a number of the * parameters used for them. */ struct usb_class_driver { char *name; char *(*devnode)(const struct device *dev, umode_t *mode); const struct file_operations *fops; int minor_base; }; /* * use these in module_init()/module_exit() * and don't forget MODULE_DEVICE_TABLE(usb, ...) */ extern int usb_register_driver(struct usb_driver *, struct module *, const char *); /* use a define to avoid include chaining to get THIS_MODULE & friends */ #define usb_register(driver) \ usb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) extern void usb_deregister(struct usb_driver *); /** * module_usb_driver() - Helper macro for registering a USB driver * @__usb_driver: usb_driver struct * * Helper macro for USB drivers which do not do anything special in module * init/exit. This eliminates a lot of boilerplate. Each module may only * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_usb_driver(__usb_driver) \ module_driver(__usb_driver, usb_register, \ usb_deregister) extern int usb_register_device_driver(struct usb_device_driver *, struct module *); extern void usb_deregister_device_driver(struct usb_device_driver *); extern int usb_register_dev(struct usb_interface *intf, struct usb_class_driver *class_driver); extern void usb_deregister_dev(struct usb_interface *intf, struct usb_class_driver *class_driver); extern int usb_disabled(void); /* ----------------------------------------------------------------------- */ /* * URB support, for asynchronous request completions */ /* * urb->transfer_flags: * * Note: URB_DIR_IN/OUT is automatically set in usb_submit_urb(). 
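 *
 * For example (a sketch; urb setup and the err variable are assumed to
 * exist elsewhere), a driver whose bulk OUT data must be terminated by a
 * short packet can request an explicit zero-length packet when the buffer
 * length is an exact multiple of the endpoint's wMaxPacketSize:
 *
 *	urb->transfer_flags |= URB_ZERO_PACKET;
 *	err = usb_submit_urb(urb, GFP_KERNEL);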
*/ #define URB_SHORT_NOT_OK 0x0001 /* report short reads as errors */ #define URB_ISO_ASAP 0x0002 /* iso-only; use the first unexpired * slot in the schedule */ #define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */ #define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */ #define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt * needed */ #define URB_FREE_BUFFER 0x0100 /* Free transfer buffer with the URB */ /* The following flags are used internally by usbcore and HCDs */ #define URB_DIR_IN 0x0200 /* Transfer from device to host */ #define URB_DIR_OUT 0 #define URB_DIR_MASK URB_DIR_IN #define URB_DMA_MAP_SINGLE 0x00010000 /* Non-scatter-gather mapping */ #define URB_DMA_MAP_PAGE 0x00020000 /* HCD-unsupported S-G */ #define URB_DMA_MAP_SG 0x00040000 /* HCD-supported S-G */ #define URB_MAP_LOCAL 0x00080000 /* HCD-local-memory mapping */ #define URB_SETUP_MAP_SINGLE 0x00100000 /* Setup packet DMA mapped */ #define URB_SETUP_MAP_LOCAL 0x00200000 /* HCD-local setup packet */ #define URB_DMA_SG_COMBINED 0x00400000 /* S-G entries were combined */ #define URB_ALIGNED_TEMP_BUFFER 0x00800000 /* Temp buffer was alloc'd */ struct usb_iso_packet_descriptor { unsigned int offset; unsigned int length; /* expected length */ unsigned int actual_length; int status; }; struct urb; struct usb_anchor { struct list_head urb_list; wait_queue_head_t wait; spinlock_t lock; atomic_t suspend_wakeups; unsigned int poisoned:1; }; static inline void init_usb_anchor(struct usb_anchor *anchor) { memset(anchor, 0, sizeof(*anchor)); INIT_LIST_HEAD(&anchor->urb_list); init_waitqueue_head(&anchor->wait); spin_lock_init(&anchor->lock); } typedef void (*usb_complete_t)(struct urb *); /** * struct urb - USB Request Block * @urb_list: For use by current owner of the URB. * @anchor_list: membership in the list of an anchor * @anchor: to anchor URBs to a common mooring * @ep: Points to the endpoint's data structure. Will eventually * replace @pipe. * @pipe: Holds endpoint number, direction, type, and more. * Create these values with the eight macros available; * usb_{snd,rcv}TYPEpipe(dev,endpoint), where the TYPE is "ctrl" * (control), "bulk", "int" (interrupt), or "iso" (isochronous). * For example usb_sndbulkpipe() or usb_rcvintpipe(). Endpoint * numbers range from zero to fifteen. Note that "in" endpoint two * is a different endpoint (and pipe) from "out" endpoint two. * The current configuration controls the existence, type, and * maximum packet size of any given endpoint. * @stream_id: the endpoint's stream ID for bulk streams * @dev: Identifies the USB device to perform the request. * @status: This is read in non-iso completion functions to get the * status of the particular request. ISO requests only use it * to tell whether the URB was unlinked; detailed status for * each frame is in the fields of the iso_frame-desc. * @transfer_flags: A variety of flags may be used to affect how URB * submission, unlinking, or operation are handled. Different * kinds of URB can use different flags. * @transfer_buffer: This identifies the buffer to (or from) which the I/O * request will be performed unless URB_NO_TRANSFER_DMA_MAP is set * (however, do not leave garbage in transfer_buffer even then). * This buffer must be suitable for DMA; allocate it with * kmalloc() or equivalent. For transfers to "in" endpoints, contents * of this buffer will be modified. This buffer is used for the data * stage of control transfers. 
* @transfer_dma: When transfer_flags includes URB_NO_TRANSFER_DMA_MAP, * the device driver is saying that it provided this DMA address, * which the host controller driver should use in preference to the * transfer_buffer. * @sg: scatter gather buffer list, the buffer size of each element in * the list (except the last) must be divisible by the endpoint's * max packet size if no_sg_constraint isn't set in 'struct usb_bus' * @num_mapped_sgs: (internal) number of mapped sg entries * @num_sgs: number of entries in the sg list * @transfer_buffer_length: How big is transfer_buffer. The transfer may * be broken up into chunks according to the current maximum packet * size for the endpoint, which is a function of the configuration * and is encoded in the pipe. When the length is zero, neither * transfer_buffer nor transfer_dma is used. * @actual_length: This is read in non-iso completion functions, and * it tells how many bytes (out of transfer_buffer_length) were * transferred. It will normally be the same as requested, unless * either an error was reported or a short read was performed. * The URB_SHORT_NOT_OK transfer flag may be used to make such * short reads be reported as errors. * @setup_packet: Only used for control transfers, this points to eight bytes * of setup data. Control transfers always start by sending this data * to the device. Then transfer_buffer is read or written, if needed. * @setup_dma: DMA pointer for the setup packet. The caller must not use * this field; setup_packet must point to a valid buffer. * @start_frame: Returns the initial frame for isochronous transfers. * @number_of_packets: Lists the number of ISO transfer buffers. * @interval: Specifies the polling interval for interrupt or isochronous * transfers. The units are frames (milliseconds) for full and low * speed devices, and microframes (1/8 millisecond) for highspeed * and SuperSpeed devices. * @error_count: Returns the number of ISO transfers that reported errors. * @context: For use in completion functions. This normally points to * request-specific driver context. * @complete: Completion handler. This URB is passed as the parameter to the * completion function. The completion function may then do what * it likes with the URB, including resubmitting or freeing it. * @iso_frame_desc: Used to provide arrays of ISO transfer buffers and to * collect the transfer status for each buffer. * * This structure identifies USB transfer requests. URBs must be allocated by * calling usb_alloc_urb() and freed with a call to usb_free_urb(). * Initialization may be done using various usb_fill_*_urb() functions. URBs * are submitted using usb_submit_urb(), and pending requests may be canceled * using usb_unlink_urb() or usb_kill_urb(). * * Data Transfer Buffers: * * Normally drivers provide I/O buffers allocated with kmalloc() or otherwise * taken from the general page pool. That is provided by transfer_buffer * (control requests also use setup_packet), and host controller drivers * perform a dma mapping (and unmapping) for each buffer transferred. Those * mapping operations can be expensive on some platforms (perhaps using a dma * bounce buffer or talking to an IOMMU), * although they're cheap on commodity x86 and ppc hardware. * * Alternatively, drivers may pass the URB_NO_TRANSFER_DMA_MAP transfer flag, * which tells the host controller driver that no such mapping is needed for * the transfer_buffer since * the device driver is DMA-aware. 
For example, a device driver might * allocate a DMA buffer with usb_alloc_coherent() or call usb_buffer_map(). * When this transfer flag is provided, host controller drivers will * attempt to use the dma address found in the transfer_dma * field rather than determining a dma address themselves. * * Note that transfer_buffer must still be set if the controller * does not support DMA (as indicated by hcd_uses_dma()) and when talking * to root hub. If you have to transfer between highmem zone and the device * on such controller, create a bounce buffer or bail out with an error. * If transfer_buffer cannot be set (is in highmem) and the controller is DMA * capable, assign NULL to it, so that usbmon knows not to use the value. * The setup_packet must always be set, so it cannot be located in highmem. * * Initialization: * * All URBs submitted must initialize the dev, pipe, transfer_flags (may be * zero), and complete fields. All URBs must also initialize * transfer_buffer and transfer_buffer_length. They may provide the * URB_SHORT_NOT_OK transfer flag, indicating that short reads are * to be treated as errors; that flag is invalid for write requests. * * Bulk URBs may * use the URB_ZERO_PACKET transfer flag, indicating that bulk OUT transfers * should always terminate with a short packet, even if it means adding an * extra zero length packet. * * Control URBs must provide a valid pointer in the setup_packet field. * Unlike the transfer_buffer, the setup_packet may not be mapped for DMA * beforehand. * * Interrupt URBs must provide an interval, saying how often (in milliseconds * or, for highspeed devices, 125 microsecond units) * to poll for transfers. After the URB has been submitted, the interval * field reflects how the transfer was actually scheduled. * The polling interval may be more frequent than requested. * For example, some controllers have a maximum interval of 32 milliseconds, * while others support intervals of up to 1024 milliseconds. * Isochronous URBs also have transfer intervals. (Note that for isochronous * endpoints, as well as high speed interrupt endpoints, the encoding of * the transfer interval in the endpoint descriptor is logarithmic. * Device drivers must convert that value to linear units themselves.) * * If an isochronous endpoint queue isn't already running, the host * controller will schedule a new URB to start as soon as bandwidth * utilization allows. If the queue is running then a new URB will be * scheduled to start in the first transfer slot following the end of the * preceding URB, if that slot has not already expired. If the slot has * expired (which can happen when IRQ delivery is delayed for a long time), * the scheduling behavior depends on the URB_ISO_ASAP flag. If the flag * is clear then the URB will be scheduled to start in the expired slot, * implying that some of its packets will not be transferred; if the flag * is set then the URB will be scheduled in the first unexpired slot, * breaking the queue's synchronization. Upon URB completion, the * start_frame field will be set to the (micro)frame number in which the * transfer was scheduled. Ranges for frame counter values are HC-specific * and can go from as low as 256 to as high as 65536 frames. * * Isochronous URBs have a different data transfer model, in part because * the quality of service is only "best effort". Callers provide specially * allocated URBs, with number_of_packets worth of iso_frame_desc structures * at the end. Each such packet is an individual ISO transfer. 
Isochronous * URBs are normally queued, submitted by drivers to arrange that * transfers are at least double buffered, and then explicitly resubmitted * in completion handlers, so * that data (such as audio or video) streams at as constant a rate as the * host controller scheduler can support. * * Completion Callbacks: * * The completion callback is made in_interrupt(), and one of the first * things that a completion handler should do is check the status field. * The status field is provided for all URBs. It is used to report * unlinked URBs, and status for all non-ISO transfers. It should not * be examined before the URB is returned to the completion handler. * * The context field is normally used to link URBs back to the relevant * driver or request state. * * When the completion callback is invoked for non-isochronous URBs, the * actual_length field tells how many bytes were transferred. This field * is updated even when the URB terminated with an error or was unlinked. * * ISO transfer status is reported in the status and actual_length fields * of the iso_frame_desc array, and the number of errors is reported in * error_count. Completion callbacks for ISO transfers will normally * (re)submit URBs to ensure a constant transfer rate. * * Note that even fields marked "public" should not be touched by the driver * when the urb is owned by the hcd, that is, since the call to * usb_submit_urb() till the entry into the completion routine. */ struct urb { /* private: usb core and host controller only fields in the urb */ struct kref kref; /* reference count of the URB */ int unlinked; /* unlink error code */ void *hcpriv; /* private data for host controller */ atomic_t use_count; /* concurrent submissions counter */ atomic_t reject; /* submissions will fail */ /* public: documented fields in the urb that can be used by drivers */ struct list_head urb_list; /* list head for use by the urb's * current owner */ struct list_head anchor_list; /* the URB may be anchored */ struct usb_anchor *anchor; struct usb_device *dev; /* (in) pointer to associated device */ struct usb_host_endpoint *ep; /* (internal) pointer to endpoint */ unsigned int pipe; /* (in) pipe information */ unsigned int stream_id; /* (in) stream ID */ int status; /* (return) non-ISO status */ unsigned int transfer_flags; /* (in) URB_SHORT_NOT_OK | ...*/ void *transfer_buffer; /* (in) associated data buffer */ dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */ struct scatterlist *sg; /* (in) scatter gather buffer list */ int num_mapped_sgs; /* (internal) mapped sg entries */ int num_sgs; /* (in) number of entries in the sg list */ u32 transfer_buffer_length; /* (in) data buffer length */ u32 actual_length; /* (return) actual transfer length */ unsigned char *setup_packet; /* (in) setup packet (control only) */ dma_addr_t setup_dma; /* (in) dma addr for setup_packet */ int start_frame; /* (modify) start frame (ISO) */ int number_of_packets; /* (in) number of ISO packets */ int interval; /* (modify) transfer interval * (INT/ISO) */ int error_count; /* (return) number of ISO errors */ void *context; /* (in) context for completion */ usb_complete_t complete; /* (in) completion routine */ struct usb_iso_packet_descriptor iso_frame_desc[]; /* (in) ISO ONLY */ }; /* ----------------------------------------------------------------------- */ /** * usb_fill_control_urb - initializes a control urb * @urb: pointer to the urb to initialize. * @dev: pointer to the struct usb_device for this urb. 
* @pipe: the endpoint pipe * @setup_packet: pointer to the setup_packet buffer. The buffer must be * suitable for DMA. * @transfer_buffer: pointer to the transfer buffer. The buffer must be * suitable for DMA. * @buffer_length: length of the transfer buffer * @complete_fn: pointer to the usb_complete_t function * @context: what to set the urb context to. * * Initializes a control urb with the proper information needed to submit * it to a device. * * The transfer buffer and the setup_packet buffer will most likely be filled * or read via DMA. The simplest way to get a buffer that can be DMAed to is * allocating it via kmalloc() or equivalent, even for very small buffers. * If the buffers are embedded in a bigger structure, there is a risk that * the buffer itself, the previous fields and/or the next fields are corrupted * due to cache incoherency, or slowed down if they are evicted from the * cache. For more information, check &struct urb. */ static inline void usb_fill_control_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, unsigned char *setup_packet, void *transfer_buffer, int buffer_length, usb_complete_t complete_fn, void *context) { urb->dev = dev; urb->pipe = pipe; urb->setup_packet = setup_packet; urb->transfer_buffer = transfer_buffer; urb->transfer_buffer_length = buffer_length; urb->complete = complete_fn; urb->context = context; } /** * usb_fill_bulk_urb - helper to initialize a bulk urb * @urb: pointer to the urb to initialize. * @dev: pointer to the struct usb_device for this urb. * @pipe: the endpoint pipe * @transfer_buffer: pointer to the transfer buffer. The buffer must be * suitable for DMA. * @buffer_length: length of the transfer buffer * @complete_fn: pointer to the usb_complete_t function * @context: what to set the urb context to. * * Initializes a bulk urb with the proper information needed to submit it * to a device. * * Refer to usb_fill_control_urb() for a description of the requirements for * transfer_buffer. */ static inline void usb_fill_bulk_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, void *transfer_buffer, int buffer_length, usb_complete_t complete_fn, void *context) { urb->dev = dev; urb->pipe = pipe; urb->transfer_buffer = transfer_buffer; urb->transfer_buffer_length = buffer_length; urb->complete = complete_fn; urb->context = context; } /** * usb_fill_int_urb - helper to initialize an interrupt urb * @urb: pointer to the urb to initialize. * @dev: pointer to the struct usb_device for this urb. * @pipe: the endpoint pipe * @transfer_buffer: pointer to the transfer buffer. The buffer must be * suitable for DMA. * @buffer_length: length of the transfer buffer * @complete_fn: pointer to the usb_complete_t function * @context: what to set the urb context to. * @interval: what to set the urb interval to, encoded like * the endpoint descriptor's bInterval value. * * Initializes an interrupt urb with the proper information needed to submit * it to a device. * * Refer to usb_fill_control_urb() for a description of the requirements for * transfer_buffer. * * Note that High Speed and SuperSpeed(+) interrupt endpoints use a logarithmic * encoding of the endpoint interval, and express polling intervals in * microframes (eight per millisecond) rather than in frames (one per * millisecond).
*/ static inline void usb_fill_int_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, void *transfer_buffer, int buffer_length, usb_complete_t complete_fn, void *context, int interval) { urb->dev = dev; urb->pipe = pipe; urb->transfer_buffer = transfer_buffer; urb->transfer_buffer_length = buffer_length; urb->complete = complete_fn; urb->context = context; if (dev->speed == USB_SPEED_HIGH || dev->speed >= USB_SPEED_SUPER) { /* make sure interval is within allowed range */ interval = clamp(interval, 1, 16); urb->interval = 1 << (interval - 1); } else { urb->interval = interval; } urb->start_frame = -1; } extern void usb_init_urb(struct urb *urb); extern struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags); extern void usb_free_urb(struct urb *urb); #define usb_put_urb usb_free_urb extern struct urb *usb_get_urb(struct urb *urb); extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags); extern int usb_unlink_urb(struct urb *urb); extern void usb_kill_urb(struct urb *urb); extern void usb_poison_urb(struct urb *urb); extern void usb_unpoison_urb(struct urb *urb); extern void usb_block_urb(struct urb *urb); extern void usb_kill_anchored_urbs(struct usb_anchor *anchor); extern void usb_poison_anchored_urbs(struct usb_anchor *anchor); extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor); extern void usb_anchor_suspend_wakeups(struct usb_anchor *anchor); extern void usb_anchor_resume_wakeups(struct usb_anchor *anchor); extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor); extern void usb_unanchor_urb(struct urb *urb); extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor, unsigned int timeout); extern struct urb *usb_get_from_anchor(struct usb_anchor *anchor); extern void usb_scuttle_anchored_urbs(struct usb_anchor *anchor); extern int usb_anchor_empty(struct usb_anchor *anchor); #define usb_unblock_urb usb_unpoison_urb /** * usb_urb_dir_in - check if an URB describes an IN transfer * @urb: URB to be checked * * Return: 1 if @urb describes an IN transfer (device-to-host), * otherwise 0. */ static inline int usb_urb_dir_in(struct urb *urb) { return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN; } /** * usb_urb_dir_out - check if an URB describes an OUT transfer * @urb: URB to be checked * * Return: 1 if @urb describes an OUT transfer (host-to-device), * otherwise 0. 
*/ static inline int usb_urb_dir_out(struct urb *urb) { return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT; } int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe); int usb_urb_ep_type_check(const struct urb *urb); void *usb_alloc_coherent(struct usb_device *dev, size_t size, gfp_t mem_flags, dma_addr_t *dma); void usb_free_coherent(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma); /*-------------------------------------------------------------------* * SYNCHRONOUS CALL SUPPORT * *-------------------------------------------------------------------*/ extern int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout); extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout); extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout); /* wrappers around usb_control_msg() for the most common standard requests */ int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request, __u8 requesttype, __u16 value, __u16 index, const void *data, __u16 size, int timeout, gfp_t memflags); int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout, gfp_t memflags); extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype, unsigned char descindex, void *buf, int size); extern int usb_get_status(struct usb_device *dev, int recip, int type, int target, void *data); static inline int usb_get_std_status(struct usb_device *dev, int recip, int target, void *data) { return usb_get_status(dev, recip, USB_STATUS_TYPE_STANDARD, target, data); } static inline int usb_get_ptm_status(struct usb_device *dev, void *data) { return usb_get_status(dev, USB_RECIP_DEVICE, USB_STATUS_TYPE_PTM, 0, data); } extern int usb_string(struct usb_device *dev, int index, char *buf, size_t size); extern char *usb_cache_string(struct usb_device *udev, int index); /* wrappers that also update important state inside usbcore */ extern int usb_clear_halt(struct usb_device *dev, int pipe); extern int usb_reset_configuration(struct usb_device *dev); extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate); extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr); /* this request isn't really synchronous, but it belongs with the others */ extern int usb_driver_set_configuration(struct usb_device *udev, int config); /* choose and set configuration for device */ extern int usb_choose_configuration(struct usb_device *udev); extern int usb_set_configuration(struct usb_device *dev, int configuration); /* * timeouts, in milliseconds, used for sending/receiving control messages * they typically complete within a few frames (msec) after they're issued * USB identifies 5 second timeouts, maybe more in a few cases, and a few * slow devices (like some MGE Ellipse UPSes) actually push that limit. */ #define USB_CTRL_GET_TIMEOUT 5000 #define USB_CTRL_SET_TIMEOUT 5000 /** * struct usb_sg_request - support for scatter/gather I/O * @status: zero indicates success, else negative errno * @bytes: counts bytes transferred. * * These requests are initialized using usb_sg_init(), and then are used * as request handles passed to usb_sg_wait() or usb_sg_cancel(). Most * members of the request object aren't for driver access. 
* * The status and bytecount values are valid only after usb_sg_wait() * returns. If the status is zero, then the bytecount matches the total * from the request. * * After an error completion, drivers may need to clear a halt condition * on the endpoint. */ struct usb_sg_request { int status; size_t bytes; /* private: * members below are private to usbcore, * and are not provided for driver access! */ spinlock_t lock; struct usb_device *dev; int pipe; int entries; struct urb **urbs; int count; struct completion complete; }; int usb_sg_init( struct usb_sg_request *io, struct usb_device *dev, unsigned pipe, unsigned period, struct scatterlist *sg, int nents, size_t length, gfp_t mem_flags ); void usb_sg_cancel(struct usb_sg_request *io); void usb_sg_wait(struct usb_sg_request *io); /* ----------------------------------------------------------------------- */ /* * For various legacy reasons, Linux has a small cookie that's paired with * a struct usb_device to identify an endpoint queue. Queue characteristics * are defined by the endpoint's descriptor. This cookie is called a "pipe", * an unsigned int encoded as: * * - direction: bit 7 (0 = Host-to-Device [Out], * 1 = Device-to-Host [In] ... * like endpoint bEndpointAddress) * - device address: bits 8-14 ... bit positions known to uhci-hcd * - endpoint: bits 15-18 ... bit positions known to uhci-hcd * - pipe type: bits 30-31 (00 = isochronous, 01 = interrupt, * 10 = control, 11 = bulk) * * Given the device address and endpoint descriptor, pipes are redundant. */ /* NOTE: these are not the standard USB_ENDPOINT_XFER_* values!! */ /* (yet ... they're the values used by usbfs) */ #define PIPE_ISOCHRONOUS 0 #define PIPE_INTERRUPT 1 #define PIPE_CONTROL 2 #define PIPE_BULK 3 #define usb_pipein(pipe) ((pipe) & USB_DIR_IN) #define usb_pipeout(pipe) (!usb_pipein(pipe)) #define usb_pipedevice(pipe) (((pipe) >> 8) & 0x7f) #define usb_pipeendpoint(pipe) (((pipe) >> 15) & 0xf) #define usb_pipetype(pipe) (((pipe) >> 30) & 3) #define usb_pipeisoc(pipe) (usb_pipetype((pipe)) == PIPE_ISOCHRONOUS) #define usb_pipeint(pipe) (usb_pipetype((pipe)) == PIPE_INTERRUPT) #define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL) #define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK) static inline unsigned int __create_pipe(struct usb_device *dev, unsigned int endpoint) { return (dev->devnum << 8) | (endpoint << 15); } /* Create various pipes... */ #define usb_sndctrlpipe(dev, endpoint) \ ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint)) #define usb_rcvctrlpipe(dev, endpoint) \ ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN) #define usb_sndisocpipe(dev, endpoint) \ ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint)) #define usb_rcvisocpipe(dev, endpoint) \ ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN) #define usb_sndbulkpipe(dev, endpoint) \ ((PIPE_BULK << 30) | __create_pipe(dev, endpoint)) #define usb_rcvbulkpipe(dev, endpoint) \ ((PIPE_BULK << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN) #define usb_sndintpipe(dev, endpoint) \ ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint)) #define usb_rcvintpipe(dev, endpoint) \ ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN) static inline struct usb_host_endpoint * usb_pipe_endpoint(struct usb_device *dev, unsigned int pipe) { struct usb_host_endpoint **eps; eps = usb_pipein(pipe) ? 
dev->ep_in : dev->ep_out; return eps[usb_pipeendpoint(pipe)]; } static inline u16 usb_maxpacket(struct usb_device *udev, int pipe) { struct usb_host_endpoint *ep = usb_pipe_endpoint(udev, pipe); if (!ep) return 0; /* NOTE: only 0x07ff bits are for packet size... */ return usb_endpoint_maxp(&ep->desc); } /* translate USB error codes to codes user space understands */ static inline int usb_translate_errors(int error_code) { switch (error_code) { case 0: case -ENOMEM: case -ENODEV: case -EOPNOTSUPP: return error_code; default: return -EIO; } } /* Events from the usb core */ #define USB_DEVICE_ADD 0x0001 #define USB_DEVICE_REMOVE 0x0002 #define USB_BUS_ADD 0x0003 #define USB_BUS_REMOVE 0x0004 extern void usb_register_notify(struct notifier_block *nb); extern void usb_unregister_notify(struct notifier_block *nb); /* debugfs stuff */ extern struct dentry *usb_debug_root; /* LED triggers */ enum usb_led_event { USB_LED_EVENT_HOST = 0, USB_LED_EVENT_GADGET = 1, }; #ifdef CONFIG_USB_LED_TRIG extern void usb_led_activity(enum usb_led_event ev); #else static inline void usb_led_activity(enum usb_led_event ev) {} #endif #endif /* __KERNEL__ */ #endif
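A minimal sketch (not part of the header above) tying together the asynchronous pattern its kerneldoc describes: allocate an URB, fill it for an IN interrupt endpoint with usb_fill_int_urb(), submit it, check status first in the completion handler, and resubmit from there. The my_* names, the endpoint number and the buffer length are hypothetical placeholders.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>

#define MY_INTR_BUF_LEN	8	/* hypothetical report size */

/* Hypothetical per-device state for this sketch. */
struct my_dev {
	struct usb_device *udev;
	struct urb *intr_urb;
	u8 *intr_buf;
};

static void my_intr_complete(struct urb *urb)
{
	struct my_dev *mydev = urb->context;
	int ret;

	/* Check the status field first, as the kerneldoc recommends. */
	if (urb->status) {
		if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN)
			return;		/* unlinked or device gone: stop */
		goto resubmit;		/* transient error: try again */
	}

	/* ... consume urb->actual_length bytes of mydev->intr_buf ... */

resubmit:
	/* Completion context may not sleep, hence GFP_ATOMIC. */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret)
		dev_err(&mydev->udev->dev, "resubmit failed: %d\n", ret);
}

static int my_start_intr(struct my_dev *mydev, int epnum, int bInterval)
{
	unsigned int pipe = usb_rcvintpipe(mydev->udev, epnum);

	/* kmalloc'd buffer, so it is suitable for DMA. */
	mydev->intr_buf = kmalloc(MY_INTR_BUF_LEN, GFP_KERNEL);
	if (!mydev->intr_buf)
		return -ENOMEM;

	mydev->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!mydev->intr_urb) {
		kfree(mydev->intr_buf);
		return -ENOMEM;
	}

	/* interval is passed as the endpoint descriptor's bInterval value */
	usb_fill_int_urb(mydev->intr_urb, mydev->udev, pipe,
			 mydev->intr_buf, MY_INTR_BUF_LEN,
			 my_intr_complete, mydev, bInterval);

	return usb_submit_urb(mydev->intr_urb, GFP_KERNEL);
}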
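The anchor primitives declared above (init_usb_anchor(), usb_anchor_urb(), usb_kill_anchored_urbs(), ...) are typically used to make disconnect-time cleanup race-free. A sketch under the same assumptions; real drivers would embed the anchor in per-device state rather than use a global.

#include <linux/usb.h>

static struct usb_anchor my_anchor;	/* hypothetical; usually per-device */

static int my_submit_anchored(struct urb *urb)
{
	int ret;

	/* Anchor before submitting so cancellation can always find it. */
	usb_anchor_urb(urb, &my_anchor);
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		usb_unanchor_urb(urb);	/* submission failed: drop it */
	return ret;
}

static void my_disconnect_cleanup(void)
{
	/* Cancel everything still in flight and wait for the completions. */
	usb_kill_anchored_urbs(&my_anchor);
}

static void my_probe_setup(void)
{
	init_usb_anchor(&my_anchor);
}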
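For the synchronous-call section, a sketch of a blocking vendor control read using usb_control_msg_recv() and the USB_CTRL_GET_TIMEOUT constant defined above; the request number and payload size are made up. Unlike raw usb_control_msg(), the _send()/_recv() wrappers copy through an internally allocated buffer, so the data pointer need not be DMA-suitable, and a short read is reported as an error rather than a byte count.

#include <linux/usb.h>

/* Hypothetical vendor request 0x01 reading a 2-byte version word. */
static int my_read_version(struct usb_device *udev, __le16 *version)
{
	return usb_control_msg_recv(udev, 0,
				    0x01,	/* bRequest (made up) */
				    USB_DIR_IN | USB_TYPE_VENDOR |
					USB_RECIP_DEVICE,
				    0, 0,	/* wValue, wIndex */
				    version, sizeof(*version),
				    USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
}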
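Finally, a sketch of the pipe-cookie round trip: build a pipe with one of the macros above, then decode its fields with the accessors and query the endpoint's maximum packet size via usb_maxpacket(). Endpoint 2 is a made-up example.

#include <linux/usb.h>

static void my_show_pipe(struct usb_device *udev)
{
	/* Encodes direction, device address, endpoint and type (bulk). */
	unsigned int pipe = usb_rcvbulkpipe(udev, 2);

	pr_info("dev %u ep %u type %u in=%d maxpacket=%u\n",
		usb_pipedevice(pipe), usb_pipeendpoint(pipe),
		usb_pipetype(pipe), usb_pipein(pipe) ? 1 : 0,
		usb_maxpacket(udev, pipe));
}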
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2018-2025 Intel Corporation */ #ifndef IEEE80211_I_H #define IEEE80211_I_H #include <linux/kernel.h> #include <linux/device.h> #include <linux/if_ether.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/workqueue.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/etherdevice.h> #include <linux/leds.h> #include <linux/idr.h> #include <linux/rhashtable.h> #include <linux/rbtree.h> #include <kunit/visibility.h> #include <net/ieee80211_radiotap.h> #include <net/cfg80211.h> #include <net/mac80211.h> #include <net/fq.h> #include "key.h" #include "sta_info.h" #include "debug.h" #include "drop.h" extern const struct cfg80211_ops mac80211_config_ops; struct ieee80211_local; struct ieee80211_mesh_fast_tx; /* Maximum number of broadcast/multicast frames to buffer when some of the * associated stations are using power saving. */ #define AP_MAX_BC_BUFFER 128 /* Maximum number of frames buffered to all STAs, including multicast frames. * Note: increasing this limit increases the potential memory requirement. Each * frame can be up to about 2 kB long. */ #define TOTAL_MAX_TX_BUFFER 512 /* Required encryption head and tailroom */ #define IEEE80211_ENCRYPT_HEADROOM 8 #define IEEE80211_ENCRYPT_TAILROOM 18 /* power level hasn't been configured (or set to automatic) */ #define IEEE80211_UNSET_POWER_LEVEL INT_MIN /* * Some APs experience problems when working with U-APSD. Decreasing the * probability of that happening by using legacy mode for all ACs but VO isn't * enough. * * Cisco 4410N originally forced us to enable VO by default only because it * treated non-VO ACs as legacy. * * However some APs (notably Netgear R7000) silently reclassify packets to * different ACs. Since u-APSD ACs require trigger frames for frame retrieval * clients would never see some frames (e.g. ARP responses) or would fetch them * accidentally after a long time. * * It makes little sense to enable u-APSD queues by default because it needs * userspace applications to be aware of it to actually take advantage of the * possible additional powersavings. Implicitly depending on driver autotrigger * frame support doesn't make much sense. */ #define IEEE80211_DEFAULT_UAPSD_QUEUES 0 #define IEEE80211_DEFAULT_MAX_SP_LEN \ IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL extern const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS]; #define IEEE80211_DEAUTH_FRAME_LEN (24 /* hdr */ + 2 /* reason */) #define IEEE80211_MAX_NAN_INSTANCE_ID 255 enum ieee80211_status_data { IEEE80211_STATUS_TYPE_MASK = 0x00f, IEEE80211_STATUS_TYPE_INVALID = 0, IEEE80211_STATUS_TYPE_SMPS = 1, IEEE80211_STATUS_TYPE_NEG_TTLM = 2, IEEE80211_STATUS_SUBDATA_MASK = 0x1ff0, }; static inline bool ieee80211_sta_keep_active(struct sta_info *sta, u8 ac) { /* Keep a station's queues on the active list for deficit accounting * purposes if it was active or queued during the last 100ms. 
*/ return time_before_eq(jiffies, sta->airtime[ac].last_active + HZ / 10); } struct ieee80211_bss { u32 device_ts_beacon, device_ts_presp; bool wmm_used; bool uapsd_supported; #define IEEE80211_MAX_SUPP_RATES 32 u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; size_t supp_rates_len; struct ieee80211_rate *beacon_rate; u32 vht_cap_info; /* * During association, we save an ERP value from a probe response so * that we can feed ERP info to the driver when the association * completes. These fields probably won't be up-to-date otherwise, * so you probably don't want to use them. */ bool has_erp_value; u8 erp_value; /* Keep track of the corruption of the last beacon/probe response. */ u8 corrupt_data; /* Keep track of what bits of information we have valid info for. */ u8 valid_data; }; /** * enum ieee80211_bss_corrupt_data_flags - BSS data corruption flags * @IEEE80211_BSS_CORRUPT_BEACON: last beacon frame received was corrupted * @IEEE80211_BSS_CORRUPT_PROBE_RESP: last probe response received was corrupted * * These are bss flags that are attached to a bss in the * @corrupt_data field of &struct ieee80211_bss. */ enum ieee80211_bss_corrupt_data_flags { IEEE80211_BSS_CORRUPT_BEACON = BIT(0), IEEE80211_BSS_CORRUPT_PROBE_RESP = BIT(1) }; /** * enum ieee80211_bss_valid_data_flags - BSS valid data flags * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE * * These are bss flags that are attached to a bss in the * @valid_data field of &struct ieee80211_bss. They show which parts * of the data structure were received as a result of an uncorrupted * beacon/probe response. */ enum ieee80211_bss_valid_data_flags { IEEE80211_BSS_VALID_WMM = BIT(1), IEEE80211_BSS_VALID_RATES = BIT(2), IEEE80211_BSS_VALID_ERP = BIT(3) }; typedef unsigned __bitwise ieee80211_tx_result; #define TX_CONTINUE ((__force ieee80211_tx_result) 0u) #define TX_DROP ((__force ieee80211_tx_result) 1u) #define TX_QUEUED ((__force ieee80211_tx_result) 2u) #define IEEE80211_TX_UNICAST BIT(1) #define IEEE80211_TX_PS_BUFFERED BIT(2) struct ieee80211_tx_data { struct sk_buff *skb; struct sk_buff_head skbs; struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; struct sta_info *sta; struct ieee80211_key *key; struct ieee80211_tx_rate rate; unsigned int flags; }; /** * enum ieee80211_packet_rx_flags - packet RX flags * @IEEE80211_RX_AMSDU: a-MSDU packet * @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed * @IEEE80211_RX_DEFERRED_RELEASE: frame was subjected to receive reordering * * These are per-frame flags that are attached to a frame in the * @rx_flags field of &struct ieee80211_rx_status. */ enum ieee80211_packet_rx_flags { IEEE80211_RX_AMSDU = BIT(3), IEEE80211_RX_MALFORMED_ACTION_FRM = BIT(4), IEEE80211_RX_DEFERRED_RELEASE = BIT(5), }; /** * enum ieee80211_rx_flags - RX data flags * * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported * to cfg80211_report_obss_beacon(). * * These flags are used across handling multiple interfaces * for a single frame.
*/ enum ieee80211_rx_flags { IEEE80211_RX_BEACON_REPORTED = BIT(0), }; struct ieee80211_rx_data { struct list_head *list; struct sk_buff *skb; struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; struct ieee80211_link_data *link; struct sta_info *sta; struct link_sta_info *link_sta; struct ieee80211_key *key; unsigned int flags; /* * Index into sequence numbers array, 0..16 * since the last (16) is used for non-QoS, * will be 16 on non-QoS frames. */ int seqno_idx; /* * Index into the security IV/PN arrays, 0..16 * since the last (16) is used for CCMP-encrypted * management frames, will be set to 16 on mgmt * frames and 0 on non-QoS frames. */ int security_idx; int link_id; union { struct { u32 iv32; u16 iv16; } tkip; struct { u8 pn[IEEE80211_CCMP_PN_LEN]; } ccm_gcm; }; }; struct ieee80211_csa_settings { const u16 *counter_offsets_beacon; const u16 *counter_offsets_presp; int n_counter_offsets_beacon; int n_counter_offsets_presp; u8 count; }; struct ieee80211_color_change_settings { u16 counter_offset_beacon; u16 counter_offset_presp; u8 count; }; struct beacon_data { u8 *head, *tail; int head_len, tail_len; struct ieee80211_meshconf_ie *meshconf; u16 cntdwn_counter_offsets[IEEE80211_MAX_CNTDWN_COUNTERS_NUM]; u8 cntdwn_current_counter; struct cfg80211_mbssid_elems *mbssid_ies; struct cfg80211_rnr_elems *rnr_ies; struct rcu_head rcu_head; }; struct probe_resp { struct rcu_head rcu_head; int len; u16 cntdwn_counter_offsets[IEEE80211_MAX_CNTDWN_COUNTERS_NUM]; u8 data[]; }; struct fils_discovery_data { struct rcu_head rcu_head; int len; u8 data[]; }; struct unsol_bcast_probe_resp_data { struct rcu_head rcu_head; int len; u8 data[]; }; struct ps_data { /* yes, this looks ugly, but guarantees that we can later use * bitmap_empty :) * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */ u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)] __aligned(__alignof__(unsigned long)); struct sk_buff_head bc_buf; atomic_t num_sta_ps; /* number of stations in PS mode */ int dtim_count; bool dtim_bc_mc; }; struct ieee80211_if_ap { struct list_head vlans; /* write-protected with RTNL and local->mtx */ struct ps_data ps; atomic_t num_mcast_sta; /* number of stations receiving multicast */ bool multicast_to_unicast; bool active; }; struct ieee80211_if_vlan { struct list_head list; /* write-protected with RTNL and local->mtx */ /* used for all tx if the VLAN is configured to 4-addr mode */ struct sta_info __rcu *sta; atomic_t num_mcast_sta; /* number of stations receiving multicast */ }; struct mesh_stats { __u32 fwded_mcast; /* Mesh forwarded multicast frames */ __u32 fwded_unicast; /* Mesh forwarded unicast frames */ __u32 fwded_frames; /* Mesh total forwarded frames */ __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/ __u32 dropped_frames_no_route; /* Not transmitted, no route found */ }; #define PREQ_Q_F_START 0x1 #define PREQ_Q_F_REFRESH 0x2 struct mesh_preq_queue { struct list_head list; u8 dst[ETH_ALEN]; u8 flags; }; struct ieee80211_roc_work { struct list_head list; struct ieee80211_sub_if_data *sdata; struct ieee80211_channel *chan; bool started, abort, hw_begun, notified; bool on_channel; unsigned long start_time; u32 duration, req_duration; struct sk_buff *frame; u64 cookie, mgmt_tx_cookie; enum ieee80211_roc_type type; }; /* flags used in struct ieee80211_if_managed.flags */ enum ieee80211_sta_flags { IEEE80211_STA_CONNECTION_POLL = BIT(1), IEEE80211_STA_CONTROL_PORT = BIT(2), IEEE80211_STA_MFP_ENABLED = BIT(6), IEEE80211_STA_UAPSD_ENABLED 
= BIT(7), IEEE80211_STA_NULLFUNC_ACKED = BIT(8), IEEE80211_STA_ENABLE_RRM = BIT(15), }; enum ieee80211_conn_mode { IEEE80211_CONN_MODE_S1G, IEEE80211_CONN_MODE_LEGACY, IEEE80211_CONN_MODE_HT, IEEE80211_CONN_MODE_VHT, IEEE80211_CONN_MODE_HE, IEEE80211_CONN_MODE_EHT, }; #define IEEE80211_CONN_MODE_HIGHEST IEEE80211_CONN_MODE_EHT enum ieee80211_conn_bw_limit { IEEE80211_CONN_BW_LIMIT_20, IEEE80211_CONN_BW_LIMIT_40, IEEE80211_CONN_BW_LIMIT_80, IEEE80211_CONN_BW_LIMIT_160, /* also 80+80 */ IEEE80211_CONN_BW_LIMIT_320, }; struct ieee80211_conn_settings { enum ieee80211_conn_mode mode; enum ieee80211_conn_bw_limit bw_limit; }; extern const struct ieee80211_conn_settings ieee80211_conn_settings_unlimited; struct ieee80211_mgd_auth_data { struct cfg80211_bss *bss; unsigned long timeout; int tries; u16 algorithm, expected_transaction; unsigned long userspace_selectors[BITS_TO_LONGS(128)]; u8 key[WLAN_KEY_LEN_WEP104]; u8 key_len, key_idx; bool done, waiting; bool peer_confirmed; bool timeout_started; int link_id; u8 ap_addr[ETH_ALEN] __aligned(2); u16 sae_trans, sae_status; size_t data_len; u8 data[]; }; struct ieee80211_mgd_assoc_data { struct { struct cfg80211_bss *bss; u8 addr[ETH_ALEN] __aligned(2); u8 ap_ht_param; struct ieee80211_vht_cap ap_vht_cap; size_t elems_len; u8 *elems; /* pointing to inside ie[] below */ struct ieee80211_conn_settings conn; u16 status; bool disabled; } link[IEEE80211_MLD_MAX_NUM_LINKS]; u8 ap_addr[ETH_ALEN] __aligned(2); /* this is for a workaround, so we use it only for non-MLO */ const u8 *supp_rates; u8 supp_rates_len; unsigned long timeout; int tries; u8 prev_ap_addr[ETH_ALEN]; u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 ssid_len; bool wmm, uapsd; bool need_beacon; bool synced; bool timeout_started; bool comeback; /* whether the AP has requested association comeback */ bool s1g; bool spp_amsdu; s8 assoc_link_id; __le16 ext_mld_capa_ops; u8 fils_nonces[2 * FILS_NONCE_LEN]; u8 fils_kek[FILS_MAX_KEK_LEN]; size_t fils_kek_len; size_t ie_len; u8 *ie_pos; /* used to fill ie[] with link[].elems */ u8 ie[]; }; struct ieee80211_sta_tx_tspec { /* timestamp of the first packet in the time slice */ unsigned long time_slice_start; u32 admitted_time; /* in usecs, unlike over the air */ u8 tsid; s8 up; /* signed to be able to invalidate with -1 during teardown */ /* consumed TX time in microseconds in the time slice */ u32 consumed_tx_time; enum { TX_TSPEC_ACTION_NONE = 0, TX_TSPEC_ACTION_DOWNGRADE, TX_TSPEC_ACTION_STOP_DOWNGRADE, } action; bool downgraded; }; /* Advertised TID-to-link mapping info */ struct ieee80211_adv_ttlm_info { /* time in TUs at which the new mapping is established, or 0 if there is * no planned advertised TID-to-link mapping */ u16 switch_time; u32 duration; /* duration of the planned T2L map in TUs */ u16 map; /* map of usable links for all TIDs */ bool active; /* whether the advertised mapping is active or not */ }; DECLARE_EWMA(beacon_signal, 4, 4) struct ieee80211_if_managed { struct timer_list timer; struct timer_list conn_mon_timer; struct timer_list bcn_mon_timer; struct wiphy_work monitor_work; struct wiphy_work beacon_connection_loss_work; struct wiphy_work csa_connection_drop_work; unsigned long beacon_timeout; unsigned long probe_timeout; int probe_send_count; bool nullfunc_failed; u8 connection_loss:1, driver_disconnect:1, reconnect:1, associated:1; struct ieee80211_mgd_auth_data *auth_data; struct ieee80211_mgd_assoc_data *assoc_data; unsigned long userspace_selectors[BITS_TO_LONGS(128)]; bool powersave; /* powersave requested for this iface */ bool 
broken_ap; /* AP is broken -- turn off powersave */ unsigned int flags; u16 mcast_seq_last; bool status_acked; bool status_received; __le16 status_fc; enum { IEEE80211_MFP_DISABLED, IEEE80211_MFP_OPTIONAL, IEEE80211_MFP_REQUIRED } mfp; /* management frame protection */ /* * Bitmask of enabled u-apsd queues, * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association * to take effect. */ unsigned int uapsd_queues; /* * Maximum number of buffered frames AP can deliver during a * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar. * Needs a new association to take effect. */ unsigned int uapsd_max_sp_len; u8 use_4addr; /* * State variables for keeping track of RSSI of the AP currently * connected to and informing driver when RSSI has gone * below/above a certain threshold. */ int rssi_min_thold, rssi_max_thold; struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */ struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */ struct ieee80211_vht_cap vht_capa; /* configured VHT overrides */ struct ieee80211_vht_cap vht_capa_mask; /* Valid parts of vht_capa */ struct ieee80211_s1g_cap s1g_capa; /* configured S1G overrides */ struct ieee80211_s1g_cap s1g_capa_mask; /* valid s1g_capa bits */ /* TDLS support */ u8 tdls_peer[ETH_ALEN] __aligned(2); struct wiphy_delayed_work tdls_peer_del_work; struct sk_buff *orig_teardown_skb; /* The original teardown skb */ struct sk_buff *teardown_skb; /* A copy to send through the AP */ spinlock_t teardown_lock; /* To lock changing teardown_skb */ bool tdls_wider_bw_prohibited; /* WMM-AC TSPEC support */ struct ieee80211_sta_tx_tspec tx_tspec[IEEE80211_NUM_ACS]; /* Use a separate work struct so that we can do something here * while the sdata->work is flushing the queues, for example. * otherwise, in scenarios where we hardly get any traffic out * on the BE queue, but there's a lot of VO traffic, we might * get stuck in a downgraded situation and flush takes forever. */ struct wiphy_delayed_work tx_tspec_wk; /* Information elements from the last transmitted (Re)Association * Request frame. 
*/ u8 *assoc_req_ies; size_t assoc_req_ies_len; struct wiphy_delayed_work ml_reconf_work; u16 removed_links; /* TID-to-link mapping support */ struct wiphy_delayed_work ttlm_work; struct ieee80211_adv_ttlm_info ttlm_info; struct wiphy_work teardown_ttlm_work; /* dialog token enumerator for neg TTLM request */ u8 dialog_token_alloc; struct wiphy_delayed_work neg_ttlm_timeout_work; /* Locally initiated multi-link reconfiguration */ struct { struct ieee80211_mgd_assoc_data *add_links_data; struct wiphy_delayed_work wk; u16 removed_links; u16 added_links; u8 dialog_token; } reconf; /* Support for epcs */ struct { bool enabled; u8 dialog_token; } epcs; }; struct ieee80211_if_ibss { struct timer_list timer; struct wiphy_work csa_connection_drop_work; unsigned long last_scan_completed; u32 basic_rates; bool fixed_bssid; bool fixed_channel; bool privacy; bool control_port; bool userspace_handles_dfs; u8 bssid[ETH_ALEN] __aligned(2); u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 ssid_len, ie_len; u8 *ie; struct cfg80211_chan_def chandef; unsigned long ibss_join_req; /* probe response/beacon for IBSS */ struct beacon_data __rcu *presp; struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */ struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */ spinlock_t incomplete_lock; struct list_head incomplete_stations; enum { IEEE80211_IBSS_MLME_SEARCH, IEEE80211_IBSS_MLME_JOINED, } state; }; /** * struct ieee80211_if_ocb - OCB mode state * * @housekeeping_timer: timer for periodic invocation of a housekeeping task * @wrkq_flags: OCB deferred task action * @incomplete_lock: delayed STA insertion lock * @incomplete_stations: list of STAs waiting for delayed insertion * @joined: indication if the interface is connected to an OCB network */ struct ieee80211_if_ocb { struct timer_list housekeeping_timer; unsigned long wrkq_flags; spinlock_t incomplete_lock; struct list_head incomplete_stations; bool joined; }; /** * struct ieee80211_mesh_sync_ops - Extensible synchronization framework interface * * these declarations define the interface, which enables * vendor-specific mesh synchronization * * @rx_bcn_presp: beacon/probe response was received * @adjust_tsf: TSF adjustment method */ struct ieee80211_mesh_sync_ops { void (*rx_bcn_presp)(struct ieee80211_sub_if_data *sdata, u16 stype, struct ieee80211_mgmt *mgmt, unsigned int len, const struct ieee80211_meshconf_ie *mesh_cfg, struct ieee80211_rx_status *rx_status); /* should be called with beacon_data under RCU read lock */ void (*adjust_tsf)(struct ieee80211_sub_if_data *sdata, struct beacon_data *beacon); /* add other framework functions here */ }; struct mesh_csa_settings { struct rcu_head rcu_head; struct cfg80211_csa_settings settings; }; /** * struct mesh_table - mesh hash table * * @known_gates: list of known mesh gates and their mpaths by the station. The * gate's mpath may or may not be resolved and active. 
* @gates_lock: protects updates to known_gates * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr * @walk_head: linked list containing all mesh_path objects * @walk_lock: lock protecting walk_head * @entries: number of entries in the table */ struct mesh_table { struct hlist_head known_gates; spinlock_t gates_lock; struct rhashtable rhead; struct hlist_head walk_head; spinlock_t walk_lock; atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ }; /** * struct mesh_tx_cache - mesh fast xmit header cache * * @rht: hash table containing struct ieee80211_mesh_fast_tx, using skb DA as key * @walk_head: linked list containing all ieee80211_mesh_fast_tx objects * @walk_lock: lock protecting walk_head and rht */ struct mesh_tx_cache { struct rhashtable rht; struct hlist_head walk_head; spinlock_t walk_lock; }; struct ieee80211_if_mesh { struct timer_list housekeeping_timer; struct timer_list mesh_path_timer; struct timer_list mesh_path_root_timer; unsigned long wrkq_flags; unsigned long mbss_changed[64 / BITS_PER_LONG]; bool userspace_handles_dfs; u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; size_t mesh_id_len; /* Active Path Selection Protocol Identifier */ u8 mesh_pp_id; /* Active Path Selection Metric Identifier */ u8 mesh_pm_id; /* Congestion Control Mode Identifier */ u8 mesh_cc_id; /* Synchronization Protocol Identifier */ u8 mesh_sp_id; /* Authentication Protocol Identifier */ u8 mesh_auth_id; /* Local mesh Sequence Number */ u32 sn; /* Last used PREQ ID */ u32 preq_id; atomic_t mpaths; /* Timestamp of last SN update */ unsigned long last_sn_update; /* Time when it's ok to send next PERR */ unsigned long next_perr; /* Timestamp of last PREQ sent */ unsigned long last_preq; struct mesh_rmc *rmc; spinlock_t mesh_preq_queue_lock; struct mesh_preq_queue preq_queue; int preq_queue_len; struct mesh_stats mshstats; struct mesh_config mshcfg; atomic_t estab_plinks; atomic_t mesh_seqnum; bool accepting_plinks; int num_gates; struct beacon_data __rcu *beacon; const u8 *ie; u8 ie_len; enum { IEEE80211_MESH_SEC_NONE = 0x0, IEEE80211_MESH_SEC_AUTHED = 0x1, IEEE80211_MESH_SEC_SECURED = 0x2, } security; bool user_mpm; /* Extensible Synchronization Framework */ const struct ieee80211_mesh_sync_ops *sync_ops; s64 sync_offset_clockdrift_max; spinlock_t sync_offset_lock; /* mesh power save */ enum nl80211_mesh_power_mode nonpeer_pm; int ps_peers_light_sleep; int ps_peers_deep_sleep; struct ps_data ps; /* Channel Switching Support */ struct mesh_csa_settings __rcu *csa; enum { IEEE80211_MESH_CSA_ROLE_NONE, IEEE80211_MESH_CSA_ROLE_INIT, IEEE80211_MESH_CSA_ROLE_REPEATER, } csa_role; u8 chsw_ttl; u16 pre_value; /* offset from skb->data while building IE */ int meshconf_offset; struct mesh_table mesh_paths; struct mesh_table mpp_paths; /* Store paths for MPP&MAP */ int mesh_paths_generation; int mpp_paths_generation; struct mesh_tx_cache tx_cache; }; #ifdef CONFIG_MAC80211_MESH #define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \ do { (msh)->mshstats.name++; } while (0) #else #define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \ do { } while (0) #endif /** * enum ieee80211_sub_if_data_flags - virtual interface flags * * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between * associated stations and deliver multicast frames both * back to wireless media and to the local net stack. * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume. 
* @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver * @IEEE80211_SDATA_DISCONNECT_HW_RESTART: Disconnect after hardware restart * recovery */ enum ieee80211_sub_if_data_flags { IEEE80211_SDATA_ALLMULTI = BIT(0), IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3), IEEE80211_SDATA_DISCONNECT_RESUME = BIT(4), IEEE80211_SDATA_IN_DRIVER = BIT(5), IEEE80211_SDATA_DISCONNECT_HW_RESTART = BIT(6), }; /** * enum ieee80211_sdata_state_bits - virtual interface state bits * @SDATA_STATE_RUNNING: virtual interface is up & running; this * mirrors netif_running() but is separate for interface type * change handling while the interface is up * @SDATA_STATE_OFFCHANNEL: This interface is currently in offchannel * mode, so queues are stopped * @SDATA_STATE_OFFCHANNEL_BEACON_STOPPED: Beaconing was stopped due * to offchannel, reset when offchannel returns */ enum ieee80211_sdata_state_bits { SDATA_STATE_RUNNING, SDATA_STATE_OFFCHANNEL, SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, }; /** * enum ieee80211_chanctx_mode - channel context configuration mode * * @IEEE80211_CHANCTX_SHARED: channel context may be used by * multiple interfaces * @IEEE80211_CHANCTX_EXCLUSIVE: channel context can be used * only by a single interface. This can be used for example for * non-fixed channel IBSS. */ enum ieee80211_chanctx_mode { IEEE80211_CHANCTX_SHARED, IEEE80211_CHANCTX_EXCLUSIVE }; /** * enum ieee80211_chanctx_replace_state - channel context replacement state * * This is used for channel context in-place reservations that require channel * context switch/swap. * * @IEEE80211_CHANCTX_REPLACE_NONE: no replacement is taking place * @IEEE80211_CHANCTX_WILL_BE_REPLACED: this channel context will be replaced * by a (not yet registered) channel context pointed to by %replace_ctx. * @IEEE80211_CHANCTX_REPLACES_OTHER: this (not yet registered) channel context * replaces an existing channel context pointed to by %replace_ctx. */ enum ieee80211_chanctx_replace_state { IEEE80211_CHANCTX_REPLACE_NONE, IEEE80211_CHANCTX_WILL_BE_REPLACED, IEEE80211_CHANCTX_REPLACES_OTHER, }; struct ieee80211_chanctx { struct list_head list; struct rcu_head rcu_head; struct list_head assigned_links; struct list_head reserved_links; enum ieee80211_chanctx_replace_state replace_state; struct ieee80211_chanctx *replace_ctx; enum ieee80211_chanctx_mode mode; bool driver_present; /* temporary data for search algorithm etc. */ struct ieee80211_chan_req req; bool radar_detected; /* MUST be last - ends in a flexible-array member. */ struct ieee80211_chanctx_conf conf; }; struct mac80211_qos_map { struct cfg80211_qos_map qos_map; struct rcu_head rcu_head; }; enum txq_info_flags { IEEE80211_TXQ_STOP, IEEE80211_TXQ_AMPDU, IEEE80211_TXQ_NO_AMSDU, IEEE80211_TXQ_DIRTY, }; /** * struct txq_info - per-TID queue * * @tin: contains packets split into multiple flows * @def_cvars: codel vars for the @tin's default_flow * @cstats: codel statistics for this queue * @frags: used to keep fragments created after dequeue * @schedule_order: used with ieee80211_local->active_txqs * @schedule_round: counter to prevent infinite loops on TXQ scheduling * @flags: TXQ flags from &enum txq_info_flags * @txq: the driver visible part */ struct txq_info { struct fq_tin tin; struct codel_vars def_cvars; struct codel_stats cstats; u16 schedule_round; struct list_head schedule_order; struct sk_buff_head frags; unsigned long flags; /* keep last!
struct ieee80211_if_mntr { u32 flags; u8 mu_follow_addr[ETH_ALEN] __aligned(2); struct list_head list; }; /** * struct ieee80211_if_nan - NAN state * * @conf: current NAN configuration * @func_lock: lock protecting @function_inst_ids * @function_inst_ids: IDR tracking the allocated NAN function instance IDs */ struct ieee80211_if_nan { struct cfg80211_nan_conf conf; /* protects function_inst_ids */ spinlock_t func_lock; struct idr function_inst_ids; }; struct ieee80211_link_data_managed { u8 bssid[ETH_ALEN] __aligned(2); u8 dtim_period; enum ieee80211_smps_mode req_smps, /* requested smps mode */ driver_smps_mode; /* smps mode requested by the driver */ struct ieee80211_conn_settings conn; s16 p2p_noa_index; bool tdls_chan_switch_prohibited; bool have_beacon; bool tracking_signal_avg; bool disable_wmm_tracking; bool operating_11g_mode; struct { struct wiphy_delayed_work switch_work; struct cfg80211_chan_def ap_chandef; struct ieee80211_parsed_tpe tpe; unsigned long time; bool waiting_bcn; bool ignored_same_chan; bool blocked_tx; } csa; struct wiphy_work request_smps_work; /* used to reconfigure hardware SM PS */ struct wiphy_work recalc_smps; bool beacon_crc_valid; u32 beacon_crc; struct ewma_beacon_signal ave_beacon_signal; int last_ave_beacon_signal; /* * Number of Beacon frames used in ave_beacon_signal. This can be used * to avoid generating less reliable cqm events that would be based * only on a couple of received frames. */ unsigned int count_beacon_signal; /* Number of times beacon loss was invoked. */ unsigned int beacon_loss_count; /* * Last Beacon frame signal strength average (ave_beacon_signal / 16) * that triggered a cqm event. 0 indicates that no event has been * generated for the current association. */ int last_cqm_event_signal; int wmm_last_param_set; int mu_edca_last_param_set; }; struct ieee80211_link_data_ap { struct beacon_data __rcu *beacon; struct probe_resp __rcu *probe_resp; struct fils_discovery_data __rcu *fils_discovery; struct unsol_bcast_probe_resp_data __rcu *unsol_bcast_probe_resp; /* to be used after channel switch.
*/ struct cfg80211_beacon_data *next_beacon; }; struct ieee80211_link_data { struct ieee80211_sub_if_data *sdata; unsigned int link_id; struct list_head assigned_chanctx_list; /* protected by wiphy mutex */ struct list_head reserved_chanctx_list; /* protected by wiphy mutex */ /* multicast keys only */ struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + NUM_DEFAULT_BEACON_KEYS]; struct ieee80211_key __rcu *default_multicast_key; struct ieee80211_key __rcu *default_mgmt_key; struct ieee80211_key __rcu *default_beacon_key; bool operating_11g_mode; struct { struct wiphy_work finalize_work; struct ieee80211_chan_req chanreq; } csa; struct wiphy_work color_change_finalize_work; struct wiphy_delayed_work color_collision_detect_work; u64 color_bitmap; /* context reservation -- protected with wiphy mutex */ struct ieee80211_chanctx *reserved_chanctx; struct ieee80211_chan_req reserved; bool reserved_radar_required; bool reserved_ready; u8 needed_rx_chains; enum ieee80211_smps_mode smps_mode; int user_power_level; /* in dBm */ int ap_power_level; /* in dBm */ bool radar_required; struct wiphy_delayed_work dfs_cac_timer_work; union { struct ieee80211_link_data_managed mgd; struct ieee80211_link_data_ap ap; } u; struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS]; struct ieee80211_bss_conf *conf; #ifdef CONFIG_MAC80211_DEBUGFS struct dentry *debugfs_dir; #endif }; struct ieee80211_sub_if_data { struct list_head list; struct wireless_dev wdev; /* keys */ struct list_head key_list; /* count for keys needing tailroom space allocation */ int crypto_tx_tailroom_needed_cnt; int crypto_tx_tailroom_pending_dec; struct wiphy_delayed_work dec_tailroom_needed_wk; struct net_device *dev; struct ieee80211_local *local; unsigned int flags; unsigned long state; char name[IFNAMSIZ]; struct ieee80211_fragment_cache frags; /* TID bitmap for NoAck policy */ u16 noack_map; /* bit field of ACM bits (BIT(802.1D tag)) */ u8 wmm_acm; struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS]; struct ieee80211_key __rcu *default_unicast_key; u16 sequence_number; u16 mld_mcast_seq; __be16 control_port_protocol; bool control_port_no_encrypt; bool control_port_no_preauth; bool control_port_over_nl80211; atomic_t num_tx_queued; struct mac80211_qos_map __rcu *qos_map; struct wiphy_work work; struct sk_buff_head skb_queue; struct sk_buff_head status_queue; /* * AP this belongs to: self in AP mode and * corresponding AP in VLAN mode, NULL for * all others (might be needed later in IBSS) */ struct ieee80211_if_ap *bss; /* bitmap of allowed (non-MCS) rate indexes for rate control */ u32 rc_rateidx_mask[NUM_NL80211_BANDS]; bool rc_has_mcs_mask[NUM_NL80211_BANDS]; u8 rc_rateidx_mcs_mask[NUM_NL80211_BANDS][IEEE80211_HT_MCS_MASK_LEN]; bool rc_has_vht_mcs_mask[NUM_NL80211_BANDS]; u16 rc_rateidx_vht_mcs_mask[NUM_NL80211_BANDS][NL80211_VHT_NSS_MAX]; /* Beacon frame (non-MCS) rate (as a bitmap) */ u32 beacon_rateidx_mask[NUM_NL80211_BANDS]; bool beacon_rate_set; union { struct ieee80211_if_ap ap; struct ieee80211_if_vlan vlan; struct ieee80211_if_managed mgd; struct ieee80211_if_ibss ibss; struct ieee80211_if_mesh mesh; struct ieee80211_if_ocb ocb; struct ieee80211_if_mntr mntr; struct ieee80211_if_nan nan; } u; struct ieee80211_link_data deflink; struct ieee80211_link_data __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; /* for ieee80211_set_active_links_async() */ struct wiphy_work activate_links_work; u16 desired_active_links; u16 restart_active_links; #ifdef CONFIG_MAC80211_DEBUGFS struct { struct dentry *subdir_stations; 
struct dentry *default_unicast_key; struct dentry *default_multicast_key; struct dentry *default_mgmt_key; struct dentry *default_beacon_key; } debugfs; #endif /* must be last, dynamically sized area in this! */ struct ieee80211_vif vif; }; static inline struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p) { return container_of(p, struct ieee80211_sub_if_data, vif); } #define sdata_dereference(p, sdata) \ wiphy_dereference(sdata->local->hw.wiphy, p) #define for_each_sdata_link(_local, _link) \ /* outer loop just to define the variables ... */ \ for (struct ieee80211_sub_if_data *___sdata = NULL; \ !___sdata; \ ___sdata = (void *)~0 /* always stop */) \ list_for_each_entry(___sdata, &(_local)->interfaces, list) \ if (ieee80211_sdata_running(___sdata)) \ for (int ___link_id = 0; \ ___link_id < ARRAY_SIZE(___sdata->link); \ ___link_id++) \ if ((_link = wiphy_dereference((_local)->hw.wiphy, \ ___sdata->link[___link_id]))) #define for_each_link_data(sdata, __link) \ struct ieee80211_sub_if_data *__sdata = sdata; \ for (int __link_id = 0; \ __link_id < ARRAY_SIZE((__sdata)->link); __link_id++) \ if ((!(__sdata)->vif.valid_links || \ (__sdata)->vif.valid_links & BIT(__link_id)) && \ ((__link) = sdata_dereference((__sdata)->link[__link_id], \ (__sdata))))
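/*
 * Illustrative sketches (assumed caller context, not from this file):
 * both iteration macros resolve links with wiphy_dereference(), so the
 * wiphy mutex must be held. Walking every active link of every running
 * interface:
 *
 *	struct ieee80211_link_data *link;
 *
 *	for_each_sdata_link(local, link)
 *		ieee80211_link_info_change_notify(link->sdata, link,
 *						  BSS_CHANGED_TXPOWER);
 *
 * Walking only the valid links of one interface (note the macro declares
 * a helper variable, so use it at most once per scope):
 *
 *	for_each_link_data(sdata, link)
 *		ieee80211_link_info_change_notify(sdata, link,
 *						  BSS_CHANGED_TXPOWER);
 */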
static inline int ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems, struct cfg80211_rnr_elems *rnr_elems, u8 i) { int len = 0; if (!elems || !elems->cnt || i > elems->cnt) return 0; if (i < elems->cnt) { len = elems->elem[i].len; if (rnr_elems) { len += rnr_elems->elem[i].len; for (i = elems->cnt; i < rnr_elems->cnt; i++) len += rnr_elems->elem[i].len; } return len; } /* i == elems->cnt, calculate total length of all MBSSID elements */ for (i = 0; i < elems->cnt; i++) len += elems->elem[i].len; if (rnr_elems) { for (i = 0; i < rnr_elems->cnt; i++) len += rnr_elems->elem[i].len; } return len; } enum { IEEE80211_RX_MSG = 1, IEEE80211_TX_STATUS_MSG = 2, }; enum queue_stop_reason { IEEE80211_QUEUE_STOP_REASON_DRIVER, IEEE80211_QUEUE_STOP_REASON_PS, IEEE80211_QUEUE_STOP_REASON_CSA, IEEE80211_QUEUE_STOP_REASON_AGGREGATION, IEEE80211_QUEUE_STOP_REASON_SUSPEND, IEEE80211_QUEUE_STOP_REASON_SKB_ADD, IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL, IEEE80211_QUEUE_STOP_REASON_FLUSH, IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN, IEEE80211_QUEUE_STOP_REASON_RESERVE_TID, IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE, IEEE80211_QUEUE_STOP_REASONS, }; #ifdef CONFIG_MAC80211_LEDS struct tpt_led_trigger { char name[32]; const struct ieee80211_tpt_blink *blink_table; unsigned int blink_table_len; struct timer_list timer; struct ieee80211_local *local; unsigned long prev_traffic; unsigned long tx_bytes, rx_bytes; unsigned int active, want; bool running; }; #endif /** * enum mac80211_scan_flags - currently active scan mode * * @SCAN_SW_SCANNING: We're currently in the process of scanning but may as * well be on the operating channel * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to * determine if we are on the operating channel or not * @SCAN_ONCHANNEL_SCANNING: Do a software scan on only the current operating * channel. This should not interrupt normal traffic. * @SCAN_COMPLETED: Set for our scan work function when the driver reported * that the scan completed. * @SCAN_ABORTED: Set for our scan work function when the driver reported * a scan complete for an aborted scan. * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being * cancelled. * @SCAN_BEACON_WAIT: Set whenever we're passive scanning because of radar/no-IR * and could send a probe request after receiving a beacon. * @SCAN_BEACON_DONE: Beacon received, we can now send a probe request */ enum mac80211_scan_flags { SCAN_SW_SCANNING, SCAN_HW_SCANNING, SCAN_ONCHANNEL_SCANNING, SCAN_COMPLETED, SCAN_ABORTED, SCAN_HW_CANCELLED, SCAN_BEACON_WAIT, SCAN_BEACON_DONE, }; /** * enum mac80211_scan_state - scan state machine states * * @SCAN_DECISION: Main entry point to the scan state machine, this state * determines if we should keep on scanning or switch back to the * operating channel * @SCAN_SET_CHANNEL: Set the next channel to be scanned * @SCAN_SEND_PROBE: Send probe requests and wait for probe responses * @SCAN_SUSPEND: Suspend the scan and go back to operating channel to * send out data * @SCAN_RESUME: Resume the scan and scan the next channel * @SCAN_ABORT: Abort the scan and go back to operating channel */ enum mac80211_scan_state { SCAN_DECISION, SCAN_SET_CHANNEL, SCAN_SEND_PROBE, SCAN_SUSPEND, SCAN_RESUME, SCAN_ABORT, }; DECLARE_STATIC_KEY_FALSE(aql_disable); struct ieee80211_local { /* embed the driver visible part. * don't cast (use the static inlines below), but we keep * it first anyway so they become a no-op */ struct ieee80211_hw hw; struct fq fq; struct codel_vars *cvars; struct codel_params cparams; /* protects active_txqs and txqi->schedule_order */ spinlock_t active_txq_lock[IEEE80211_NUM_ACS]; struct list_head active_txqs[IEEE80211_NUM_ACS]; u16 schedule_round[IEEE80211_NUM_ACS]; /* serializes ieee80211_handle_wake_tx_queue */ spinlock_t handle_wake_tx_queue_lock; u16 airtime_flags; u32 aql_txq_limit_low[IEEE80211_NUM_ACS]; u32 aql_txq_limit_high[IEEE80211_NUM_ACS]; u32 aql_threshold; atomic_t aql_total_pending_airtime; atomic_t aql_ac_pending_airtime[IEEE80211_NUM_ACS]; const struct ieee80211_ops *ops; /* * private workqueue to mac80211. mac80211 makes this accessible * via ieee80211_queue_work() */ struct workqueue_struct *workqueue; unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES]; int q_stop_reasons[IEEE80211_MAX_QUEUES][IEEE80211_QUEUE_STOP_REASONS]; /* also used to protect ampdu_ac_queue and ampdu_ac_stop_refcnt */ spinlock_t queue_stop_reason_lock; int open_count; int monitors, virt_monitors, tx_mntrs; /* number of interfaces with corresponding FIF_ flags */ int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll, fif_probe_req; bool probe_req_reg; bool rx_mcast_action_reg; unsigned int filter_flags; /* FIF_* */ bool wiphy_ciphers_allocated; struct cfg80211_chan_def dflt_chandef; bool emulate_chanctx; /* protects the aggregated multicast list and filter calls */ spinlock_t filter_lock; /* used for uploading changed mc list */ struct wiphy_work reconfig_filter; /* aggregated multicast list */ struct netdev_hw_addr_list mc_list; bool tim_in_locked_section; /* see ieee80211_beacon_get() */ /* * suspended is true if we finished all the suspend _and_ we have * not yet come up from resume. This is to be used by mac80211 * to ensure driver sanity during suspend and mac80211's own * sanity. It can eventually be used for WoW as well. */ bool suspended; /* suspending is true during the whole suspend process */ bool suspending; /* * Resuming is true while suspended, but when we're reprogramming the * hardware -- at that time it's allowed to use ieee80211_queue_work() * again even though some other parts of the stack are still suspended * and we still drop received frames to avoid waking the stack.
*/ bool resuming; /* * quiescing is true during the suspend process _only_ to * ease timer cancelling etc. */ bool quiescing; /* device is started */ bool started; /* device is during a HW reconfig */ bool in_reconfig; /* reconfiguration failed ... suppress some warnings etc. */ bool reconfig_failure; /* wowlan is enabled -- don't reconfig on resume */ bool wowlan; struct wiphy_work radar_detected_work; /* number of RX chains the hardware has */ u8 rx_chains; /* bitmap of which sbands were copied */ u8 sband_allocated; int tx_headroom; /* required headroom for hardware/radiotap */ /* Tasklet and skb queue to process calls from IRQ mode. All frames * added to skb_queue will be processed, but frames in * skb_queue_unreliable may be dropped if the total length of these * queues increases over the limit. */ #define IEEE80211_IRQSAFE_QUEUE_LIMIT 128 struct tasklet_struct tasklet; struct sk_buff_head skb_queue; struct sk_buff_head skb_queue_unreliable; spinlock_t rx_path_lock; /* Station data */ /* * The list, hash table and counter are protected * by the wiphy mutex, reads are done with RCU. */ spinlock_t tim_lock; unsigned long num_sta; struct list_head sta_list; struct rhltable sta_hash; struct rhltable link_sta_hash; struct timer_list sta_cleanup; int sta_generation; struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; struct tasklet_struct tx_pending_tasklet; struct tasklet_struct wake_txqs_tasklet; atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES]; /* number of interfaces with allmulti RX */ atomic_t iff_allmultis; struct rate_control_ref *rate_ctrl; struct arc4_ctx wep_tx_ctx; struct arc4_ctx wep_rx_ctx; u32 wep_iv; /* see iface.c */ struct list_head interfaces; struct list_head mon_list; /* only that are IFF_UP */ struct mutex iflist_mtx; /* Scanning and BSS list */ unsigned long scanning; struct cfg80211_ssid scan_ssid; struct cfg80211_scan_request *int_scan_req; struct cfg80211_scan_request __rcu *scan_req; struct ieee80211_scan_request *hw_scan_req; struct cfg80211_chan_def scan_chandef; enum nl80211_band hw_scan_band; int scan_channel_idx; int scan_ies_len; int hw_scan_ies_bufsize; struct cfg80211_scan_info scan_info; struct wiphy_work sched_scan_stopped_work; struct ieee80211_sub_if_data __rcu *sched_scan_sdata; struct cfg80211_sched_scan_request __rcu *sched_scan_req; u8 scan_addr[ETH_ALEN]; unsigned long leave_oper_channel_time; enum mac80211_scan_state next_scan_state; struct wiphy_delayed_work scan_work; struct ieee80211_sub_if_data __rcu *scan_sdata; /* Temporary remain-on-channel for off-channel operations */ struct ieee80211_channel *tmp_channel; /* channel contexts */ struct list_head chanctx_list; #ifdef CONFIG_MAC80211_LEDS struct led_trigger tx_led, rx_led, assoc_led, radio_led; struct led_trigger tpt_led; atomic_t tx_led_active, rx_led_active, assoc_led_active; atomic_t radio_led_active, tpt_led_active; struct tpt_led_trigger *tpt_led_trigger; #endif #ifdef CONFIG_MAC80211_DEBUG_COUNTERS /* SNMP counters */ /* dot11CountersTable */ u32 dot11TransmittedFragmentCount; u32 dot11MulticastTransmittedFrameCount; u32 dot11FailedCount; u32 dot11RetryCount; u32 dot11MultipleRetryCount; u32 dot11FrameDuplicateCount; u32 dot11ReceivedFragmentCount; u32 dot11MulticastReceivedFrameCount; u32 dot11TransmittedFrameCount; /* TX/RX handler statistics */ unsigned int tx_handlers_drop; unsigned int tx_handlers_queued; unsigned int tx_handlers_drop_wep; unsigned int tx_handlers_drop_not_assoc; unsigned int tx_handlers_drop_unauth_port; unsigned int rx_handlers_drop; unsigned int rx_handlers_queued; unsigned int rx_handlers_drop_nullfunc; unsigned int rx_handlers_drop_defrag; unsigned int tx_expand_skb_head; unsigned int tx_expand_skb_head_cloned; unsigned int rx_expand_skb_head_defrag; unsigned int rx_handlers_fragments; unsigned int tx_status_drop; #define I802_DEBUG_INC(c) (c)++ #else /* CONFIG_MAC80211_DEBUG_COUNTERS */ #define I802_DEBUG_INC(c) do { } while (0) #endif /* CONFIG_MAC80211_DEBUG_COUNTERS */ int total_ps_buffered; /* total number of all buffered unicast and * multicast packets for power saving stations */ bool pspolling; /* * PS can only be enabled when we have exactly one managed * interface (and monitors) in PS, this then points there. */ struct ieee80211_sub_if_data *ps_sdata; struct wiphy_work dynamic_ps_enable_work; struct wiphy_work dynamic_ps_disable_work; struct timer_list dynamic_ps_timer; struct notifier_block ifa_notifier; struct notifier_block ifa6_notifier; /* * The dynamic ps timeout configured from user space via WEXT - * this will override whatever is chosen by mac80211 internally. */ int dynamic_ps_forced_timeout; int user_power_level; /* in dBm, for all interfaces */ struct work_struct restart_work; #ifdef CONFIG_MAC80211_DEBUGFS struct local_debugfsdentries { struct dentry *rcdir; struct dentry *keys; } debugfs; bool force_tx_status; #endif /* * Remain-on-channel support */ struct wiphy_delayed_work roc_work; struct list_head roc_list; struct wiphy_work hw_roc_start, hw_roc_done; unsigned long hw_roc_start_time; u64 roc_cookie_counter; struct idr ack_status_frames; spinlock_t ack_status_lock; struct ieee80211_sub_if_data __rcu *p2p_sdata; /* virtual monitor interface */ struct ieee80211_sub_if_data __rcu *monitor_sdata; struct ieee80211_chan_req monitor_chanreq; /* extended capabilities provided by mac80211 */ u8 ext_capa[8]; bool wbrf_supported; }; static inline struct ieee80211_sub_if_data * IEEE80211_DEV_TO_SUB_IF(const struct net_device *dev) { return netdev_priv(dev); } static inline struct ieee80211_sub_if_data * IEEE80211_WDEV_TO_SUB_IF(struct wireless_dev *wdev) { return container_of(wdev, struct ieee80211_sub_if_data, wdev); }
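/*
 * Illustrative sketch (assumed caller context): moving between the
 * netdev/cfg80211 view and mac80211's private structures with the
 * helpers above and vif_to_sdata():
 *
 *	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 *	struct ieee80211_local *local = sdata->local;
 *
 * and back again from the driver-visible vif:
 *
 *	sdata = vif_to_sdata(vif);
 */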
static inline struct ieee80211_supported_band * ieee80211_get_sband(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; enum nl80211_band band; WARN_ON(ieee80211_vif_is_mld(&sdata->vif)); rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); return NULL; } band = chanctx_conf->def.chan->band; rcu_read_unlock(); return local->hw.wiphy->bands[band]; } static inline struct ieee80211_supported_band * ieee80211_get_link_sband(struct ieee80211_link_data *link) { struct ieee80211_local *local = link->sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; enum nl80211_band band; rcu_read_lock(); chanctx_conf = rcu_dereference(link->conf->chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); return NULL; } band = chanctx_conf->def.chan->band; rcu_read_unlock(); return local->hw.wiphy->bands[band]; } /* this struct holds the values parsed from the channel switch IE */ struct ieee80211_csa_ie { struct ieee80211_chan_req chanreq; u8 mode; u8 count; u8 ttl; u16 pre_value; u16 reason_code; u32 max_switch_time; }; enum ieee80211_elems_parse_error { IEEE80211_PARSE_ERR_INVALID_END = BIT(0), IEEE80211_PARSE_ERR_DUP_ELEM = BIT(1), IEEE80211_PARSE_ERR_BAD_ELEM_SIZE = BIT(2), IEEE80211_PARSE_ERR_UNEXPECTED_ELEM = BIT(3), IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC = BIT(4), }; /* 
Parsed Information Elements */ struct ieee802_11_elems { const u8 *ie_start; size_t total_len; u32 crc; /* pointers to IEs */ const struct ieee80211_tdls_lnkie *lnk_id; const struct ieee80211_ch_switch_timing *ch_sw_timing; const u8 *ext_capab; const u8 *ssid; const u8 *supp_rates; const u8 *ds_params; const struct ieee80211_tim_ie *tim; const u8 *rsn; const u8 *rsnx; const u8 *erp_info; const u8 *ext_supp_rates; const u8 *wmm_info; const u8 *wmm_param; const struct ieee80211_ht_cap *ht_cap_elem; const struct ieee80211_ht_operation *ht_operation; const struct ieee80211_vht_cap *vht_cap_elem; const struct ieee80211_vht_operation *vht_operation; const struct ieee80211_meshconf_ie *mesh_config; const u8 *he_cap; const struct ieee80211_he_operation *he_operation; const struct ieee80211_he_spr *he_spr; const struct ieee80211_mu_edca_param_set *mu_edca_param_set; const struct ieee80211_he_6ghz_capa *he_6ghz_capa; const u8 *uora_element; const u8 *mesh_id; const u8 *peering; const __le16 *awake_window; const u8 *preq; const u8 *prep; const u8 *perr; const struct ieee80211_rann_ie *rann; const struct ieee80211_channel_sw_ie *ch_switch_ie; const struct ieee80211_ext_chansw_ie *ext_chansw_ie; const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie; const u8 *max_channel_switch_time; const u8 *country_elem; const u8 *pwr_constr_elem; const u8 *cisco_dtpc_elem; const struct ieee80211_timeout_interval_ie *timeout_int; const u8 *opmode_notif; const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie; const struct ieee80211_multiple_bssid_configuration *mbssid_config_ie; const struct ieee80211_bssid_index *bssid_index; u8 max_bssid_indicator; u8 dtim_count; u8 dtim_period; const struct ieee80211_addba_ext_ie *addba_ext_ie; const struct ieee80211_s1g_cap *s1g_capab; const struct ieee80211_s1g_oper_ie *s1g_oper; const struct ieee80211_s1g_bcn_compat_ie *s1g_bcn_compat; const struct ieee80211_aid_response_ie *aid_resp; const struct ieee80211_eht_cap_elem *eht_cap; const struct ieee80211_eht_operation *eht_operation; const struct ieee80211_multi_link_elem *ml_basic; const struct ieee80211_multi_link_elem *ml_reconf; const struct ieee80211_multi_link_elem *ml_epcs; const struct ieee80211_bandwidth_indication *bandwidth_indication; const struct ieee80211_ttlm_elem *ttlm[IEEE80211_TTLM_MAX_CNT]; /* note: the order in the psd values is per element, not per chandef */ struct ieee80211_parsed_tpe tpe; struct ieee80211_parsed_tpe csa_tpe; /* length of them, respectively */ u8 ext_capab_len; u8 ssid_len; u8 supp_rates_len; u8 tim_len; u8 rsn_len; u8 rsnx_len; u8 ext_supp_rates_len; u8 wmm_info_len; u8 wmm_param_len; u8 he_cap_len; u8 mesh_id_len; u8 peering_len; u8 preq_len; u8 prep_len; u8 perr_len; u8 country_elem_len; u8 bssid_index_len; u8 eht_cap_len; /* multi-link element can be de-fragmented and thus u8 is not sufficient */ size_t ml_basic_len; size_t ml_reconf_len; size_t ml_epcs_len; u8 ttlm_num; /* * store the per station profile pointer and length in case that the * parsing also handled Multi-Link element parsing for a specific link * ID. */ struct ieee80211_mle_per_sta_profile *prof; size_t sta_prof_len; /* whether/which parse error occurred while retrieving these elements */ u8 parse_error; };
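/*
 * Illustrative sketch (assumed caller context): filling this struct via
 * ieee802_11_parse_elems() (declared further below); the result is
 * heap-allocated and the caller must kfree() it. Here len is assumed to
 * already exclude the fixed part of the frame:
 *
 *	struct ieee802_11_elems *elems;
 *
 *	elems = ieee802_11_parse_elems(mgmt->u.beacon.variable, len,
 *				       false, NULL);
 *	if (!elems)
 *		return;
 *	if (!elems->parse_error && elems->ssid)
 *		pr_debug("SSID element, len %u\n", elems->ssid_len);
 *	kfree(elems);
 */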
static inline struct ieee80211_local *hw_to_local( struct ieee80211_hw *hw) { return container_of(hw, struct ieee80211_local, hw); } static inline struct txq_info *to_txq_info(struct ieee80211_txq *txq) { return container_of(txq, struct txq_info, txq); } static inline bool txq_has_queue(struct ieee80211_txq *txq) { struct txq_info *txqi = to_txq_info(txq); return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets); } static inline bool ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status) { return status->flag & RX_FLAG_MACTIME; } void ieee80211_vif_inc_num_mcast(struct ieee80211_sub_if_data *sdata); void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata); void ieee80211_vif_block_queues_csa(struct ieee80211_sub_if_data *sdata); void ieee80211_vif_unblock_queues_csa(struct ieee80211_sub_if_data *sdata); /* This function returns the number of multicast stations connected to this * interface. It returns -1 if that number is not tracked, that is for netdevs * not in AP or AP_VLAN mode or when using 4addr. */ static inline int ieee80211_vif_get_num_mcast_if(struct ieee80211_sub_if_data *sdata) { if (sdata->vif.type == NL80211_IFTYPE_AP) return atomic_read(&sdata->u.ap.num_mcast_sta); if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) return atomic_read(&sdata->u.vlan.num_mcast_sta); return -1; } u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, struct ieee80211_rx_status *status, unsigned int mpdu_len, unsigned int mpdu_offset); int ieee80211_hw_config(struct ieee80211_local *local, u32 changed); int ieee80211_hw_conf_chan(struct ieee80211_local *local); void ieee80211_hw_conf_init(struct ieee80211_local *local); void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, u64 changed); void ieee80211_vif_cfg_change_notify(struct ieee80211_sub_if_data *sdata, u64 changed); void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, u64 changed); void ieee80211_configure_filter(struct ieee80211_local *local); u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); void ieee80211_handle_queued_frames(struct ieee80211_local *local); u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local); int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb, u64 *cookie, gfp_t gfp); void ieee80211_check_fast_rx(struct sta_info *sta); void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata); void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata); void ieee80211_clear_fast_rx(struct sta_info *sta); bool ieee80211_is_our_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr, int *out_link_id); /* STA code */ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata); int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, struct cfg80211_auth_request *req); int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, struct cfg80211_assoc_request *req); int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, struct cfg80211_deauth_request *req); int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, struct cfg80211_disassoc_request *req); void ieee80211_send_pspoll(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); void
ieee80211_recalc_ps(struct ieee80211_local *local); void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata); void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata, __le16 fc, bool acked); void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, u8 reason, bool tx); void ieee80211_mgd_setup_link(struct ieee80211_link_data *link); void ieee80211_mgd_stop_link(struct ieee80211_link_data *link); void ieee80211_mgd_set_link_qos_params(struct ieee80211_link_data *link); /* IBSS code */ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata); void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const u8 *addr, u32 supp_rates); int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, struct cfg80211_ibss_params *params); int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata); void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *csa_settings, u64 *changed); int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed); void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata); /* OCB code */ void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata); void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const u8 *addr, u32 supp_rates); void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata); int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata, struct ocb_setup *setup); int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata); /* mesh code */ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata); void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *csa_settings, u64 *changed); int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed); /* scan/BSS handling */ void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work); int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, const u8 *ssid, u8 ssid_len, struct ieee80211_channel **channels, unsigned int n_channels); int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, struct cfg80211_scan_request *req); void ieee80211_scan_cancel(struct ieee80211_local *local); void ieee80211_run_deferred_scan(struct ieee80211_local *local); void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb); void ieee80211_inform_bss(struct wiphy *wiphy, struct cfg80211_bss *bss, const struct cfg80211_bss_ies *ies, void *data); void 
ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); struct ieee80211_bss * ieee80211_bss_info_update(struct ieee80211_local *local, struct ieee80211_rx_status *rx_status, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_channel *channel); void ieee80211_rx_bss_put(struct ieee80211_local *local, struct ieee80211_bss *bss); /* scheduled scan handling */ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, struct cfg80211_sched_scan_request *req); int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, struct cfg80211_sched_scan_request *req); int ieee80211_request_sched_scan_stop(struct ieee80211_local *local); void ieee80211_sched_scan_end(struct ieee80211_local *local); void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy, struct wiphy_work *work); /* off-channel/mgmt-tx */ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local); void ieee80211_offchannel_return(struct ieee80211_local *local); void ieee80211_roc_setup(struct ieee80211_local *local); void ieee80211_start_next_roc(struct ieee80211_local *local); void ieee80211_reconfig_roc(struct ieee80211_local *local); void ieee80211_roc_purge(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, struct ieee80211_channel *chan, unsigned int duration, u64 *cookie); int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie); int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie); int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie); /* channel switch handling */ void ieee80211_csa_finalize_work(struct wiphy *wiphy, struct wiphy_work *work); int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_csa_settings *params); /* color change handling */ void ieee80211_color_change_finalize_work(struct wiphy *wiphy, struct wiphy_work *work); void ieee80211_color_collision_detection_work(struct wiphy *wiphy, struct wiphy_work *work); /* interface handling */ #define MAC80211_SUPPORTED_FEATURES_TX (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_HIGHDMA | NETIF_F_GSO_SOFTWARE | \ NETIF_F_HW_TC) #define MAC80211_SUPPORTED_FEATURES_RX (NETIF_F_RXCSUM) #define MAC80211_SUPPORTED_FEATURES (MAC80211_SUPPORTED_FEATURES_TX | \ MAC80211_SUPPORTED_FEATURES_RX) int ieee80211_iface_init(void); void ieee80211_iface_exit(void); int ieee80211_if_add(struct ieee80211_local *local, const char *name, unsigned char name_assign_type, struct wireless_dev **new_wdev, enum nl80211_iftype type, struct vif_params *params); int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type); void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); void ieee80211_remove_interfaces(struct ieee80211_local *local); u32 ieee80211_idle_off(struct ieee80211_local *local); void ieee80211_recalc_idle(struct ieee80211_local *local); void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, const int offset); int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up); void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata); int ieee80211_add_virtual_monitor(struct ieee80211_local *local); void ieee80211_del_virtual_monitor(struct ieee80211_local *local); bool __ieee80211_recalc_txpower(struct ieee80211_link_data *link); void 
ieee80211_recalc_txpower(struct ieee80211_link_data *link, bool update_bss); void ieee80211_recalc_offload(struct ieee80211_local *local); static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata) { return test_bit(SDATA_STATE_RUNNING, &sdata->state); } /* link handling */ void ieee80211_link_setup(struct ieee80211_link_data *link); void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, int link_id, struct ieee80211_link_data *link, struct ieee80211_bss_conf *link_conf); void ieee80211_link_stop(struct ieee80211_link_data *link); int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata, u16 new_links, u16 dormant_links); static inline void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata) { ieee80211_vif_set_links(sdata, 0, 0); } void ieee80211_apvlan_link_setup(struct ieee80211_sub_if_data *sdata); void ieee80211_apvlan_link_clear(struct ieee80211_sub_if_data *sdata); /* tx handling */ void ieee80211_clear_tx_pending(struct ieee80211_local *local); void ieee80211_tx_pending(struct tasklet_struct *t); netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb, struct net_device *dev); void __ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev, u32 info_flags, u32 ctrl_flags, u64 *cookie); struct sk_buff * ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u32 info_flags); void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, int retry_count, struct ieee80211_tx_status *status); void ieee80211_check_fast_xmit(struct sta_info *sta); void ieee80211_check_fast_xmit_all(struct ieee80211_local *local); void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata); void ieee80211_clear_fast_xmit(struct sta_info *sta); int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, const u8 *buf, size_t len, const u8 *dest, __be16 proto, bool unencrypted, int link_id, u64 *cookie); int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev, const u8 *buf, size_t len); void __ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_fast_tx *fast_tx, struct sk_buff *skb, bool ampdu, const u8 *da, const u8 *sa); void ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct sk_buff *skb); /* HT */ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_ht_cap *ht_cap); bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, const struct ieee80211_ht_cap *ht_cap_ie, struct link_sta_info *link_sta); void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, const u8 *da, u16 tid, u16 initiator, u16 reason_code); int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps, const u8 *da, const u8 *bssid, int link_id); void ieee80211_add_addbaext(struct sk_buff *skb, const u8 req_addba_ext_data, u16 buf_size); u8 ieee80211_retrieve_addba_ext_data(struct sta_info *sta, const void *elem_data, ssize_t elem_len, u16 *buf_size); void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, u16 initiator, u16 reason, bool stop); void __ieee80211_start_rx_ba_session(struct sta_info *sta, u8 dialog_token, u16 timeout, u16 start_seq_num, u16 ba_policy, u16 tid, u16 
buf_size, bool tx, bool auto_seq, const u8 addba_ext_data); void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, enum ieee80211_agg_stop_reason reason); void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len); void ieee80211_process_addba_resp(struct ieee80211_local *local, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len); void ieee80211_process_addba_request(struct ieee80211_local *local, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len); static inline struct ieee80211_mgmt * ieee80211_mgmt_ba(struct sk_buff *skb, const u8 *da, struct ieee80211_sub_if_data *sdata) { struct ieee80211_mgmt *mgmt = skb_put_zero(skb, 24); ether_addr_copy(mgmt->da, da); ether_addr_copy(mgmt->sa, sdata->vif.addr); if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN || sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ether_addr_copy(mgmt->bssid, sdata->vif.addr); else if (sdata->vif.type == NL80211_IFTYPE_STATION) ether_addr_copy(mgmt->bssid, sdata->vif.cfg.ap_addr); else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) ether_addr_copy(mgmt->bssid, sdata->u.ibss.bssid); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); return mgmt; } int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, enum ieee80211_agg_stop_reason reason); void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid, struct tid_ampdu_tx *tid_tx); void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid, struct tid_ampdu_tx *tid_tx); void ieee80211_ba_session_work(struct wiphy *wiphy, struct wiphy_work *work); void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid); u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs); enum nl80211_smps_mode ieee80211_smps_mode_to_smps_mode(enum ieee80211_smps_mode smps); /* VHT */ void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, const struct ieee80211_vht_cap *vht_cap_ie, const struct ieee80211_vht_cap *vht_cap_ie2, struct link_sta_info *link_sta); enum ieee80211_sta_rx_bandwidth _ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta, struct cfg80211_chan_def *chandef); static inline enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta) { return _ieee80211_sta_cap_rx_bw(link_sta, NULL); } enum ieee80211_sta_rx_bandwidth _ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta, struct cfg80211_chan_def *chandef); static inline enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta) { return _ieee80211_sta_cur_vht_bw(link_sta, NULL); } void ieee80211_sta_init_nss(struct link_sta_info *link_sta); enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct link_sta_info *link_sta); void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, struct ieee80211_mgmt *mgmt); u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, struct link_sta_info *sta, u8 opmode, enum nl80211_band band); void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, struct link_sta_info *sta, u8 opmode, enum nl80211_band band); void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_vht_cap *vht_cap); void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, u16 vht_mask[NL80211_VHT_NSS_MAX]); enum nl80211_chan_width 
ieee80211_sta_rx_bw_to_chan_width(struct link_sta_info *sta); /* HE */ void ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, const u8 *he_cap_ie, u8 he_cap_len, const struct ieee80211_he_6ghz_capa *he_6ghz_capa, struct link_sta_info *link_sta); void ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif, const struct ieee80211_he_spr *he_spr_ie_elem); void ieee80211_he_op_ie_to_bss_conf(struct ieee80211_vif *vif, const struct ieee80211_he_operation *he_op_ie_elem); /* S1G */ void ieee80211_s1g_sta_rate_init(struct sta_info *sta); bool ieee80211_s1g_is_twt_setup(struct sk_buff *skb); void ieee80211_s1g_rx_twt_action(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void ieee80211_s1g_status_twt_action(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); /* Spectrum management */ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len); /** * ieee80211_parse_ch_switch_ie - parses channel switch IEs * @sdata: the sdata of the interface which has received the frame * @elems: parsed 802.11 elements received with the frame * @current_band: indicates the current band * @vht_cap_info: VHT capabilities of the transmitter * @conn: contains information about own capabilities and restrictions * to decide which channel switch announcements can be accepted * @bssid: the currently connected bssid (for reporting) * @unprot_action: whether the frame was an unprotected frame or not, * used for reporting * @csa_ie: parsed 802.11 CSA element data (count, mode, chandef and mesh TTL). * All of them will be filled in only on success. * Return: 0 on success, <0 on error and >0 if there is nothing to parse. */ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, enum nl80211_band current_band, u32 vht_cap_info, struct ieee80211_conn_settings *conn, u8 *bssid, bool unprot_action, struct ieee80211_csa_ie *csa_ie); /* Suspend/resume and hw reconfiguration */ int ieee80211_reconfig(struct ieee80211_local *local); void ieee80211_stop_device(struct ieee80211_local *local, bool suspend); int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); static inline int __ieee80211_resume(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); WARN(test_bit(SCAN_HW_SCANNING, &local->scanning) && !test_bit(SCAN_COMPLETED, &local->scanning), "%s: resume with hardware scan still in progress\n", wiphy_name(hw->wiphy)); return ieee80211_reconfig(hw_to_local(hw)); } /* utility functions/constants */ extern const void *const mac80211_wiphy_privid; /* for wiphy privid */ const char *ieee80211_conn_mode_str(enum ieee80211_conn_mode mode); enum ieee80211_conn_bw_limit ieee80211_min_bw_limit_from_chandef(struct cfg80211_chan_def *chandef); int ieee80211_frame_duration(enum nl80211_band band, size_t len, int rate, int erp, int short_preamble); void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, struct ieee80211_tx_queue_params *qparam, int ac); void ieee80211_clear_tpe(struct ieee80211_parsed_tpe *tpe); void ieee80211_set_wmm_default(struct ieee80211_link_data *link, bool bss_notify, bool enable_qos); void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct sk_buff *skb); void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, int link_id, enum nl80211_band band); /* sta_out needs to be checked for ERR_PTR() before using */ int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct sta_info **sta_out);
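/*
 * Illustrative sketch (assumed caller context, drop is a hypothetical
 * label): honouring the ERR_PTR() contract of @sta_out noted above:
 *
 *	struct sta_info *sta;
 *
 *	if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
 *		goto drop;
 *	if (IS_ERR(sta))
 *		sta = NULL;
 */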
static inline void ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, enum nl80211_band band) { rcu_read_lock(); __ieee80211_tx_skb_tid_band(sdata, skb, tid, -1, band); rcu_read_unlock(); } void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, int link_id); static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */ ieee80211_tx_skb_tid(sdata, skb, 7, -1); } /** * struct ieee80211_elems_parse_params - element parsing parameters * @mode: connection mode for parsing * @start: pointer to the elements * @len: length of the elements * @action: %true if the elements came from an action frame * @filter: bitmap of element IDs to filter out while calculating * the element CRC * @crc: CRC starting value * @bss: the BSS to parse this as, for multi-BSSID cases this can * represent a non-transmitting BSS in which case the data * for that non-transmitting BSS is returned * @link_id: the link ID to parse elements for, if a STA profile * is present in the multi-link element, or -1 to ignore; * note that the code currently assumes parsing an association * (or re-association) response frame if this is given * @from_ap: frame is received from an AP (currently used only * for EHT capabilities parsing) */ struct ieee80211_elems_parse_params { enum ieee80211_conn_mode mode; const u8 *start; size_t len; bool action; u64 filter; u32 crc; struct cfg80211_bss *bss; int link_id; bool from_ap; }; struct ieee802_11_elems * ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params); static inline struct ieee802_11_elems * ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, u64 filter, u32 crc, struct cfg80211_bss *bss) { struct ieee80211_elems_parse_params params = { .mode = IEEE80211_CONN_MODE_HIGHEST, .start = start, .len = len, .action = action, .filter = filter, .crc = crc, .bss = bss, .link_id = -1, }; return ieee802_11_parse_elems_full(&params); } static inline struct ieee802_11_elems * ieee802_11_parse_elems(const u8 *start, size_t len, bool action, struct cfg80211_bss *bss) { return ieee802_11_parse_elems_crc(start, len, action, 0, 0, bss); } extern const int ieee802_1d_to_ac[8]; static inline int ieee80211_ac_from_tid(int tid) { return ieee802_1d_to_ac[tid & 7]; } void ieee80211_dynamic_ps_enable_work(struct wiphy *wiphy, struct wiphy_work *work); void ieee80211_dynamic_ps_disable_work(struct wiphy *wiphy, struct wiphy_work *work); void ieee80211_dynamic_ps_timer(struct timer_list *t); void ieee80211_send_nullfunc(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, bool powersave); void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_hdr *hdr, bool ack, u16 tx_time); unsigned int ieee80211_get_vif_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, unsigned long queues, enum queue_stop_reason reason, bool refcounted); void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, unsigned long queues, enum queue_stop_reason reason, bool refcounted); void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, enum queue_stop_reason
reason, bool refcounted); void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason, bool refcounted); static inline void ieee80211_stop_vif_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum queue_stop_reason reason) { ieee80211_stop_queues_by_reason(&local->hw, ieee80211_get_vif_queues(local, sdata), reason, true); } static inline void ieee80211_wake_vif_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum queue_stop_reason reason) { ieee80211_wake_queues_by_reason(&local->hw, ieee80211_get_vif_queues(local, sdata), reason, true); } static inline void ieee80211_stop_vif_queues_norefcount(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum queue_stop_reason reason) { ieee80211_stop_queues_by_reason(&local->hw, ieee80211_get_vif_queues(local, sdata), reason, false); } static inline void ieee80211_wake_vif_queues_norefcount(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum queue_stop_reason reason) { ieee80211_wake_queues_by_reason(&local->hw, ieee80211_get_vif_queues(local, sdata), reason, false); } void ieee80211_add_pending_skb(struct ieee80211_local *local, struct sk_buff *skb); void ieee80211_add_pending_skbs(struct ieee80211_local *local, struct sk_buff_head *skbs); void ieee80211_flush_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, bool drop); void __ieee80211_flush_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, unsigned int queues, bool drop); static inline bool ieee80211_can_run_worker(struct ieee80211_local *local) { /* * It's unsafe to try to do any work during reconfigure flow. * When the flow ends the work will be requeued. */ if (local->in_reconfig) return false; /* * If quiescing is set, we are racing with __ieee80211_suspend. * __ieee80211_suspend flushes the workers after setting quiescing, * and we check quiescing / suspended before enqueuing new workers. * We should abort the worker to avoid the races below. */ if (local->quiescing) return false; /* * We might already be suspended if the following scenario occurs: * __ieee80211_suspend Control path * * if (local->quiescing) * return; * local->quiescing = true; * flush_workqueue(); * queue_work(...); * local->suspended = true; * local->quiescing = false; * worker starts running... 
*/ if (local->suspended) return false; return true; } int ieee80211_txq_setup_flows(struct ieee80211_local *local); void ieee80211_txq_set_params(struct ieee80211_local *local); void ieee80211_txq_teardown_flows(struct ieee80211_local *local); void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct txq_info *txq, int tid); void ieee80211_txq_purge(struct ieee80211_local *local, struct txq_info *txqi); void ieee80211_purge_sta_txqs(struct sta_info *sta); void ieee80211_txq_remove_vlan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); void ieee80211_fill_txq_stats(struct cfg80211_txq_stats *txqstats, struct txq_info *txqi); void ieee80211_wake_txqs(struct tasklet_struct *t); void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, u16 transaction, u16 auth_alg, u16 status, const u8 *extra, size_t extra_len, const u8 *bssid, const u8 *da, const u8 *key, u8 key_len, u8 key_idx, u32 tx_flags); void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, const u8 *da, const u8 *bssid, u16 stype, u16 reason, bool send_frame, u8 *frame_buf); enum { IEEE80211_PROBE_FLAG_DIRECTED = BIT(0), IEEE80211_PROBE_FLAG_MIN_CONTENT = BIT(1), IEEE80211_PROBE_FLAG_RANDOM_SN = BIT(2), }; int ieee80211_build_preq_ies(struct ieee80211_sub_if_data *sdata, u8 *buffer, size_t buffer_len, struct ieee80211_scan_ies *ie_desc, const u8 *ie, size_t ie_len, u8 bands_used, u32 *rate_masks, struct cfg80211_chan_def *chandef, u32 flags); struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, const u8 *src, const u8 *dst, u32 ratemask, struct ieee80211_channel *chan, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u32 flags); u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, enum nl80211_band band, u32 *basic_rates); int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, enum ieee80211_smps_mode smps_mode); void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link); void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata, int link_id); size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset); u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, u16 cap); u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, const struct cfg80211_chan_def *chandef, u16 prot_mode, bool rifs_mode); void ieee80211_ie_build_wide_bw_cs(u8 *pos, const struct cfg80211_chan_def *chandef); u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, u32 cap); u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, const struct cfg80211_chan_def *chandef); u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata); u8 *ieee80211_ie_build_he_oper(u8 *pos, const struct cfg80211_chan_def *chandef); u8 *ieee80211_ie_build_eht_oper(u8 *pos, const struct cfg80211_chan_def *chandef, const struct ieee80211_sta_eht_cap *eht_cap); int ieee80211_parse_bitrates(enum nl80211_chan_width width, const struct ieee80211_supported_band *sband, const u8 *srates, int srates_len, u32 *rates); u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo); void ieee80211_add_s1g_capab_ie(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_s1g_cap *caps, struct sk_buff *skb); void ieee80211_add_aid_request_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); /* element building in SKBs */ int 
ieee80211_put_srates_elem(struct sk_buff *skb, const struct ieee80211_supported_band *sband, u32 basic_rates, u32 masked_rates, u8 element_id); int ieee80211_put_he_cap(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata, const struct ieee80211_supported_band *sband, const struct ieee80211_conn_settings *conn); int ieee80211_put_he_6ghz_cap(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps_mode); int ieee80211_put_eht_cap(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata, const struct ieee80211_supported_band *sband, const struct ieee80211_conn_settings *conn); /* channel management */ bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper, struct cfg80211_chan_def *chandef); bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info, const struct ieee80211_vht_operation *oper, const struct ieee80211_ht_operation *htop, struct cfg80211_chan_def *chandef); void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info, struct cfg80211_chan_def *chandef); bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_local *local, const struct ieee80211_he_operation *he_oper, const struct ieee80211_eht_operation *eht_oper, struct cfg80211_chan_def *chandef); bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper, struct cfg80211_chan_def *chandef); void ieee80211_chandef_downgrade(struct cfg80211_chan_def *chandef, struct ieee80211_conn_settings *conn); static inline void ieee80211_chanreq_downgrade(struct ieee80211_chan_req *chanreq, struct ieee80211_conn_settings *conn) { ieee80211_chandef_downgrade(&chanreq->oper, conn); if (WARN_ON(!conn)) return; if (conn->mode < IEEE80211_CONN_MODE_EHT) chanreq->ap.chan = NULL; } bool ieee80211_chanreq_identical(const struct ieee80211_chan_req *a, const struct ieee80211_chan_req *b); int __must_check _ieee80211_link_use_channel(struct ieee80211_link_data *link, const struct ieee80211_chan_req *req, enum ieee80211_chanctx_mode mode, bool assign_on_failure); static inline int __must_check ieee80211_link_use_channel(struct ieee80211_link_data *link, const struct ieee80211_chan_req *req, enum ieee80211_chanctx_mode mode) { return _ieee80211_link_use_channel(link, req, mode, false); } int __must_check ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link, const struct ieee80211_chan_req *req, enum ieee80211_chanctx_mode mode, bool radar_required); int __must_check ieee80211_link_use_reserved_context(struct ieee80211_link_data *link); int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link); int __must_check ieee80211_link_change_chanreq(struct ieee80211_link_data *link, const struct ieee80211_chan_req *req, u64 *changed); void __ieee80211_link_release_channel(struct ieee80211_link_data *link, bool skip_idle_recalc); void ieee80211_link_release_channel(struct ieee80211_link_data *link); void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link); void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link, bool clear); int ieee80211_chanctx_refcount(struct ieee80211_local *local, struct ieee80211_chanctx *ctx); void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *chanctx); void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, struct ieee80211_link_data *rsvd_for, bool check_reserved); bool ieee80211_is_radar_required(struct ieee80211_local *local); void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct 
wiphy_work *work); void ieee80211_dfs_cac_cancel(struct ieee80211_local *local, struct ieee80211_chanctx *chanctx); void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy, struct wiphy_work *work); int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *csa_settings); void ieee80211_recalc_dtim(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode chanmode, u8 radar_detect, int radio_idx); int ieee80211_max_num_channels(struct ieee80211_local *local, int radio_idx); u32 ieee80211_get_radio_mask(struct wiphy *wiphy, struct net_device *dev); void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, struct ieee80211_chanctx *ctx); /* TDLS */ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, int link_id, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *extra_ies, size_t extra_ies_len); int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper); void ieee80211_tdls_peer_del_work(struct wiphy *wiphy, struct wiphy_work *wk); int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev, const u8 *addr, u8 oper_class, struct cfg80211_chan_def *chandef); void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy, struct net_device *dev, const u8 *addr); void ieee80211_teardown_tdls_peers(struct ieee80211_link_data *link); void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata, const u8 *peer, u16 reason); void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); const char *ieee80211_get_reason_code_string(u16 reason_code); u16 ieee80211_encode_usf(int val); u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, enum nl80211_iftype type); extern const struct ethtool_ops ieee80211_ethtool_ops; u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *pubsta, int len, bool ampdu); #ifdef CONFIG_MAC80211_NOINLINE #define debug_noinline noinline #else #define debug_noinline #endif void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache); void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache); u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata); void ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, const u8 *he_cap_ie, u8 he_cap_len, const struct ieee80211_eht_cap_elem *eht_cap_ie_elem, u8 eht_cap_len, struct link_sta_info *link_sta); void ieee80211_process_neg_ttlm_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len); void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len); int ieee80211_req_neg_ttlm(struct ieee80211_sub_if_data *sdata, struct cfg80211_ttlm_params *params); void ieee80211_process_ttlm_teardown(struct ieee80211_sub_if_data *sdata); void ieee80211_check_wbrf_support(struct ieee80211_local *local); void ieee80211_add_wbrf(struct ieee80211_local *local, struct cfg80211_chan_def *chandef); void ieee80211_remove_wbrf(struct ieee80211_local *local, struct cfg80211_chan_def *chandef); int ieee80211_mgd_set_epcs(struct ieee80211_sub_if_data *sdata, bool enable); void ieee80211_process_epcs_ena_resp(struct 
ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len); void ieee80211_process_epcs_teardown(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len); int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, struct cfg80211_ml_reconf_req *req); void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len); void ieee80211_stop_mbssid(struct ieee80211_sub_if_data *sdata); #if IS_ENABLED(CONFIG_MAC80211_KUNIT_TEST) #define EXPORT_SYMBOL_IF_MAC80211_KUNIT(sym) EXPORT_SYMBOL_IF_KUNIT(sym) #define VISIBLE_IF_MAC80211_KUNIT ieee80211_rx_result ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx); int ieee80211_calc_chandef_subchan_offset(const struct cfg80211_chan_def *ap, u8 n_partial_subchans); void ieee80211_rearrange_tpe_psd(struct ieee80211_parsed_tpe_psd *psd, const struct cfg80211_chan_def *ap, const struct cfg80211_chan_def *used); struct ieee802_11_elems * ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata, struct ieee80211_conn_settings *conn, struct cfg80211_bss *cbss, int link_id, struct ieee80211_chan_req *chanreq, struct cfg80211_chan_def *ap_chandef, unsigned long *userspace_selectors); #else #define EXPORT_SYMBOL_IF_MAC80211_KUNIT(sym) #define VISIBLE_IF_MAC80211_KUNIT static #endif #endif /* IEEE80211_I_H */
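/*
 * Illustrative sketch (not part of ieee80211_i.h above): one way the
 * channel-request helpers declared in that header are meant to be combined --
 * try to acquire a channel context, and on failure downgrade the request one
 * step at a time until the narrowest width is reached. The function name here
 * is made up for illustration; the real callers of this pattern live in
 * mlme.c and chan.c, under the wiphy mutex.
 */
static int example_link_use_channel_with_fallback(struct ieee80211_link_data *link,
						  struct ieee80211_chan_req *chanreq,
						  struct ieee80211_conn_settings *conn)
{
	int ret;

	for (;;) {
		ret = ieee80211_link_use_channel(link, chanreq,
						 IEEE80211_CHANCTX_SHARED);
		if (!ret)
			return 0; /* channel context assigned */

		/* Nothing narrower than 20 MHz non-HT to fall back to. */
		if (chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT)
			return ret;

		/* Narrow the request one step (drops chanreq->ap below EHT). */
		ieee80211_chanreq_downgrade(chanreq, conn);
	}
}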
// SPDX-License-Identifier: GPL-2.0-or-later /* * Roccat Pyra driver for Linux * * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net> */ /* */ /* * Roccat Pyra is a mobile gamer mouse which comes in wired and wireless * variants. The wireless variant is not tested.
* Userland tools can be found at http://sourceforge.net/projects/roccat */ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/hid-roccat.h> #include "hid-ids.h" #include "hid-roccat-common.h" #include "hid-roccat-pyra.h" static uint profile_numbers[5] = {0, 1, 2, 3, 4}; static void profile_activated(struct pyra_device *pyra, unsigned int new_profile) { if (new_profile >= ARRAY_SIZE(pyra->profile_settings)) return; pyra->actual_profile = new_profile; pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi; } static int pyra_send_control(struct usb_device *usb_dev, int value, enum pyra_control_requests request) { struct roccat_common2_control control; if ((request == PYRA_CONTROL_REQUEST_PROFILE_SETTINGS || request == PYRA_CONTROL_REQUEST_PROFILE_BUTTONS) && (value < 0 || value > 4)) return -EINVAL; control.command = ROCCAT_COMMON_COMMAND_CONTROL; control.value = value; control.request = request; return roccat_common2_send(usb_dev, ROCCAT_COMMON_COMMAND_CONTROL, &control, sizeof(struct roccat_common2_control)); } static int pyra_get_profile_settings(struct usb_device *usb_dev, struct pyra_profile_settings *buf, int number) { int retval; retval = pyra_send_control(usb_dev, number, PYRA_CONTROL_REQUEST_PROFILE_SETTINGS); if (retval) return retval; return roccat_common2_receive(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS, buf, PYRA_SIZE_PROFILE_SETTINGS); } static int pyra_get_settings(struct usb_device *usb_dev, struct pyra_settings *buf) { return roccat_common2_receive(usb_dev, PYRA_COMMAND_SETTINGS, buf, PYRA_SIZE_SETTINGS); } static int pyra_set_settings(struct usb_device *usb_dev, struct pyra_settings const *settings) { return roccat_common2_send_with_status(usb_dev, PYRA_COMMAND_SETTINGS, settings, PYRA_SIZE_SETTINGS); } static ssize_t pyra_sysfs_read(struct file *fp, struct kobject *kobj, char *buf, loff_t off, size_t count, size_t real_size, uint command) { struct device *dev = kobj_to_dev(kobj)->parent->parent; struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval; if (off >= real_size) return 0; if (off != 0 || count != real_size) return -EINVAL; mutex_lock(&pyra->pyra_lock); retval = roccat_common2_receive(usb_dev, command, buf, real_size); mutex_unlock(&pyra->pyra_lock); if (retval) return retval; return real_size; } static ssize_t pyra_sysfs_write(struct file *fp, struct kobject *kobj, void const *buf, loff_t off, size_t count, size_t real_size, uint command) { struct device *dev = kobj_to_dev(kobj)->parent->parent; struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval; if (off != 0 || count != real_size) return -EINVAL; mutex_lock(&pyra->pyra_lock); retval = roccat_common2_send_with_status(usb_dev, command, (void *)buf, real_size); mutex_unlock(&pyra->pyra_lock); if (retval) return retval; return real_size; } #define PYRA_SYSFS_W(thingy, THINGY) \ static ssize_t pyra_sysfs_write_ ## thingy(struct file *fp, \ struct kobject *kobj, const struct bin_attribute *attr, \ char *buf, loff_t off, size_t count) \ { \ return pyra_sysfs_write(fp, kobj, buf, off, count, \ PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \ } #define PYRA_SYSFS_R(thingy, THINGY) \ static ssize_t pyra_sysfs_read_ ## thingy(struct file *fp, \ struct kobject *kobj, const struct bin_attribute *attr, \ char *buf, loff_t off, size_t 
count) \ { \ return pyra_sysfs_read(fp, kobj, buf, off, count, \ PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \ } #define PYRA_SYSFS_RW(thingy, THINGY) \ PYRA_SYSFS_W(thingy, THINGY) \ PYRA_SYSFS_R(thingy, THINGY) #define PYRA_BIN_ATTRIBUTE_RW(thingy, THINGY) \ PYRA_SYSFS_RW(thingy, THINGY); \ static const struct bin_attribute bin_attr_##thingy = { \ .attr = { .name = #thingy, .mode = 0660 }, \ .size = PYRA_SIZE_ ## THINGY, \ .read_new = pyra_sysfs_read_ ## thingy, \ .write_new = pyra_sysfs_write_ ## thingy \ } #define PYRA_BIN_ATTRIBUTE_R(thingy, THINGY) \ PYRA_SYSFS_R(thingy, THINGY); \ static const struct bin_attribute bin_attr_##thingy = { \ .attr = { .name = #thingy, .mode = 0440 }, \ .size = PYRA_SIZE_ ## THINGY, \ .read_new = pyra_sysfs_read_ ## thingy, \ } #define PYRA_BIN_ATTRIBUTE_W(thingy, THINGY) \ PYRA_SYSFS_W(thingy, THINGY); \ static const struct bin_attribute bin_attr_##thingy = { \ .attr = { .name = #thingy, .mode = 0220 }, \ .size = PYRA_SIZE_ ## THINGY, \ .write_new = pyra_sysfs_write_ ## thingy \ } /* An expanded instance of these attribute macros is sketched after this driver, below. */ PYRA_BIN_ATTRIBUTE_W(control, CONTROL); PYRA_BIN_ATTRIBUTE_RW(info, INFO); PYRA_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS); PYRA_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS); static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp, struct kobject *kobj, const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj)->parent->parent; struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); ssize_t retval; retval = pyra_send_control(usb_dev, *(uint *)(attr->private), PYRA_CONTROL_REQUEST_PROFILE_SETTINGS); if (retval) return retval; return pyra_sysfs_read(fp, kobj, buf, off, count, PYRA_SIZE_PROFILE_SETTINGS, PYRA_COMMAND_PROFILE_SETTINGS); } static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp, struct kobject *kobj, const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj)->parent->parent; struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); ssize_t retval; retval = pyra_send_control(usb_dev, *(uint *)(attr->private), PYRA_CONTROL_REQUEST_PROFILE_BUTTONS); if (retval) return retval; return pyra_sysfs_read(fp, kobj, buf, off, count, PYRA_SIZE_PROFILE_BUTTONS, PYRA_COMMAND_PROFILE_BUTTONS); } #define PROFILE_ATTR(number) \ static const struct bin_attribute bin_attr_profile##number##_settings = { \ .attr = { .name = "profile" #number "_settings", .mode = 0440 }, \ .size = PYRA_SIZE_PROFILE_SETTINGS, \ .read_new = pyra_sysfs_read_profilex_settings, \ .private = &profile_numbers[number-1], \ }; \ static const struct bin_attribute bin_attr_profile##number##_buttons = { \ .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \ .size = PYRA_SIZE_PROFILE_BUTTONS, \ .read_new = pyra_sysfs_read_profilex_buttons, \ .private = &profile_numbers[number-1], \ }; PROFILE_ATTR(1); PROFILE_ATTR(2); PROFILE_ATTR(3); PROFILE_ATTR(4); PROFILE_ATTR(5); static ssize_t pyra_sysfs_write_settings(struct file *fp, struct kobject *kobj, const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj)->parent->parent; struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval = 0; struct pyra_roccat_report roccat_report; struct pyra_settings const *settings; if (off != 0 || count != PYRA_SIZE_SETTINGS) return -EINVAL; settings = (struct pyra_settings const *)buf; if
(settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings)) return -EINVAL; mutex_lock(&pyra->pyra_lock); retval = pyra_set_settings(usb_dev, settings); if (retval) { mutex_unlock(&pyra->pyra_lock); return retval; } profile_activated(pyra, settings->startup_profile); roccat_report.type = PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2; roccat_report.value = settings->startup_profile + 1; roccat_report.key = 0; roccat_report_event(pyra->chrdev_minor, (uint8_t const *)&roccat_report); mutex_unlock(&pyra->pyra_lock); return PYRA_SIZE_SETTINGS; } PYRA_SYSFS_R(settings, SETTINGS); static const struct bin_attribute bin_attr_settings = __BIN_ATTR(settings, (S_IWUSR | S_IRUGO), pyra_sysfs_read_settings, pyra_sysfs_write_settings, PYRA_SIZE_SETTINGS); static ssize_t pyra_sysfs_show_actual_cpi(struct device *dev, struct device_attribute *attr, char *buf) { struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); return sysfs_emit(buf, "%d\n", pyra->actual_cpi); } static DEVICE_ATTR(actual_cpi, 0440, pyra_sysfs_show_actual_cpi, NULL); static ssize_t pyra_sysfs_show_actual_profile(struct device *dev, struct device_attribute *attr, char *buf) { struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); struct pyra_settings settings; mutex_lock(&pyra->pyra_lock); roccat_common2_receive(usb_dev, PYRA_COMMAND_SETTINGS, &settings, PYRA_SIZE_SETTINGS); mutex_unlock(&pyra->pyra_lock); return sysfs_emit(buf, "%d\n", settings.startup_profile); } static DEVICE_ATTR(actual_profile, 0440, pyra_sysfs_show_actual_profile, NULL); static DEVICE_ATTR(startup_profile, 0440, pyra_sysfs_show_actual_profile, NULL); static ssize_t pyra_sysfs_show_firmware_version(struct device *dev, struct device_attribute *attr, char *buf) { struct pyra_device *pyra; struct usb_device *usb_dev; struct pyra_info info; dev = dev->parent->parent; pyra = hid_get_drvdata(dev_get_drvdata(dev)); usb_dev = interface_to_usbdev(to_usb_interface(dev)); mutex_lock(&pyra->pyra_lock); roccat_common2_receive(usb_dev, PYRA_COMMAND_INFO, &info, PYRA_SIZE_INFO); mutex_unlock(&pyra->pyra_lock); return sysfs_emit(buf, "%d\n", info.firmware_version); } static DEVICE_ATTR(firmware_version, 0440, pyra_sysfs_show_firmware_version, NULL); static struct attribute *pyra_attrs[] = { &dev_attr_actual_cpi.attr, &dev_attr_actual_profile.attr, &dev_attr_firmware_version.attr, &dev_attr_startup_profile.attr, NULL, }; static const struct bin_attribute *const pyra_bin_attributes[] = { &bin_attr_control, &bin_attr_info, &bin_attr_profile_settings, &bin_attr_profile_buttons, &bin_attr_settings, &bin_attr_profile1_settings, &bin_attr_profile2_settings, &bin_attr_profile3_settings, &bin_attr_profile4_settings, &bin_attr_profile5_settings, &bin_attr_profile1_buttons, &bin_attr_profile2_buttons, &bin_attr_profile3_buttons, &bin_attr_profile4_buttons, &bin_attr_profile5_buttons, NULL, }; static const struct attribute_group pyra_group = { .attrs = pyra_attrs, .bin_attrs_new = pyra_bin_attributes, }; static const struct attribute_group *pyra_groups[] = { &pyra_group, NULL, }; /* pyra_class is used for creating sysfs attributes via roccat char device */ static const struct class pyra_class = { .name = "pyra", .dev_groups = pyra_groups, }; static int pyra_init_pyra_device_struct(struct usb_device *usb_dev, struct pyra_device *pyra) { struct pyra_settings settings; int retval, i; mutex_init(&pyra->pyra_lock); retval = pyra_get_settings(usb_dev, &settings); if (retval) 
return retval; for (i = 0; i < 5; ++i) { retval = pyra_get_profile_settings(usb_dev, &pyra->profile_settings[i], i); if (retval) return retval; } profile_activated(pyra, settings.startup_profile); return 0; } static int pyra_init_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct usb_device *usb_dev = interface_to_usbdev(intf); struct pyra_device *pyra; int retval; if (intf->cur_altsetting->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE) { pyra = kzalloc(sizeof(*pyra), GFP_KERNEL); if (!pyra) { hid_err(hdev, "can't alloc device descriptor\n"); return -ENOMEM; } hid_set_drvdata(hdev, pyra); retval = pyra_init_pyra_device_struct(usb_dev, pyra); if (retval) { hid_err(hdev, "couldn't init struct pyra_device\n"); goto exit_free; } retval = roccat_connect(&pyra_class, hdev, sizeof(struct pyra_roccat_report)); if (retval < 0) { hid_err(hdev, "couldn't init char dev\n"); } else { pyra->chrdev_minor = retval; pyra->roccat_claimed = 1; } } else { hid_set_drvdata(hdev, NULL); } return 0; exit_free: kfree(pyra); return retval; } static void pyra_remove_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct pyra_device *pyra; if (intf->cur_altsetting->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE) { pyra = hid_get_drvdata(hdev); if (pyra->roccat_claimed) roccat_disconnect(pyra->chrdev_minor); kfree(hid_get_drvdata(hdev)); } } static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; if (!hid_is_usb(hdev)) return -EINVAL; retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); goto exit; } retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (retval) { hid_err(hdev, "hw start failed\n"); goto exit; } retval = pyra_init_specials(hdev); if (retval) { hid_err(hdev, "couldn't install mouse\n"); goto exit_stop; } return 0; exit_stop: hid_hw_stop(hdev); exit: return retval; } static void pyra_remove(struct hid_device *hdev) { pyra_remove_specials(hdev); hid_hw_stop(hdev); } static void pyra_keep_values_up_to_date(struct pyra_device *pyra, u8 const *data) { struct pyra_mouse_event_button const *button_event; switch (data[0]) { case PYRA_MOUSE_REPORT_NUMBER_BUTTON: button_event = (struct pyra_mouse_event_button const *)data; switch (button_event->type) { case PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2: profile_activated(pyra, button_event->data1 - 1); break; case PYRA_MOUSE_EVENT_BUTTON_TYPE_CPI: pyra->actual_cpi = button_event->data1; break; } break; } } static void pyra_report_to_chrdev(struct pyra_device const *pyra, u8 const *data) { struct pyra_roccat_report roccat_report; struct pyra_mouse_event_button const *button_event; if (data[0] != PYRA_MOUSE_REPORT_NUMBER_BUTTON) return; button_event = (struct pyra_mouse_event_button const *)data; switch (button_event->type) { case PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2: case PYRA_MOUSE_EVENT_BUTTON_TYPE_CPI: roccat_report.type = button_event->type; roccat_report.value = button_event->data1; roccat_report.key = 0; roccat_report_event(pyra->chrdev_minor, (uint8_t const *)&roccat_report); break; case PYRA_MOUSE_EVENT_BUTTON_TYPE_MACRO: case PYRA_MOUSE_EVENT_BUTTON_TYPE_SHORTCUT: case PYRA_MOUSE_EVENT_BUTTON_TYPE_QUICKLAUNCH: if (button_event->data2 == PYRA_MOUSE_EVENT_BUTTON_PRESS) { roccat_report.type = button_event->type; roccat_report.key = button_event->data1; /* * pyra reports profile numbers with range 1-5. * Keeping this behaviour. 
*/ roccat_report.value = pyra->actual_profile + 1; roccat_report_event(pyra->chrdev_minor, (uint8_t const *)&roccat_report); } break; } } static int pyra_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct pyra_device *pyra = hid_get_drvdata(hdev); if (intf->cur_altsetting->desc.bInterfaceProtocol != USB_INTERFACE_PROTOCOL_MOUSE) return 0; if (pyra == NULL) return 0; pyra_keep_values_up_to_date(pyra, data); if (pyra->roccat_claimed) pyra_report_to_chrdev(pyra, data); return 0; } static const struct hid_device_id pyra_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) }, { } }; MODULE_DEVICE_TABLE(hid, pyra_devices); static struct hid_driver pyra_driver = { .name = "pyra", .id_table = pyra_devices, .probe = pyra_probe, .remove = pyra_remove, .raw_event = pyra_raw_event }; static int __init pyra_init(void) { int retval; /* class name has to be same as driver name */ retval = class_register(&pyra_class); if (retval) return retval; retval = hid_register_driver(&pyra_driver); if (retval) class_unregister(&pyra_class); return retval; } static void __exit pyra_exit(void) { hid_unregister_driver(&pyra_driver); class_unregister(&pyra_class); } module_init(pyra_init); module_exit(pyra_exit); MODULE_AUTHOR("Stefan Achatz"); MODULE_DESCRIPTION("USB Roccat Pyra driver"); MODULE_LICENSE("GPL v2");
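/*
 * Illustrative expansion (not compiled, hence the #if 0): roughly what
 * PYRA_BIN_ATTRIBUTE_RW(info, INFO) above produces once PYRA_SYSFS_RW and
 * friends are expanded -- two thin sysfs callbacks plus the bin_attribute
 * that ties them to the "info" file.
 */
#if 0
static ssize_t pyra_sysfs_write_info(struct file *fp, struct kobject *kobj,
				     const struct bin_attribute *attr,
				     char *buf, loff_t off, size_t count)
{
	return pyra_sysfs_write(fp, kobj, buf, off, count,
				PYRA_SIZE_INFO, PYRA_COMMAND_INFO);
}

static ssize_t pyra_sysfs_read_info(struct file *fp, struct kobject *kobj,
				    const struct bin_attribute *attr,
				    char *buf, loff_t off, size_t count)
{
	return pyra_sysfs_read(fp, kobj, buf, off, count,
			       PYRA_SIZE_INFO, PYRA_COMMAND_INFO);
}

static const struct bin_attribute bin_attr_info = {
	.attr = { .name = "info", .mode = 0660 },
	.size = PYRA_SIZE_INFO,
	.read_new = pyra_sysfs_read_info,
	.write_new = pyra_sysfs_write_info
};
#endif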
// SPDX-License-Identifier: GPL-2.0-or-later /* Virtio ring implementation. * * Copyright 2007 Rusty Russell IBM Corporation */ #include <linux/virtio.h> #include <linux/virtio_ring.h> #include <linux/virtio_config.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/hrtimer.h> #include <linux/dma-mapping.h> #include <linux/kmsan.h> #include <linux/spinlock.h> #include <xen/xen.h> #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&(_vq)->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ BUG(); \ } while (0) /* Caller is supposed to guarantee no reentry. */ #define START_USE(_vq) \ do { \ if ((_vq)->in_use) \ panic("%s:in_use = %i\n", \ (_vq)->vq.name, (_vq)->in_use); \ (_vq)->in_use = __LINE__; \ } while (0) #define END_USE(_vq) \ do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0) #define LAST_ADD_TIME_UPDATE(_vq) \ do { \ ktime_t now = ktime_get(); \ \ /* No kick or get, with .1 second between? Warn. */ \ if ((_vq)->last_add_time_valid) \ WARN_ON(ktime_to_ms(ktime_sub(now, \ (_vq)->last_add_time)) > 100); \ (_vq)->last_add_time = now; \ (_vq)->last_add_time_valid = true; \ } while (0) #define LAST_ADD_TIME_CHECK(_vq) \ do { \ if ((_vq)->last_add_time_valid) { \ WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \ (_vq)->last_add_time)) > 100); \ } \ } while (0) #define LAST_ADD_TIME_INVALID(_vq) \ ((_vq)->last_add_time_valid = false) #else #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&_vq->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ (_vq)->broken = true; \ } while (0) #define START_USE(vq) #define END_USE(vq) #define LAST_ADD_TIME_UPDATE(vq) #define LAST_ADD_TIME_CHECK(vq) #define LAST_ADD_TIME_INVALID(vq) #endif struct vring_desc_state_split { void *data; /* Data for callback. */ /* Indirect desc table and extra table, if any. These two will be * allocated together. So we won't stress the memory allocator more. */ struct vring_desc *indir_desc; }; struct vring_desc_state_packed { void *data; /* Data for callback. */ /* Indirect desc table and extra table, if any. These two will be * allocated together. So we won't stress the memory allocator more. */ struct vring_packed_desc *indir_desc; u16 num; /* Descriptor list length. */ u16 last; /* The last desc state in a list.
*/ }; struct vring_desc_extra { dma_addr_t addr; /* Descriptor DMA addr. */ u32 len; /* Descriptor length. */ u16 flags; /* Descriptor flags. */ u16 next; /* The next desc state in a list. */ }; struct vring_virtqueue_split { /* Actual memory layout for this queue. */ struct vring vring; /* Last written value to avail->flags */ u16 avail_flags_shadow; /* * Last written value to avail->idx in * guest byte order. */ u16 avail_idx_shadow; /* Per-descriptor state. */ struct vring_desc_state_split *desc_state; struct vring_desc_extra *desc_extra; /* DMA address and size information */ dma_addr_t queue_dma_addr; size_t queue_size_in_bytes; /* * The parameters for creating vrings are reserved for creating new * vring. */ u32 vring_align; bool may_reduce_num; }; struct vring_virtqueue_packed { /* Actual memory layout for this queue. */ struct { unsigned int num; struct vring_packed_desc *desc; struct vring_packed_desc_event *driver; struct vring_packed_desc_event *device; } vring; /* Driver ring wrap counter. */ bool avail_wrap_counter; /* Avail used flags. */ u16 avail_used_flags; /* Index of the next avail descriptor. */ u16 next_avail_idx; /* * Last written value to driver->flags in * guest byte order. */ u16 event_flags_shadow; /* Per-descriptor state. */ struct vring_desc_state_packed *desc_state; struct vring_desc_extra *desc_extra; /* DMA address and size information */ dma_addr_t ring_dma_addr; dma_addr_t driver_event_dma_addr; dma_addr_t device_event_dma_addr; size_t ring_size_in_bytes; size_t event_size_in_bytes; }; struct vring_virtqueue { struct virtqueue vq; /* Is this a packed ring? */ bool packed_ring; /* Is DMA API used? */ bool use_dma_api; /* Can we use weak barriers? */ bool weak_barriers; /* Other side has made a mess, don't try any more. */ bool broken; /* Host supports indirect buffers */ bool indirect; /* Host publishes avail event idx */ bool event; /* Head of free buffer list. */ unsigned int free_head; /* Number we've added since last sync. */ unsigned int num_added; /* Last used index we've seen. * for split ring, it just contains last used index * for packed ring: * bits up to VRING_PACKED_EVENT_F_WRAP_CTR include the last used index. * bits from VRING_PACKED_EVENT_F_WRAP_CTR include the used wrap counter. */ u16 last_used_idx; /* Hint for event idx: already triggered no need to disable. */ bool event_triggered; union { /* Available for split ring */ struct vring_virtqueue_split split; /* Available for packed ring */ struct vring_virtqueue_packed packed; }; /* How to notify other side. FIXME: commonalize hcalls! */ bool (*notify)(struct virtqueue *vq); /* DMA, allocation, and size information */ bool we_own_ring; /* Device used for doing DMA */ struct device *dma_dev; #ifdef DEBUG /* They're supposed to lock for us. */ unsigned int in_use; /* Figure out if their kicks are too delayed. */ bool last_add_time_valid; ktime_t last_add_time; #endif }; static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num); static void vring_free(struct virtqueue *_vq); /* * Helpers. */ #define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq) static bool virtqueue_use_indirect(const struct vring_virtqueue *vq, unsigned int total_sg) { /* * If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. FIXME: tune this threshold */ return (vq->indirect && total_sg > 1 && vq->vq.num_free); } /* * Modern virtio devices have feature bits to specify whether they need a * quirk and bypass the IOMMU. 
If not there, just use the DMA API. * * If there, the interaction between virtio and DMA API is messy. * * On most systems with virtio, physical addresses match bus addresses, * and it doesn't particularly matter whether we use the DMA API. * * On some systems, including Xen and any system with a physical device * that speaks virtio behind a physical IOMMU, we must use the DMA API * for virtio DMA to work at all. * * On other systems, including SPARC and PPC64, virtio-pci devices are * enumerated as though they are behind an IOMMU, but the virtio host * ignores the IOMMU, so we must either pretend that the IOMMU isn't * there or somehow map everything as the identity. * * For the time being, we preserve historic behavior and bypass the DMA * API. * * TODO: install a per-device DMA ops structure that does the right thing * taking into account all the above quirks, and use the DMA API * unconditionally on data path. */ static bool vring_use_dma_api(const struct virtio_device *vdev) { if (!virtio_has_dma_quirk(vdev)) return true; /* Otherwise, we are left to guess. */ /* * In theory, it's possible to have a buggy QEMU-supplied * emulated Q35 IOMMU and Xen enabled at the same time. On * such a configuration, virtio has never worked and will * not work without an even larger kludge. Instead, enable * the DMA API if we're a Xen guest, which at least allows * all of the sensible Xen configurations to work correctly. */ if (xen_domain()) return true; return false; } static bool vring_need_unmap_buffer(const struct vring_virtqueue *vring, const struct vring_desc_extra *extra) { return vring->use_dma_api && (extra->addr != DMA_MAPPING_ERROR); } size_t virtio_max_dma_size(const struct virtio_device *vdev) { size_t max_segment_size = SIZE_MAX; if (vring_use_dma_api(vdev)) max_segment_size = dma_max_mapping_size(vdev->dev.parent); return max_segment_size; } EXPORT_SYMBOL_GPL(virtio_max_dma_size); static void *vring_alloc_queue(struct virtio_device *vdev, size_t size, dma_addr_t *dma_handle, gfp_t flag, struct device *dma_dev) { if (vring_use_dma_api(vdev)) { return dma_alloc_coherent(dma_dev, size, dma_handle, flag); } else { void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag); if (queue) { phys_addr_t phys_addr = virt_to_phys(queue); *dma_handle = (dma_addr_t)phys_addr; /* * Sanity check: make sure we didn't truncate * the address. The only arches I can find that * have 64-bit phys_addr_t but 32-bit dma_addr_t * are certain non-highmem MIPS and x86 * configurations, but these configurations * should never allocate physical pages above 32 * bits, so this is fine. Just in case, throw a * warning and abort if we end up with an * unrepresentable address. */ if (WARN_ON_ONCE(*dma_handle != phys_addr)) { free_pages_exact(queue, PAGE_ALIGN(size)); return NULL; } } return queue; } } static void vring_free_queue(struct virtio_device *vdev, size_t size, void *queue, dma_addr_t dma_handle, struct device *dma_dev) { if (vring_use_dma_api(vdev)) dma_free_coherent(dma_dev, size, queue, dma_handle); else free_pages_exact(queue, PAGE_ALIGN(size)); } /* * The DMA ops on various arches are rather gnarly right now, and * making all of the arch DMA ops work on the vring device itself * is a mess. */ static struct device *vring_dma_dev(const struct vring_virtqueue *vq) { return vq->dma_dev; } /* Map one sg entry.
*/ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg, enum dma_data_direction direction, dma_addr_t *addr, u32 *len, bool premapped) { if (premapped) { *addr = sg_dma_address(sg); *len = sg_dma_len(sg); return 0; } *len = sg->length; if (!vq->use_dma_api) { /* * If DMA is not used, KMSAN doesn't know that the scatterlist * is initialized by the hardware. Explicitly check/unpoison it * depending on the direction. */ kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction); *addr = (dma_addr_t)sg_phys(sg); return 0; } /* * We can't use dma_map_sg, because we don't use scatterlists in * the way it expects (we don't guarantee that the scatterlist * will exist for the lifetime of the mapping). */ *addr = dma_map_page(vring_dma_dev(vq), sg_page(sg), sg->offset, sg->length, direction); if (dma_mapping_error(vring_dma_dev(vq), *addr)) return -ENOMEM; return 0; } static dma_addr_t vring_map_single(const struct vring_virtqueue *vq, void *cpu_addr, size_t size, enum dma_data_direction direction) { if (!vq->use_dma_api) return (dma_addr_t)virt_to_phys(cpu_addr); return dma_map_single(vring_dma_dev(vq), cpu_addr, size, direction); } static int vring_mapping_error(const struct vring_virtqueue *vq, dma_addr_t addr) { if (!vq->use_dma_api) return 0; return dma_mapping_error(vring_dma_dev(vq), addr); } static void virtqueue_init(struct vring_virtqueue *vq, u32 num) { vq->vq.num_free = num; if (vq->packed_ring) vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR); else vq->last_used_idx = 0; vq->event_triggered = false; vq->num_added = 0; #ifdef DEBUG vq->in_use = false; vq->last_add_time_valid = false; #endif } /* * Split ring specific functions - *_split(). */ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq, struct vring_desc_extra *extra) { u16 flags; flags = extra->flags; if (flags & VRING_DESC_F_INDIRECT) { if (!vq->use_dma_api) goto out; dma_unmap_single(vring_dma_dev(vq), extra->addr, extra->len, (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } else { if (!vring_need_unmap_buffer(vq, extra)) goto out; dma_unmap_page(vring_dma_dev(vq), extra->addr, extra->len, (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } out: return extra->next; } static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq, unsigned int total_sg, gfp_t gfp) { struct vring_desc_extra *extra; struct vring_desc *desc; unsigned int i, size; /* * We require lowmem mappings for the descriptors because * otherwise virt_to_phys will give us bogus addresses in the * virtqueue. */ gfp &= ~__GFP_HIGHMEM; size = sizeof(*desc) * total_sg + sizeof(*extra) * total_sg; desc = kmalloc(size, gfp); if (!desc) return NULL; extra = (struct vring_desc_extra *)&desc[total_sg]; for (i = 0; i < total_sg; i++) extra[i].next = i + 1; return desc; } static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, struct vring_desc *desc, struct vring_desc_extra *extra, unsigned int i, dma_addr_t addr, unsigned int len, u16 flags, bool premapped) { u16 next; desc[i].flags = cpu_to_virtio16(vq->vdev, flags); desc[i].addr = cpu_to_virtio64(vq->vdev, addr); desc[i].len = cpu_to_virtio32(vq->vdev, len); extra[i].addr = premapped ? 
DMA_MAPPING_ERROR : addr; extra[i].len = len; extra[i].flags = flags; next = extra[i].next; desc[i].next = cpu_to_virtio16(vq->vdev, next); return next; } static inline int virtqueue_add_split(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int total_sg, unsigned int out_sgs, unsigned int in_sgs, void *data, void *ctx, bool premapped, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); struct vring_desc_extra *extra; struct scatterlist *sg; struct vring_desc *desc; unsigned int i, n, avail, descs_used, prev, err_idx; int head; bool indirect; START_USE(vq); BUG_ON(data == NULL); BUG_ON(ctx && vq->indirect); if (unlikely(vq->broken)) { END_USE(vq); return -EIO; } LAST_ADD_TIME_UPDATE(vq); BUG_ON(total_sg == 0); head = vq->free_head; if (virtqueue_use_indirect(vq, total_sg)) desc = alloc_indirect_split(_vq, total_sg, gfp); else { desc = NULL; WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); } if (desc) { /* Use a single buffer which doesn't continue */ indirect = true; /* Set up rest to use this indirect table. */ i = 0; descs_used = 1; extra = (struct vring_desc_extra *)&desc[total_sg]; } else { indirect = false; desc = vq->split.vring.desc; extra = vq->split.desc_extra; i = head; descs_used = total_sg; } if (unlikely(vq->vq.num_free < descs_used)) { pr_debug("Can't add buf len %i - avail = %i\n", descs_used, vq->vq.num_free); /* FIXME: for historical reasons, we force a notify here if * there are outgoing parts to the buffer. Presumably the * host should service the ring ASAP. */ if (out_sgs) vq->notify(&vq->vq); if (indirect) kfree(desc); END_USE(vq); return -ENOSPC; } for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { dma_addr_t addr; u32 len; if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped)) goto unmap_release; prev = i; /* Note that we trust indirect descriptor * table since it uses stream DMA mapping. */ i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len, VRING_DESC_F_NEXT, premapped); } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { dma_addr_t addr; u32 len; if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped)) goto unmap_release; prev = i; /* Note that we trust indirect descriptor * table since it uses stream DMA mapping. */ i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, premapped); } } /* Last one doesn't continue. */ desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); if (!indirect && vring_need_unmap_buffer(vq, &extra[prev])) vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= ~VRING_DESC_F_NEXT; if (indirect) { /* Now that the indirect table is filled in, map it. */ dma_addr_t addr = vring_map_single( vq, desc, total_sg * sizeof(struct vring_desc), DMA_TO_DEVICE); if (vring_mapping_error(vq, addr)) goto unmap_release; virtqueue_add_desc_split(_vq, vq->split.vring.desc, vq->split.desc_extra, head, addr, total_sg * sizeof(struct vring_desc), VRING_DESC_F_INDIRECT, false); } /* We're using some buffers from the free list. */ vq->vq.num_free -= descs_used; /* Update free pointer */ if (indirect) vq->free_head = vq->split.desc_extra[head].next; else vq->free_head = i; /* Store token and indirect buffer state. */ vq->split.desc_state[head].data = data; if (indirect) vq->split.desc_state[head].indir_desc = desc; else vq->split.desc_state[head].indir_desc = ctx; /* Put entry in available array (but don't update avail->idx until they * do sync).
*/ avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); /* Descriptors and available array need to be set before we expose the * new available array entries. */ virtio_wmb(vq->weak_barriers); vq->split.avail_idx_shadow++; vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->split.avail_idx_shadow); vq->num_added++; pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); /* This is very unlikely, but theoretically possible. Kick * just in case. */ if (unlikely(vq->num_added == (1 << 16) - 1)) virtqueue_kick(_vq); return 0; unmap_release: err_idx = i; if (indirect) i = 0; else i = head; for (n = 0; n < total_sg; n++) { if (i == err_idx) break; i = vring_unmap_one_split(vq, &extra[i]); } if (indirect) kfree(desc); END_USE(vq); return -ENOMEM; } static bool virtqueue_kick_prepare_split(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 new, old; bool needs_kick; START_USE(vq); /* We need to expose available array entries before checking avail * event. */ virtio_mb(vq->weak_barriers); old = vq->split.avail_idx_shadow - vq->num_added; new = vq->split.avail_idx_shadow; vq->num_added = 0; LAST_ADD_TIME_CHECK(vq); LAST_ADD_TIME_INVALID(vq); if (vq->event) { needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->split.vring)), new, old); } else { needs_kick = !(vq->split.vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY)); } END_USE(vq); return needs_kick; } static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head, void **ctx) { struct vring_desc_extra *extra; unsigned int i, j; __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); /* Clear data ptr. */ vq->split.desc_state[head].data = NULL; extra = vq->split.desc_extra; /* Put back on free list: unmap first-level descriptors and find end */ i = head; while (vq->split.vring.desc[i].flags & nextflag) { vring_unmap_one_split(vq, &extra[i]); i = vq->split.desc_extra[i].next; vq->vq.num_free++; } vring_unmap_one_split(vq, &extra[i]); vq->split.desc_extra[i].next = vq->free_head; vq->free_head = head; /* Plus final descriptor */ vq->vq.num_free++; if (vq->indirect) { struct vring_desc *indir_desc = vq->split.desc_state[head].indir_desc; u32 len, num; /* Free the indirect table, if any, now that it's unmapped. */ if (!indir_desc) return; len = vq->split.desc_extra[head].len; BUG_ON(!(vq->split.desc_extra[head].flags & VRING_DESC_F_INDIRECT)); BUG_ON(len == 0 || len % sizeof(struct vring_desc)); num = len / sizeof(struct vring_desc); extra = (struct vring_desc_extra *)&indir_desc[num]; if (vq->use_dma_api) { for (j = 0; j < num; j++) vring_unmap_one_split(vq, &extra[j]); } kfree(indir_desc); vq->split.desc_state[head].indir_desc = NULL; } else if (ctx) { *ctx = vq->split.desc_state[head].indir_desc; } } static bool more_used_split(const struct vring_virtqueue *vq) { return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->split.vring.used->idx); } static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, unsigned int *len, void **ctx) { struct vring_virtqueue *vq = to_vvq(_vq); void *ret; unsigned int i; u16 last_used; START_USE(vq); if (unlikely(vq->broken)) { END_USE(vq); return NULL; } if (!more_used_split(vq)) { pr_debug("No more buffers in queue\n"); END_USE(vq); return NULL; } /* Only get used array entries after they have been exposed by host. 
*/ virtio_rmb(vq->weak_barriers); last_used = (vq->last_used_idx & (vq->split.vring.num - 1)); i = virtio32_to_cpu(_vq->vdev, vq->split.vring.used->ring[last_used].id); *len = virtio32_to_cpu(_vq->vdev, vq->split.vring.used->ring[last_used].len); if (unlikely(i >= vq->split.vring.num)) { BAD_RING(vq, "id %u out of range\n", i); return NULL; } if (unlikely(!vq->split.desc_state[i].data)) { BAD_RING(vq, "id %u is not a head!\n", i); return NULL; } /* detach_buf_split clears data, so grab it now. */ ret = vq->split.desc_state[i].data; detach_buf_split(vq, i, ctx); vq->last_used_idx++; /* If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before * the read in the next get_buf call. */ if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) virtio_store_mb(vq->weak_barriers, &vring_used_event(&vq->split.vring), cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); LAST_ADD_TIME_INVALID(vq); END_USE(vq); return ret; } static void virtqueue_disable_cb_split(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; /* * If device triggered an event already it won't trigger one again: * no need to disable. */ if (vq->event_triggered) return; if (vq->event) /* TODO: this is a hack. Figure out a cleaner value to write. */ vring_used_event(&vq->split.vring) = 0x0; else vq->split.vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->split.avail_flags_shadow); } } static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 last_used_idx; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. */ if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; if (!vq->event) vq->split.vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->split.avail_flags_shadow); } vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); END_USE(vq); return last_used_idx; } static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx); } static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 bufs; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always update the event index to keep code simple. 
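 *
 * With event indices the device applies the spec's u16 test to decide
 * whether to interrupt, i.e. (a sketch):
 *
 *   (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old_idx)
 *
 * so parking the event index ~3/4 of the way through the backlog (the
 * threshold below) batches interrupts.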
*/ if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; if (!vq->event) vq->split.vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->split.avail_flags_shadow); } /* TODO: tune this threshold */ bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4; virtio_store_mb(vq->weak_barriers, &vring_used_event(&vq->split.vring), cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx) - vq->last_used_idx) > bufs)) { END_USE(vq); return false; } END_USE(vq); return true; } static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i; void *buf; START_USE(vq); for (i = 0; i < vq->split.vring.num; i++) { if (!vq->split.desc_state[i].data) continue; /* detach_buf_split clears data, so grab it now. */ buf = vq->split.desc_state[i].data; detach_buf_split(vq, i, NULL); vq->split.avail_idx_shadow--; vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->split.avail_idx_shadow); END_USE(vq); return buf; } /* That should have freed everything. */ BUG_ON(vq->vq.num_free != vq->split.vring.num); END_USE(vq); return NULL; } static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split, struct vring_virtqueue *vq) { struct virtio_device *vdev; vdev = vq->vq.vdev; vring_split->avail_flags_shadow = 0; vring_split->avail_idx_shadow = 0; /* No callback? Tell other side not to bother us. */ if (!vq->vq.callback) { vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; if (!vq->event) vring_split->vring.avail->flags = cpu_to_virtio16(vdev, vring_split->avail_flags_shadow); } } static void virtqueue_reinit_split(struct vring_virtqueue *vq) { int num; num = vq->split.vring.num; vq->split.vring.avail->flags = 0; vq->split.vring.avail->idx = 0; /* reset avail event */ vq->split.vring.avail->ring[num] = 0; vq->split.vring.used->flags = 0; vq->split.vring.used->idx = 0; /* reset used event */ *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0; virtqueue_init(vq, num); virtqueue_vring_init_split(&vq->split, vq); } static void virtqueue_vring_attach_split(struct vring_virtqueue *vq, struct vring_virtqueue_split *vring_split) { vq->split = *vring_split; /* Put everything in free lists. */ vq->free_head = 0; } static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split) { struct vring_desc_state_split *state; struct vring_desc_extra *extra; u32 num = vring_split->vring.num; state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL); if (!state) goto err_state; extra = vring_alloc_desc_extra(num); if (!extra) goto err_extra; memset(state, 0, num * sizeof(struct vring_desc_state_split)); vring_split->desc_state = state; vring_split->desc_extra = extra; return 0; err_extra: kfree(state); err_state: return -ENOMEM; } static void vring_free_split(struct vring_virtqueue_split *vring_split, struct virtio_device *vdev, struct device *dma_dev) { vring_free_queue(vdev, vring_split->queue_size_in_bytes, vring_split->vring.desc, vring_split->queue_dma_addr, dma_dev); kfree(vring_split->desc_state); kfree(vring_split->desc_extra); } static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split, struct virtio_device *vdev, u32 num, unsigned int vring_align, bool may_reduce_num, struct device *dma_dev) { void *queue = NULL; dma_addr_t dma_addr; /* We assume num is a power of 2. 
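 *
 * A power-of-two size lets every ring index wrap with a mask instead
 * of a modulo, e.g.:
 *
 *   slot = idx & (num - 1);   correct only when num is a power of 2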
*/ if (!is_power_of_2(num)) { dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); return -EINVAL; } /* TODO: allocate each queue chunk individually */ for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) { queue = vring_alloc_queue(vdev, vring_size(num, vring_align), &dma_addr, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, dma_dev); if (queue) break; if (!may_reduce_num) return -ENOMEM; } if (!num) return -ENOMEM; if (!queue) { /* Try to get a single page. You are my only hope! */ queue = vring_alloc_queue(vdev, vring_size(num, vring_align), &dma_addr, GFP_KERNEL | __GFP_ZERO, dma_dev); } if (!queue) return -ENOMEM; vring_init(&vring_split->vring, num, queue, vring_align); vring_split->queue_dma_addr = dma_addr; vring_split->queue_size_in_bytes = vring_size(num, vring_align); vring_split->vring_align = vring_align; vring_split->may_reduce_num = may_reduce_num; return 0; } static struct virtqueue *__vring_new_virtqueue_split(unsigned int index, struct vring_virtqueue_split *vring_split, struct virtio_device *vdev, bool weak_barriers, bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name, struct device *dma_dev) { struct vring_virtqueue *vq; int err; vq = kmalloc(sizeof(*vq), GFP_KERNEL); if (!vq) return NULL; vq->packed_ring = false; vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; vq->vq.index = index; vq->vq.reset = false; vq->we_own_ring = false; vq->notify = notify; vq->weak_barriers = weak_barriers; #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION vq->broken = true; #else vq->broken = false; #endif vq->dma_dev = dma_dev; vq->use_dma_api = vring_use_dma_api(vdev); vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && !context; vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) vq->weak_barriers = false; err = vring_alloc_state_extra_split(vring_split); if (err) { kfree(vq); return NULL; } virtqueue_vring_init_split(vring_split, vq); virtqueue_init(vq, vring_split->vring.num); virtqueue_vring_attach_split(vq, vring_split); spin_lock(&vdev->vqs_list_lock); list_add_tail(&vq->vq.list, &vdev->vqs); spin_unlock(&vdev->vqs_list_lock); return &vq->vq; } static struct virtqueue *vring_create_virtqueue_split( unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num, bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name, struct device *dma_dev) { struct vring_virtqueue_split vring_split = {}; struct virtqueue *vq; int err; err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align, may_reduce_num, dma_dev); if (err) return NULL; vq = __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers, context, notify, callback, name, dma_dev); if (!vq) { vring_free_split(&vring_split, vdev, dma_dev); return NULL; } to_vvq(vq)->we_own_ring = true; return vq; } static int virtqueue_resize_split(struct virtqueue *_vq, u32 num) { struct vring_virtqueue_split vring_split = {}; struct vring_virtqueue *vq = to_vvq(_vq); struct virtio_device *vdev = _vq->vdev; int err; err = vring_alloc_queue_split(&vring_split, vdev, num, vq->split.vring_align, vq->split.may_reduce_num, vring_dma_dev(vq)); if (err) goto err; err = vring_alloc_state_extra_split(&vring_split); if (err) goto err_state_extra; vring_free(&vq->vq); virtqueue_vring_init_split(&vring_split, vq); virtqueue_init(vq, vring_split.vring.num); 
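	/*
	 * The old ring was freed by vring_free() above and the new split
	 * ring is initialized; attaching it below switches vq->split over
	 * to the new descriptor, state and extra arrays.
	 */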
virtqueue_vring_attach_split(vq, &vring_split); return 0; err_state_extra: vring_free_split(&vring_split, vdev, vring_dma_dev(vq)); err: virtqueue_reinit_split(vq); return -ENOMEM; } /* * Packed ring specific functions - *_packed(). */ static bool packed_used_wrap_counter(u16 last_used_idx) { return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR)); } static u16 packed_last_used(u16 last_used_idx) { return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR)); } static void vring_unmap_extra_packed(const struct vring_virtqueue *vq, const struct vring_desc_extra *extra) { u16 flags; flags = extra->flags; if (flags & VRING_DESC_F_INDIRECT) { if (!vq->use_dma_api) return; dma_unmap_single(vring_dma_dev(vq), extra->addr, extra->len, (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } else { if (!vring_need_unmap_buffer(vq, extra)) return; dma_unmap_page(vring_dma_dev(vq), extra->addr, extra->len, (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } } static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg, gfp_t gfp) { struct vring_desc_extra *extra; struct vring_packed_desc *desc; int i, size; /* * We require lowmem mappings for the descriptors because * otherwise virt_to_phys will give us bogus addresses in the * virtqueue. */ gfp &= ~__GFP_HIGHMEM; size = (sizeof(*desc) + sizeof(*extra)) * total_sg; desc = kmalloc(size, gfp); if (!desc) return NULL; extra = (struct vring_desc_extra *)&desc[total_sg]; for (i = 0; i < total_sg; i++) extra[i].next = i + 1; return desc; } static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, struct scatterlist *sgs[], unsigned int total_sg, unsigned int out_sgs, unsigned int in_sgs, void *data, bool premapped, gfp_t gfp) { struct vring_desc_extra *extra; struct vring_packed_desc *desc; struct scatterlist *sg; unsigned int i, n, err_idx, len; u16 head, id; dma_addr_t addr; head = vq->packed.next_avail_idx; desc = alloc_indirect_packed(total_sg, gfp); if (!desc) return -ENOMEM; extra = (struct vring_desc_extra *)&desc[total_sg]; if (unlikely(vq->vq.num_free < 1)) { pr_debug("Can't add buf len 1 - avail = 0\n"); kfree(desc); END_USE(vq); return -ENOSPC; } i = 0; id = vq->free_head; BUG_ON(id == vq->packed.vring.num); for (n = 0; n < out_sgs + in_sgs; n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { if (vring_map_one_sg(vq, sg, n < out_sgs ? DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr, &len, premapped)) goto unmap_release; desc[i].flags = cpu_to_le16(n < out_sgs ? 0 : VRING_DESC_F_WRITE); desc[i].addr = cpu_to_le64(addr); desc[i].len = cpu_to_le32(len); if (unlikely(vq->use_dma_api)) { extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr; extra[i].len = len; extra[i].flags = n < out_sgs ? 0 : VRING_DESC_F_WRITE; } i++; } } /* Now that the indirect table is filled in, map it. */ addr = vring_map_single(vq, desc, total_sg * sizeof(struct vring_packed_desc), DMA_TO_DEVICE); if (vring_mapping_error(vq, addr)) goto unmap_release; vq->packed.vring.desc[head].addr = cpu_to_le64(addr); vq->packed.vring.desc[head].len = cpu_to_le32(total_sg * sizeof(struct vring_packed_desc)); vq->packed.vring.desc[head].id = cpu_to_le16(id); if (vq->use_dma_api) { vq->packed.desc_extra[id].addr = addr; vq->packed.desc_extra[id].len = total_sg * sizeof(struct vring_packed_desc); vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT | vq->packed.avail_used_flags; } /* * A driver MUST NOT make the first descriptor in the list * available before all subsequent descriptors comprising * the list are made available. 
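 *
 * Hence the order below, sketched:
 *
 *   desc[head].addr/len/id = ...;    descriptor body first
 *   virtio_wmb();                    body visible before flags
 *   desc[head].flags = avail/used;   ownership flips last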
*/ virtio_wmb(vq->weak_barriers); vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT | vq->packed.avail_used_flags); /* We're using some buffers from the free list. */ vq->vq.num_free -= 1; /* Update free pointer */ n = head + 1; if (n >= vq->packed.vring.num) { n = 0; vq->packed.avail_wrap_counter ^= 1; vq->packed.avail_used_flags ^= 1 << VRING_PACKED_DESC_F_AVAIL | 1 << VRING_PACKED_DESC_F_USED; } vq->packed.next_avail_idx = n; vq->free_head = vq->packed.desc_extra[id].next; /* Store token and indirect buffer state. */ vq->packed.desc_state[id].num = 1; vq->packed.desc_state[id].data = data; vq->packed.desc_state[id].indir_desc = desc; vq->packed.desc_state[id].last = id; vq->num_added += 1; pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); return 0; unmap_release: err_idx = i; for (i = 0; i < err_idx; i++) vring_unmap_extra_packed(vq, &extra[i]); kfree(desc); END_USE(vq); return -ENOMEM; } static inline int virtqueue_add_packed(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int total_sg, unsigned int out_sgs, unsigned int in_sgs, void *data, void *ctx, bool premapped, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); struct vring_packed_desc *desc; struct scatterlist *sg; unsigned int i, n, c, descs_used, err_idx, len; __le16 head_flags, flags; u16 head, id, prev, curr, avail_used_flags; int err; START_USE(vq); BUG_ON(data == NULL); BUG_ON(ctx && vq->indirect); if (unlikely(vq->broken)) { END_USE(vq); return -EIO; } LAST_ADD_TIME_UPDATE(vq); BUG_ON(total_sg == 0); if (virtqueue_use_indirect(vq, total_sg)) { err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, in_sgs, data, premapped, gfp); if (err != -ENOMEM) { END_USE(vq); return err; } /* fall back on direct */ } head = vq->packed.next_avail_idx; avail_used_flags = vq->packed.avail_used_flags; WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect); desc = vq->packed.vring.desc; i = head; descs_used = total_sg; if (unlikely(vq->vq.num_free < descs_used)) { pr_debug("Can't add buf len %i - avail = %i\n", descs_used, vq->vq.num_free); END_USE(vq); return -ENOSPC; } id = vq->free_head; BUG_ON(id == vq->packed.vring.num); curr = id; c = 0; for (n = 0; n < out_sgs + in_sgs; n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { dma_addr_t addr; if (vring_map_one_sg(vq, sg, n < out_sgs ? DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr, &len, premapped)) goto unmap_release; flags = cpu_to_le16(vq->packed.avail_used_flags | (++c == total_sg ? 0 : VRING_DESC_F_NEXT) | (n < out_sgs ? 0 : VRING_DESC_F_WRITE)); if (i == head) head_flags = flags; else desc[i].flags = flags; desc[i].addr = cpu_to_le64(addr); desc[i].len = cpu_to_le32(len); desc[i].id = cpu_to_le16(id); if (unlikely(vq->use_dma_api)) { vq->packed.desc_extra[curr].addr = premapped ? DMA_MAPPING_ERROR : addr; vq->packed.desc_extra[curr].len = len; vq->packed.desc_extra[curr].flags = le16_to_cpu(flags); } prev = curr; curr = vq->packed.desc_extra[curr].next; if ((unlikely(++i >= vq->packed.vring.num))) { i = 0; vq->packed.avail_used_flags ^= 1 << VRING_PACKED_DESC_F_AVAIL | 1 << VRING_PACKED_DESC_F_USED; } } } if (i <= head) vq->packed.avail_wrap_counter ^= 1; /* We're using some buffers from the free list. */ vq->vq.num_free -= descs_used; /* Update free pointer */ vq->packed.next_avail_idx = i; vq->free_head = curr; /* Store token. 
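 * The buffer id (not the ring position) keys the driver-private
 * desc_state[]/desc_extra[] arrays; completion starts at id and walks
 * desc_extra[].next for desc_state[id].num entries to unmap them.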
*/ vq->packed.desc_state[id].num = descs_used; vq->packed.desc_state[id].data = data; vq->packed.desc_state[id].indir_desc = ctx; vq->packed.desc_state[id].last = prev; /* * A driver MUST NOT make the first descriptor in the list * available before all subsequent descriptors comprising * the list are made available. */ virtio_wmb(vq->weak_barriers); vq->packed.vring.desc[head].flags = head_flags; vq->num_added += descs_used; pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); return 0; unmap_release: err_idx = i; i = head; curr = vq->free_head; vq->packed.avail_used_flags = avail_used_flags; for (n = 0; n < total_sg; n++) { if (i == err_idx) break; vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]); curr = vq->packed.desc_extra[curr].next; i++; if (i >= vq->packed.vring.num) i = 0; } END_USE(vq); return -EIO; } static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 new, old, off_wrap, flags, wrap_counter, event_idx; bool needs_kick; union { struct { __le16 off_wrap; __le16 flags; }; u32 u32; } snapshot; START_USE(vq); /* * We need to expose the new flags value before checking notification * suppressions. */ virtio_mb(vq->weak_barriers); old = vq->packed.next_avail_idx - vq->num_added; new = vq->packed.next_avail_idx; vq->num_added = 0; snapshot.u32 = *(u32 *)vq->packed.vring.device; flags = le16_to_cpu(snapshot.flags); LAST_ADD_TIME_CHECK(vq); LAST_ADD_TIME_INVALID(vq); if (flags != VRING_PACKED_EVENT_FLAG_DESC) { needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE); goto out; } off_wrap = le16_to_cpu(snapshot.off_wrap); wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR; event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR); if (wrap_counter != vq->packed.avail_wrap_counter) event_idx -= vq->packed.vring.num; needs_kick = vring_need_event(event_idx, new, old); out: END_USE(vq); return needs_kick; } static void detach_buf_packed(struct vring_virtqueue *vq, unsigned int id, void **ctx) { struct vring_desc_state_packed *state = NULL; struct vring_packed_desc *desc; unsigned int i, curr; state = &vq->packed.desc_state[id]; /* Clear data ptr. */ state->data = NULL; vq->packed.desc_extra[state->last].next = vq->free_head; vq->free_head = id; vq->vq.num_free += state->num; if (unlikely(vq->use_dma_api)) { curr = id; for (i = 0; i < state->num; i++) { vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]); curr = vq->packed.desc_extra[curr].next; } } if (vq->indirect) { struct vring_desc_extra *extra; u32 len, num; /* Free the indirect table, if any, now that it's unmapped. 
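 *
 * alloc_indirect_packed() made the table one allocation, laid out as
 *
 *   [ vring_packed_desc x num ][ vring_desc_extra x num ]
 *
 * so the per-descriptor DMA metadata sits just past desc[num].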
*/ desc = state->indir_desc; if (!desc) return; if (vq->use_dma_api) { len = vq->packed.desc_extra[id].len; num = len / sizeof(struct vring_packed_desc); extra = (struct vring_desc_extra *)&desc[num]; for (i = 0; i < num; i++) vring_unmap_extra_packed(vq, &extra[i]); } kfree(desc); state->indir_desc = NULL; } else if (ctx) { *ctx = state->indir_desc; } } static inline bool is_used_desc_packed(const struct vring_virtqueue *vq, u16 idx, bool used_wrap_counter) { bool avail, used; u16 flags; flags = le16_to_cpu(vq->packed.vring.desc[idx].flags); avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL)); used = !!(flags & (1 << VRING_PACKED_DESC_F_USED)); return avail == used && used == used_wrap_counter; } static bool more_used_packed(const struct vring_virtqueue *vq) { u16 last_used; u16 last_used_idx; bool used_wrap_counter; last_used_idx = READ_ONCE(vq->last_used_idx); last_used = packed_last_used(last_used_idx); used_wrap_counter = packed_used_wrap_counter(last_used_idx); return is_used_desc_packed(vq, last_used, used_wrap_counter); } static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, unsigned int *len, void **ctx) { struct vring_virtqueue *vq = to_vvq(_vq); u16 last_used, id, last_used_idx; bool used_wrap_counter; void *ret; START_USE(vq); if (unlikely(vq->broken)) { END_USE(vq); return NULL; } if (!more_used_packed(vq)) { pr_debug("No more buffers in queue\n"); END_USE(vq); return NULL; } /* Only get used elements after they have been exposed by host. */ virtio_rmb(vq->weak_barriers); last_used_idx = READ_ONCE(vq->last_used_idx); used_wrap_counter = packed_used_wrap_counter(last_used_idx); last_used = packed_last_used(last_used_idx); id = le16_to_cpu(vq->packed.vring.desc[last_used].id); *len = le32_to_cpu(vq->packed.vring.desc[last_used].len); if (unlikely(id >= vq->packed.vring.num)) { BAD_RING(vq, "id %u out of range\n", id); return NULL; } if (unlikely(!vq->packed.desc_state[id].data)) { BAD_RING(vq, "id %u is not a head!\n", id); return NULL; } /* detach_buf_packed clears data, so grab it now. */ ret = vq->packed.desc_state[id].data; detach_buf_packed(vq, id, ctx); last_used += vq->packed.desc_state[id].num; if (unlikely(last_used >= vq->packed.vring.num)) { last_used -= vq->packed.vring.num; used_wrap_counter ^= 1; } last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR)); WRITE_ONCE(vq->last_used_idx, last_used); /* * If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before * the read in the next get_buf call. */ if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) virtio_store_mb(vq->weak_barriers, &vq->packed.vring.driver->off_wrap, cpu_to_le16(vq->last_used_idx)); LAST_ADD_TIME_INVALID(vq); END_USE(vq); return ret; } static void virtqueue_disable_cb_packed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) { vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; /* * If device triggered an event already it won't trigger one again: * no need to disable. */ if (vq->event_triggered) return; vq->packed.vring.driver->flags = cpu_to_le16(vq->packed.event_flags_shadow); } } static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); START_USE(vq); /* * We optimistically turn back on interrupts, then check if there was * more to do. 
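 *
 * With event indices the driver publishes its event point as off_wrap,
 * encoded as (a sketch):
 *
 *   off_wrap = used_idx | (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);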
*/ if (vq->event) { vq->packed.vring.driver->off_wrap = cpu_to_le16(vq->last_used_idx); /* * We need to update event offset and event wrap * counter first before updating event flags. */ virtio_wmb(vq->weak_barriers); } if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { vq->packed.event_flags_shadow = vq->event ? VRING_PACKED_EVENT_FLAG_DESC : VRING_PACKED_EVENT_FLAG_ENABLE; vq->packed.vring.driver->flags = cpu_to_le16(vq->packed.event_flags_shadow); } END_USE(vq); return vq->last_used_idx; } static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap) { struct vring_virtqueue *vq = to_vvq(_vq); bool wrap_counter; u16 used_idx; wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR; used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR); return is_used_desc_packed(vq, used_idx, wrap_counter); } static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 used_idx, wrap_counter, last_used_idx; u16 bufs; START_USE(vq); /* * We optimistically turn back on interrupts, then check if there was * more to do. */ if (vq->event) { /* TODO: tune this threshold */ bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4; last_used_idx = READ_ONCE(vq->last_used_idx); wrap_counter = packed_used_wrap_counter(last_used_idx); used_idx = packed_last_used(last_used_idx) + bufs; if (used_idx >= vq->packed.vring.num) { used_idx -= vq->packed.vring.num; wrap_counter ^= 1; } vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx | (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR)); /* * We need to update event offset and event wrap * counter first before updating event flags. */ virtio_wmb(vq->weak_barriers); } if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { vq->packed.event_flags_shadow = vq->event ? VRING_PACKED_EVENT_FLAG_DESC : VRING_PACKED_EVENT_FLAG_ENABLE; vq->packed.vring.driver->flags = cpu_to_le16(vq->packed.event_flags_shadow); } /* * We need to update event suppression structure first * before re-checking for more used buffers. */ virtio_mb(vq->weak_barriers); last_used_idx = READ_ONCE(vq->last_used_idx); wrap_counter = packed_used_wrap_counter(last_used_idx); used_idx = packed_last_used(last_used_idx); if (is_used_desc_packed(vq, used_idx, wrap_counter)) { END_USE(vq); return false; } END_USE(vq); return true; } static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i; void *buf; START_USE(vq); for (i = 0; i < vq->packed.vring.num; i++) { if (!vq->packed.desc_state[i].data) continue; /* detach_buf clears data, so grab it now. */ buf = vq->packed.desc_state[i].data; detach_buf_packed(vq, i, NULL); END_USE(vq); return buf; } /* That should have freed everything. 
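 * i.e. every descriptor is back on the free list, which the BUG_ON
 * below cross-checks against the ring size.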
*/ BUG_ON(vq->vq.num_free != vq->packed.vring.num); END_USE(vq); return NULL; } static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num) { struct vring_desc_extra *desc_extra; unsigned int i; desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra), GFP_KERNEL); if (!desc_extra) return NULL; memset(desc_extra, 0, num * sizeof(struct vring_desc_extra)); for (i = 0; i < num - 1; i++) desc_extra[i].next = i + 1; return desc_extra; } static void vring_free_packed(struct vring_virtqueue_packed *vring_packed, struct virtio_device *vdev, struct device *dma_dev) { if (vring_packed->vring.desc) vring_free_queue(vdev, vring_packed->ring_size_in_bytes, vring_packed->vring.desc, vring_packed->ring_dma_addr, dma_dev); if (vring_packed->vring.driver) vring_free_queue(vdev, vring_packed->event_size_in_bytes, vring_packed->vring.driver, vring_packed->driver_event_dma_addr, dma_dev); if (vring_packed->vring.device) vring_free_queue(vdev, vring_packed->event_size_in_bytes, vring_packed->vring.device, vring_packed->device_event_dma_addr, dma_dev); kfree(vring_packed->desc_state); kfree(vring_packed->desc_extra); } static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed, struct virtio_device *vdev, u32 num, struct device *dma_dev) { struct vring_packed_desc *ring; struct vring_packed_desc_event *driver, *device; dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr; size_t ring_size_in_bytes, event_size_in_bytes; ring_size_in_bytes = num * sizeof(struct vring_packed_desc); ring = vring_alloc_queue(vdev, ring_size_in_bytes, &ring_dma_addr, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, dma_dev); if (!ring) goto err; vring_packed->vring.desc = ring; vring_packed->ring_dma_addr = ring_dma_addr; vring_packed->ring_size_in_bytes = ring_size_in_bytes; event_size_in_bytes = sizeof(struct vring_packed_desc_event); driver = vring_alloc_queue(vdev, event_size_in_bytes, &driver_event_dma_addr, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, dma_dev); if (!driver) goto err; vring_packed->vring.driver = driver; vring_packed->event_size_in_bytes = event_size_in_bytes; vring_packed->driver_event_dma_addr = driver_event_dma_addr; device = vring_alloc_queue(vdev, event_size_in_bytes, &device_event_dma_addr, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, dma_dev); if (!device) goto err; vring_packed->vring.device = device; vring_packed->device_event_dma_addr = device_event_dma_addr; vring_packed->vring.num = num; return 0; err: vring_free_packed(vring_packed, vdev, dma_dev); return -ENOMEM; } static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed) { struct vring_desc_state_packed *state; struct vring_desc_extra *extra; u32 num = vring_packed->vring.num; state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL); if (!state) goto err_desc_state; memset(state, 0, num * sizeof(struct vring_desc_state_packed)); extra = vring_alloc_desc_extra(num); if (!extra) goto err_desc_extra; vring_packed->desc_state = state; vring_packed->desc_extra = extra; return 0; err_desc_extra: kfree(state); err_desc_state: return -ENOMEM; } static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed, bool callback) { vring_packed->next_avail_idx = 0; vring_packed->avail_wrap_counter = 1; vring_packed->event_flags_shadow = 0; vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL; /* No callback? Tell other side not to bother us. 
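 * A queue without a callback never wants device events, so suppression
 * is recorded in event_flags_shadow and pushed to the driver event
 * area up front.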
*/ if (!callback) { vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; vring_packed->vring.driver->flags = cpu_to_le16(vring_packed->event_flags_shadow); } } static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq, struct vring_virtqueue_packed *vring_packed) { vq->packed = *vring_packed; /* Put everything in free lists. */ vq->free_head = 0; } static void virtqueue_reinit_packed(struct vring_virtqueue *vq) { memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes); memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes); /* we need to reset the desc.flags. For more, see is_used_desc_packed() */ memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes); virtqueue_init(vq, vq->packed.vring.num); virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback); } static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index, struct vring_virtqueue_packed *vring_packed, struct virtio_device *vdev, bool weak_barriers, bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name, struct device *dma_dev) { struct vring_virtqueue *vq; int err; vq = kmalloc(sizeof(*vq), GFP_KERNEL); if (!vq) return NULL; vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; vq->vq.index = index; vq->vq.reset = false; vq->we_own_ring = false; vq->notify = notify; vq->weak_barriers = weak_barriers; #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION vq->broken = true; #else vq->broken = false; #endif vq->packed_ring = true; vq->dma_dev = dma_dev; vq->use_dma_api = vring_use_dma_api(vdev); vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && !context; vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) vq->weak_barriers = false; err = vring_alloc_state_extra_packed(vring_packed); if (err) { kfree(vq); return NULL; } virtqueue_vring_init_packed(vring_packed, !!callback); virtqueue_init(vq, vring_packed->vring.num); virtqueue_vring_attach_packed(vq, vring_packed); spin_lock(&vdev->vqs_list_lock); list_add_tail(&vq->vq.list, &vdev->vqs); spin_unlock(&vdev->vqs_list_lock); return &vq->vq; } static struct virtqueue *vring_create_virtqueue_packed( unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num, bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name, struct device *dma_dev) { struct vring_virtqueue_packed vring_packed = {}; struct virtqueue *vq; if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev)) return NULL; vq = __vring_new_virtqueue_packed(index, &vring_packed, vdev, weak_barriers, context, notify, callback, name, dma_dev); if (!vq) { vring_free_packed(&vring_packed, vdev, dma_dev); return NULL; } to_vvq(vq)->we_own_ring = true; return vq; } static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num) { struct vring_virtqueue_packed vring_packed = {}; struct vring_virtqueue *vq = to_vvq(_vq); struct virtio_device *vdev = _vq->vdev; int err; if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq))) goto err_ring; err = vring_alloc_state_extra_packed(&vring_packed); if (err) goto err_state_extra; vring_free(&vq->vq); virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback); virtqueue_init(vq, vring_packed.vring.num); virtqueue_vring_attach_packed(vq, &vring_packed); return 0; err_state_extra: vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq)); 
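	/*
	 * The old ring has not been freed on these error paths, so
	 * reinitializing it below leaves the queue usable at its original
	 * size, matching the documented -ENOMEM fallback of
	 * virtqueue_resize().
	 */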
err_ring: virtqueue_reinit_packed(vq); return -ENOMEM; } static int virtqueue_disable_and_recycle(struct virtqueue *_vq, void (*recycle)(struct virtqueue *vq, void *buf)) { struct vring_virtqueue *vq = to_vvq(_vq); struct virtio_device *vdev = vq->vq.vdev; void *buf; int err; if (!vq->we_own_ring) return -EPERM; if (!vdev->config->disable_vq_and_reset) return -ENOENT; if (!vdev->config->enable_vq_after_reset) return -ENOENT; err = vdev->config->disable_vq_and_reset(_vq); if (err) return err; while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL) recycle(_vq, buf); return 0; } static int virtqueue_enable_after_reset(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); struct virtio_device *vdev = vq->vq.vdev; if (vdev->config->enable_vq_after_reset(_vq)) return -EBUSY; return 0; } /* * Generic functions and exported symbols. */ static inline int virtqueue_add(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int total_sg, unsigned int out_sgs, unsigned int in_sgs, void *data, void *ctx, bool premapped, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, out_sgs, in_sgs, data, ctx, premapped, gfp) : virtqueue_add_split(_vq, sgs, total_sg, out_sgs, in_sgs, data, ctx, premapped, gfp); } /** * virtqueue_add_sgs - expose buffers to other end * @_vq: the struct virtqueue we're talking about. * @sgs: array of terminated scatterlists. * @out_sgs: the number of scatterlists readable by other side * @in_sgs: the number of scatterlists which are writable (after readable ones) * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ int virtqueue_add_sgs(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) { unsigned int i, total_sg = 0; /* Count them first. */ for (i = 0; i < out_sgs + in_sgs; i++) { struct scatterlist *sg; for (sg = sgs[i]; sg; sg = sg_next(sg)) total_sg++; } return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, NULL, false, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_sgs); /** * virtqueue_add_outbuf - expose output buffers to other end * @vq: the struct virtqueue we're talking about. * @sg: scatterlist (must be well-formed and terminated!) * @num: the number of entries in @sg readable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ int virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg, unsigned int num, void *data, gfp_t gfp) { return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); /** * virtqueue_add_outbuf_premapped - expose output buffers to other end * @vq: the struct virtqueue we're talking about. * @sg: scatterlist (must be well-formed and terminated!) * @num: the number of entries in @sg readable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Return: * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 
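 *
 * A minimal usage sketch (hypothetical driver code; buf, len and sg are
 * the caller's): in premapped mode the caller maps the buffer itself
 * and records the DMA address in the scatterlist before adding it:
 *
 *   dma_addr_t dma = virtqueue_dma_map_single_attrs(vq, buf, len,
 *                                                   DMA_TO_DEVICE, 0);
 *   if (virtqueue_dma_mapping_error(vq, dma))
 *           return -ENOMEM;
 *   sg_init_one(&sg, buf, len);
 *   sg.dma_address = dma;
 *   err = virtqueue_add_outbuf_premapped(vq, &sg, 1, buf, GFP_ATOMIC);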
*/ int virtqueue_add_outbuf_premapped(struct virtqueue *vq, struct scatterlist *sg, unsigned int num, void *data, gfp_t gfp) { return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_outbuf_premapped); /** * virtqueue_add_inbuf - expose input buffers to other end * @vq: the struct virtqueue we're talking about. * @sg: scatterlist (must be well-formed and terminated!) * @num: the number of entries in @sg writable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ int virtqueue_add_inbuf(struct virtqueue *vq, struct scatterlist *sg, unsigned int num, void *data, gfp_t gfp) { return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); /** * virtqueue_add_inbuf_ctx - expose input buffers to other end * @vq: the struct virtqueue we're talking about. * @sg: scatterlist (must be well-formed and terminated!) * @num: the number of entries in @sg writable by other side * @data: the token identifying the buffer. * @ctx: extra context for the token * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ int virtqueue_add_inbuf_ctx(struct virtqueue *vq, struct scatterlist *sg, unsigned int num, void *data, void *ctx, gfp_t gfp) { return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx); /** * virtqueue_add_inbuf_premapped - expose input buffers to other end * @vq: the struct virtqueue we're talking about. * @sg: scatterlist (must be well-formed and terminated!) * @num: the number of entries in @sg writable by other side * @data: the token identifying the buffer. * @ctx: extra context for the token * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Return: * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ int virtqueue_add_inbuf_premapped(struct virtqueue *vq, struct scatterlist *sg, unsigned int num, void *data, void *ctx, gfp_t gfp) { return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_premapped); /** * virtqueue_dma_dev - get the dma dev * @_vq: the struct virtqueue we're talking about. * * Returns the dma dev. That can been used for dma api. */ struct device *virtqueue_dma_dev(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (vq->use_dma_api) return vring_dma_dev(vq); else return NULL; } EXPORT_SYMBOL_GPL(virtqueue_dma_dev); /** * virtqueue_kick_prepare - first half of split virtqueue_kick call. * @_vq: the struct virtqueue * * Instead of virtqueue_kick(), you can do: * if (virtqueue_kick_prepare(vq)) * virtqueue_notify(vq); * * This is sometimes useful because the virtqueue_kick_prepare() needs * to be serialized, but the actual virtqueue_notify() call does not. */ bool virtqueue_kick_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->packed_ring ? 
virtqueue_kick_prepare_packed(_vq) : virtqueue_kick_prepare_split(_vq); } EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); /** * virtqueue_notify - second half of split virtqueue_kick call. * @_vq: the struct virtqueue * * This does not need to be serialized. * * Returns false if host notify failed or queue is broken, otherwise true. */ bool virtqueue_notify(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (unlikely(vq->broken)) return false; /* Prod other side to tell it about changes. */ if (!vq->notify(_vq)) { vq->broken = true; return false; } return true; } EXPORT_SYMBOL_GPL(virtqueue_notify); /** * virtqueue_kick - update after add_buf * @vq: the struct virtqueue * * After one or more virtqueue_add_* calls, invoke this to kick * the other side. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). * * Returns false if kick failed, otherwise true. */ bool virtqueue_kick(struct virtqueue *vq) { if (virtqueue_kick_prepare(vq)) return virtqueue_notify(vq); return true; } EXPORT_SYMBOL_GPL(virtqueue_kick); /** * virtqueue_get_buf_ctx - get the next used buffer * @_vq: the struct virtqueue we're talking about. * @len: the length written into the buffer * @ctx: extra context for the token * * If the device wrote data into the buffer, @len will be set to the * amount written. This means you don't need to clear the buffer * beforehand to ensure there's no data leakage in the case of short * writes. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). * * Returns NULL if there are no used buffers, or the "data" token * handed to virtqueue_add_*(). */ void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len, void **ctx) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) : virtqueue_get_buf_ctx_split(_vq, len, ctx); } EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx); void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) { return virtqueue_get_buf_ctx(_vq, len, NULL); } EXPORT_SYMBOL_GPL(virtqueue_get_buf); /** * virtqueue_disable_cb - disable callbacks * @_vq: the struct virtqueue we're talking about. * * Note that this is not necessarily synchronous, hence unreliable and only * useful as an optimization. * * Unlike other operations, this need not be serialized. */ void virtqueue_disable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (vq->packed_ring) virtqueue_disable_cb_packed(_vq); else virtqueue_disable_cb_split(_vq); } EXPORT_SYMBOL_GPL(virtqueue_disable_cb); /** * virtqueue_enable_cb_prepare - restart callbacks after disable_cb * @_vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns current queue state * in an opaque unsigned value. This value should be later tested by * virtqueue_poll, to detect a possible race between the driver checking for * more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (vq->event_triggered) vq->event_triggered = false; return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) : virtqueue_enable_cb_prepare_split(_vq); } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); /** * virtqueue_poll - query pending used buffers * @_vq: the struct virtqueue we're talking about. 
* @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). * * Returns "true" if there are pending used buffers in the queue. * * This does not need to be serialized. */ bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); if (unlikely(vq->broken)) return false; virtio_mb(vq->weak_barriers); return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : virtqueue_poll_split(_vq, last_used_idx); } EXPORT_SYMBOL_GPL(virtqueue_poll); /** * virtqueue_enable_cb - restart callbacks after disable_cb. * @_vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns "false" if there are pending * buffers in the queue, to detect a possible race between the driver * checking for more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb(struct virtqueue *_vq) { unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq); return !virtqueue_poll(_vq, last_used_idx); } EXPORT_SYMBOL_GPL(virtqueue_enable_cb); /** * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. * @_vq: the struct virtqueue we're talking about. * * This re-enables callbacks but hints to the other side to delay * interrupts until most of the available buffers have been processed; * it returns "false" if there are many pending buffers in the queue, * to detect a possible race between the driver checking for more work, * and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (vq->event_triggered) data_race(vq->event_triggered = false); return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : virtqueue_enable_cb_delayed_split(_vq); } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); /** * virtqueue_detach_unused_buf - detach first unused buffer * @_vq: the struct virtqueue we're talking about. * * Returns NULL or the "data" token handed to virtqueue_add_*(). * This is not valid on an active queue; it is useful for device * shutdown or the reset queue. */ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) : virtqueue_detach_unused_buf_split(_vq); } EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); static inline bool more_used(const struct vring_virtqueue *vq) { return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq); } /** * vring_interrupt - notify a virtqueue on an interrupt * @irq: the IRQ number (ignored) * @_vq: the struct virtqueue to notify * * Calls the callback function of @_vq to process the virtqueue * notification. */ irqreturn_t vring_interrupt(int irq, void *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (!more_used(vq)) { pr_debug("virtqueue interrupt with no work for %p\n", vq); return IRQ_NONE; } if (unlikely(vq->broken)) { #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION dev_warn_once(&vq->vq.vdev->dev, "virtio vring IRQ raised before DRIVER_OK"); return IRQ_NONE; #else return IRQ_HANDLED; #endif } /* Just a hint for performance: so it's ok that this can be racy! 
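 *
 * event_triggered records that the device already fired an event while
 * callbacks were nominally suppressed; the disable_cb paths consult it
 * to skip rewriting the event suppression area until callbacks are
 * re-armed.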
*/ if (vq->event) data_race(vq->event_triggered = true); pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); if (vq->vq.callback) vq->vq.callback(&vq->vq); return IRQ_HANDLED; } EXPORT_SYMBOL_GPL(vring_interrupt); struct virtqueue *vring_create_virtqueue( unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num, bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name) { if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) return vring_create_virtqueue_packed(index, num, vring_align, vdev, weak_barriers, may_reduce_num, context, notify, callback, name, vdev->dev.parent); return vring_create_virtqueue_split(index, num, vring_align, vdev, weak_barriers, may_reduce_num, context, notify, callback, name, vdev->dev.parent); } EXPORT_SYMBOL_GPL(vring_create_virtqueue); struct virtqueue *vring_create_virtqueue_dma( unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num, bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name, struct device *dma_dev) { if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) return vring_create_virtqueue_packed(index, num, vring_align, vdev, weak_barriers, may_reduce_num, context, notify, callback, name, dma_dev); return vring_create_virtqueue_split(index, num, vring_align, vdev, weak_barriers, may_reduce_num, context, notify, callback, name, dma_dev); } EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma); /** * virtqueue_resize - resize the vring of vq * @_vq: the struct virtqueue we're talking about. * @num: new ring num * @recycle: callback to recycle unused buffers * @recycle_done: callback to be invoked when recycle for all unused buffers done * * When it is really necessary to create a new vring, it will set the current vq * into the reset state. Then call the passed callback to recycle the buffer * that is no longer used. Only after the new vring is successfully created, the * old vring will be released. * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error. * 0: success. * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size. * vq can still work normally * -EBUSY: Failed to sync with device, vq may not work properly * -ENOENT: Transport or device not supported * -E2BIG/-EINVAL: num error * -EPERM: Operation not permitted * */ int virtqueue_resize(struct virtqueue *_vq, u32 num, void (*recycle)(struct virtqueue *vq, void *buf), void (*recycle_done)(struct virtqueue *vq)) { struct vring_virtqueue *vq = to_vvq(_vq); int err; if (num > vq->vq.num_max) return -E2BIG; if (!num) return -EINVAL; if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num) return 0; err = virtqueue_disable_and_recycle(_vq, recycle); if (err) return err; if (recycle_done) recycle_done(_vq); if (vq->packed_ring) err = virtqueue_resize_packed(_vq, num); else err = virtqueue_resize_split(_vq, num); return virtqueue_enable_after_reset(_vq); } EXPORT_SYMBOL_GPL(virtqueue_resize); /** * virtqueue_reset - detach and recycle all unused buffers * @_vq: the struct virtqueue we're talking about. 
* @recycle: callback to recycle unused buffers * @recycle_done: callback to be invoked when recycle for all unused buffers done * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error. * 0: success. * -EBUSY: Failed to sync with device, vq may not work properly * -ENOENT: Transport or device not supported * -EPERM: Operation not permitted */ int virtqueue_reset(struct virtqueue *_vq, void (*recycle)(struct virtqueue *vq, void *buf), void (*recycle_done)(struct virtqueue *vq)) { struct vring_virtqueue *vq = to_vvq(_vq); int err; err = virtqueue_disable_and_recycle(_vq, recycle); if (err) return err; if (recycle_done) recycle_done(_vq); if (vq->packed_ring) virtqueue_reinit_packed(vq); else virtqueue_reinit_split(vq); return virtqueue_enable_after_reset(_vq); } EXPORT_SYMBOL_GPL(virtqueue_reset); struct virtqueue *vring_new_virtqueue(unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, bool context, void *pages, bool (*notify)(struct virtqueue *vq), void (*callback)(struct virtqueue *vq), const char *name) { struct vring_virtqueue_split vring_split = {}; if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) { struct vring_virtqueue_packed vring_packed = {}; vring_packed.vring.num = num; vring_packed.vring.desc = pages; return __vring_new_virtqueue_packed(index, &vring_packed, vdev, weak_barriers, context, notify, callback, name, vdev->dev.parent); } vring_init(&vring_split.vring, num, pages, vring_align); return __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers, context, notify, callback, name, vdev->dev.parent); } EXPORT_SYMBOL_GPL(vring_new_virtqueue); static void vring_free(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (vq->we_own_ring) { if (vq->packed_ring) { vring_free_queue(vq->vq.vdev, vq->packed.ring_size_in_bytes, vq->packed.vring.desc, vq->packed.ring_dma_addr, vring_dma_dev(vq)); vring_free_queue(vq->vq.vdev, vq->packed.event_size_in_bytes, vq->packed.vring.driver, vq->packed.driver_event_dma_addr, vring_dma_dev(vq)); vring_free_queue(vq->vq.vdev, vq->packed.event_size_in_bytes, vq->packed.vring.device, vq->packed.device_event_dma_addr, vring_dma_dev(vq)); kfree(vq->packed.desc_state); kfree(vq->packed.desc_extra); } else { vring_free_queue(vq->vq.vdev, vq->split.queue_size_in_bytes, vq->split.vring.desc, vq->split.queue_dma_addr, vring_dma_dev(vq)); } } if (!vq->packed_ring) { kfree(vq->split.desc_state); kfree(vq->split.desc_extra); } } void vring_del_virtqueue(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); spin_lock(&vq->vq.vdev->vqs_list_lock); list_del(&_vq->list); spin_unlock(&vq->vq.vdev->vqs_list_lock); vring_free(_vq); kfree(vq); } EXPORT_SYMBOL_GPL(vring_del_virtqueue); u32 vring_notification_data(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 next; if (vq->packed_ring) next = (vq->packed.next_avail_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) | vq->packed.avail_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR; else next = vq->split.avail_idx_shadow; return next << 16 | _vq->index; } EXPORT_SYMBOL_GPL(vring_notification_data); /* Manipulates transport-specific feature bits. 
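 * Any transport bit this ring implementation does not understand is
 * cleared, so negotiation can never commit the driver to ring
 * behaviour that is unsupported here.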
*/ void vring_transport_features(struct virtio_device *vdev) { unsigned int i; for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { switch (i) { case VIRTIO_RING_F_INDIRECT_DESC: break; case VIRTIO_RING_F_EVENT_IDX: break; case VIRTIO_F_VERSION_1: break; case VIRTIO_F_ACCESS_PLATFORM: break; case VIRTIO_F_RING_PACKED: break; case VIRTIO_F_ORDER_PLATFORM: break; case VIRTIO_F_NOTIFICATION_DATA: break; default: /* We don't understand this bit. */ __virtio_clear_bit(vdev, i); } } } EXPORT_SYMBOL_GPL(vring_transport_features); /** * virtqueue_get_vring_size - return the size of the virtqueue's vring * @_vq: the struct virtqueue containing the vring of interest. * * Returns the size of the vring. This is mainly used for boasting to * userspace. Unlike other operations, this need not be serialized. */ unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq) { const struct vring_virtqueue *vq = to_vvq(_vq); return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num; } EXPORT_SYMBOL_GPL(virtqueue_get_vring_size); /* * This function should only be called by the core, not directly by the driver. */ void __virtqueue_break(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); /* Pairs with READ_ONCE() in virtqueue_is_broken(). */ WRITE_ONCE(vq->broken, true); } EXPORT_SYMBOL_GPL(__virtqueue_break); /* * This function should only be called by the core, not directly by the driver. */ void __virtqueue_unbreak(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); /* Pairs with READ_ONCE() in virtqueue_is_broken(). */ WRITE_ONCE(vq->broken, false); } EXPORT_SYMBOL_GPL(__virtqueue_unbreak); bool virtqueue_is_broken(const struct virtqueue *_vq) { const struct vring_virtqueue *vq = to_vvq(_vq); return READ_ONCE(vq->broken); } EXPORT_SYMBOL_GPL(virtqueue_is_broken); /* * This should prevent the device from being used, allowing drivers to * recover. You may need to grab appropriate locks to flush. */ void virtio_break_device(struct virtio_device *dev) { struct virtqueue *_vq; spin_lock(&dev->vqs_list_lock); list_for_each_entry(_vq, &dev->vqs, list) { struct vring_virtqueue *vq = to_vvq(_vq); /* Pairs with READ_ONCE() in virtqueue_is_broken(). */ WRITE_ONCE(vq->broken, true); } spin_unlock(&dev->vqs_list_lock); } EXPORT_SYMBOL_GPL(virtio_break_device); /* * This should allow the device to be used by the driver. You may * need to grab appropriate locks to flush the write to * vq->broken. This should only be used in some specific case e.g * (probing and restoring). This function should only be called by the * core, not directly by the driver. */ void __virtio_unbreak_device(struct virtio_device *dev) { struct virtqueue *_vq; spin_lock(&dev->vqs_list_lock); list_for_each_entry(_vq, &dev->vqs, list) { struct vring_virtqueue *vq = to_vvq(_vq); /* Pairs with READ_ONCE() in virtqueue_is_broken(). 
*/ WRITE_ONCE(vq->broken, false); } spin_unlock(&dev->vqs_list_lock); } EXPORT_SYMBOL_GPL(__virtio_unbreak_device); dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq) { const struct vring_virtqueue *vq = to_vvq(_vq); BUG_ON(!vq->we_own_ring); if (vq->packed_ring) return vq->packed.ring_dma_addr; return vq->split.queue_dma_addr; } EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr); dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq) { const struct vring_virtqueue *vq = to_vvq(_vq); BUG_ON(!vq->we_own_ring); if (vq->packed_ring) return vq->packed.driver_event_dma_addr; return vq->split.queue_dma_addr + ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc); } EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr); dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq) { const struct vring_virtqueue *vq = to_vvq(_vq); BUG_ON(!vq->we_own_ring); if (vq->packed_ring) return vq->packed.device_event_dma_addr; return vq->split.queue_dma_addr + ((char *)vq->split.vring.used - (char *)vq->split.vring.desc); } EXPORT_SYMBOL_GPL(virtqueue_get_used_addr); /* Only available for split ring */ const struct vring *virtqueue_get_vring(const struct virtqueue *vq) { return &to_vvq(vq)->split.vring; } EXPORT_SYMBOL_GPL(virtqueue_get_vring); /** * virtqueue_dma_map_single_attrs - map DMA for _vq * @_vq: the struct virtqueue we're talking about. * @ptr: the pointer of the buffer to do dma * @size: the size of the buffer to do dma * @dir: DMA direction * @attrs: DMA Attrs * * The caller calls this to do dma mapping in advance. The DMA address can be * passed to this _vq when it is in pre-mapped mode. * * return DMA address. Caller should check that by virtqueue_dma_mapping_error(). */ dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs) { struct vring_virtqueue *vq = to_vvq(_vq); if (!vq->use_dma_api) { kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir); return (dma_addr_t)virt_to_phys(ptr); } return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs); } EXPORT_SYMBOL_GPL(virtqueue_dma_map_single_attrs); /** * virtqueue_dma_unmap_single_attrs - unmap DMA for _vq * @_vq: the struct virtqueue we're talking about. * @addr: the dma address to unmap * @size: the size of the buffer * @dir: DMA direction * @attrs: DMA Attrs * * Unmap the address that is mapped by the virtqueue_dma_map_* APIs. * */ void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { struct vring_virtqueue *vq = to_vvq(_vq); if (!vq->use_dma_api) return; dma_unmap_single_attrs(vring_dma_dev(vq), addr, size, dir, attrs); } EXPORT_SYMBOL_GPL(virtqueue_dma_unmap_single_attrs); /** * virtqueue_dma_mapping_error - check dma address * @_vq: the struct virtqueue we're talking about. * @addr: DMA address * * Returns 0 means dma valid. Other means invalid dma address. */ int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr) { struct vring_virtqueue *vq = to_vvq(_vq); if (!vq->use_dma_api) return 0; return dma_mapping_error(vring_dma_dev(vq), addr); } EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error); /** * virtqueue_dma_need_sync - check a dma address needs sync * @_vq: the struct virtqueue we're talking about. 
* @addr: DMA address * * Check if the dma address mapped by the virtqueue_dma_map_* APIs needs to be * synchronized * * return bool */ bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr) { struct vring_virtqueue *vq = to_vvq(_vq); if (!vq->use_dma_api) return false; return dma_need_sync(vring_dma_dev(vq), addr); } EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync); /** * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu * @_vq: the struct virtqueue we're talking about. * @addr: DMA address * @offset: DMA address offset * @size: buf size for sync * @dir: DMA direction * * Before calling this function, use virtqueue_dma_need_sync() to confirm that * the DMA address really needs to be synchronized * */ void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr, unsigned long offset, size_t size, enum dma_data_direction dir) { struct vring_virtqueue *vq = to_vvq(_vq); struct device *dev = vring_dma_dev(vq); if (!vq->use_dma_api) return; dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); } EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu); /** * virtqueue_dma_sync_single_range_for_device - dma sync for device * @_vq: the struct virtqueue we're talking about. * @addr: DMA address * @offset: DMA address offset * @size: buf size for sync * @dir: DMA direction * * Before calling this function, use virtqueue_dma_need_sync() to confirm that * the DMA address really needs to be synchronized */ void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr, unsigned long offset, size_t size, enum dma_data_direction dir) { struct vring_virtqueue *vq = to_vvq(_vq); struct device *dev = vring_dma_dev(vq); if (!vq->use_dma_api) return; dma_sync_single_range_for_device(dev, addr, offset, size, dir); } EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device); MODULE_DESCRIPTION("Virtio ring implementation"); MODULE_LICENSE("GPL");
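/*
 * Illustrative sketch (not part of virtio_ring.c): how a driver running a
 * virtqueue in pre-mapped mode might combine the DMA helpers above. The
 * function name and the DMA_TO_DEVICE direction are assumptions made for
 * the example only.
 */
static int example_premap_buffer(struct virtqueue *vq, void *buf, size_t len,
				 dma_addr_t *addr)
{
	/* Map the buffer through the virtqueue's DMA device. */
	*addr = virtqueue_dma_map_single_attrs(vq, buf, len, DMA_TO_DEVICE, 0);
	if (virtqueue_dma_mapping_error(vq, *addr))
		return -ENOMEM;

	/* Only sync when the mapping actually requires it (e.g. swiotlb). */
	if (virtqueue_dma_need_sync(vq, *addr))
		virtqueue_dma_sync_single_range_for_device(vq, *addr, 0, len,
							   DMA_TO_DEVICE);

	/* The premapped address can now be handed to the virtqueue;
	 * undo with virtqueue_dma_unmap_single_attrs() when done.
	 */
	return 0;
}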
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2007-2012 Siemens AG * * Written by: * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> * Sergey Lapin <slapin@ossfans.org> * Maxim Gorbachyov <maxim.gorbachev@siemens.com> * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ #include <linux/netdevice.h> #include <linux/module.h> #include <linux/if_arp.h> #include <linux/ieee802154.h> #include <net/nl802154.h> #include <net/mac802154.h> #include <net/ieee802154_netdev.h> #include <net/cfg802154.h> #include "ieee802154_i.h" #include "driver-ops.h" int mac802154_wpan_update_llsec(struct net_device *dev) { struct
ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); struct wpan_dev *wpan_dev = &sdata->wpan_dev; int rc = 0; if (ops->llsec) { struct ieee802154_llsec_params params; int changed = 0; params.pan_id = wpan_dev->pan_id; changed |= IEEE802154_LLSEC_PARAM_PAN_ID; params.hwaddr = wpan_dev->extended_addr; changed |= IEEE802154_LLSEC_PARAM_HWADDR; rc = ops->llsec->set_params(dev, &params, changed); } return rc; } static int mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); struct wpan_dev *wpan_dev = &sdata->wpan_dev; struct sockaddr_ieee802154 *sa = (struct sockaddr_ieee802154 *)&ifr->ifr_addr; int err = -ENOIOCTLCMD; if (cmd != SIOCGIFADDR && cmd != SIOCSIFADDR) return err; rtnl_lock(); switch (cmd) { case SIOCGIFADDR: { u16 pan_id, short_addr; pan_id = le16_to_cpu(wpan_dev->pan_id); short_addr = le16_to_cpu(wpan_dev->short_addr); if (pan_id == IEEE802154_PANID_BROADCAST || short_addr == IEEE802154_ADDR_BROADCAST) { err = -EADDRNOTAVAIL; break; } sa->family = AF_IEEE802154; sa->addr.addr_type = IEEE802154_ADDR_SHORT; sa->addr.pan_id = pan_id; sa->addr.short_addr = short_addr; err = 0; break; } case SIOCSIFADDR: if (netif_running(dev)) { rtnl_unlock(); return -EBUSY; } dev_warn(&dev->dev, "Using DEBUGing ioctl SIOCSIFADDR isn't recommended!\n"); if (sa->family != AF_IEEE802154 || sa->addr.addr_type != IEEE802154_ADDR_SHORT || sa->addr.pan_id == IEEE802154_PANID_BROADCAST || sa->addr.short_addr == IEEE802154_ADDR_BROADCAST || sa->addr.short_addr == IEEE802154_ADDR_UNDEF) { err = -EINVAL; break; } wpan_dev->pan_id = cpu_to_le16(sa->addr.pan_id); wpan_dev->short_addr = cpu_to_le16(sa->addr.short_addr); err = mac802154_wpan_update_llsec(dev); break; } rtnl_unlock(); return err; } static int mac802154_wpan_mac_addr(struct net_device *dev, void *p) { struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); struct sockaddr *addr = p; __le64 extended_addr; if (netif_running(dev)) return -EBUSY; /* lowpan needs to be down to update * the SLAAC address after ifup */ if (sdata->wpan_dev.lowpan_dev) { if (netif_running(sdata->wpan_dev.lowpan_dev)) return -EBUSY; } ieee802154_be64_to_le64(&extended_addr, addr->sa_data); if (!ieee802154_is_valid_extended_unicast_addr(extended_addr)) return -EINVAL; dev_addr_set(dev, addr->sa_data); sdata->wpan_dev.extended_addr = extended_addr; /* update the lowpan interface mac address when * the wpan mac has been changed */ if (sdata->wpan_dev.lowpan_dev) dev_addr_set(sdata->wpan_dev.lowpan_dev, dev->dev_addr); return mac802154_wpan_update_llsec(dev); } static int ieee802154_setup_hw(struct ieee802154_sub_if_data *sdata) { struct ieee802154_local *local = sdata->local; struct wpan_dev *wpan_dev = &sdata->wpan_dev; int ret; sdata->required_filtering = sdata->iface_default_filtering; if (local->hw.flags & IEEE802154_HW_AFILT) { local->addr_filt.pan_id = wpan_dev->pan_id; local->addr_filt.ieee_addr = wpan_dev->extended_addr; local->addr_filt.short_addr = wpan_dev->short_addr; } if (local->hw.flags & IEEE802154_HW_LBT) { ret = drv_set_lbt_mode(local, wpan_dev->lbt); if (ret < 0) return ret; } if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) { ret = drv_set_csma_params(local, wpan_dev->min_be, wpan_dev->max_be, wpan_dev->csma_retries); if (ret < 0) return ret; } if (local->hw.flags & IEEE802154_HW_FRAME_RETRIES) { ret = drv_set_max_frame_retries(local, wpan_dev->frame_retries); if (ret < 0) return ret; } return 0; }
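/*
 * Illustrative sketch (hypothetical helper, not in the original file): the
 * SIOCSIFADDR path above shows the required ordering when addresses change:
 * only while the interface is down, under RTNL, followed by a resync of the
 * llsec parameters.
 */
static int __maybe_unused example_set_addrs(struct net_device *dev,
					    __le16 pan_id, __le16 short_addr)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);

	ASSERT_RTNL();

	if (netif_running(dev))
		return -EBUSY;	/* addresses may only change while down */

	sdata->wpan_dev.pan_id = pan_id;
	sdata->wpan_dev.short_addr = short_addr;

	/* Keep the link-layer security tables in sync with the new ids. */
	return mac802154_wpan_update_llsec(dev);
}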
static int mac802154_slave_open(struct net_device *dev) { struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); struct ieee802154_local *local = sdata->local; int res; ASSERT_RTNL(); set_bit(SDATA_STATE_RUNNING, &sdata->state); if (!local->open_count) { res = ieee802154_setup_hw(sdata); if (res) goto err; res = drv_start(local, sdata->required_filtering, &local->addr_filt); if (res) goto err; } local->open_count++; netif_start_queue(dev); return 0; err: /* might already be clear but that doesn't matter */ clear_bit(SDATA_STATE_RUNNING, &sdata->state); return res; } static int ieee802154_check_mac_settings(struct ieee802154_local *local, struct ieee802154_sub_if_data *sdata, struct ieee802154_sub_if_data *nsdata) { struct wpan_dev *nwpan_dev = &nsdata->wpan_dev; struct wpan_dev *wpan_dev = &sdata->wpan_dev; ASSERT_RTNL(); if (sdata->iface_default_filtering != nsdata->iface_default_filtering) return -EBUSY; if (local->hw.flags & IEEE802154_HW_AFILT) { if (wpan_dev->pan_id != nwpan_dev->pan_id || wpan_dev->short_addr != nwpan_dev->short_addr || wpan_dev->extended_addr != nwpan_dev->extended_addr) return -EBUSY; } if (local->hw.flags & IEEE802154_HW_CSMA_PARAMS) { if (wpan_dev->min_be != nwpan_dev->min_be || wpan_dev->max_be != nwpan_dev->max_be || wpan_dev->csma_retries != nwpan_dev->csma_retries) return -EBUSY; } if (local->hw.flags & IEEE802154_HW_FRAME_RETRIES) { if (wpan_dev->frame_retries != nwpan_dev->frame_retries) return -EBUSY; } if (local->hw.flags & IEEE802154_HW_LBT) { if (wpan_dev->lbt != nwpan_dev->lbt) return -EBUSY; } return 0; } static int ieee802154_check_concurrent_iface(struct ieee802154_sub_if_data *sdata, enum nl802154_iftype iftype) { struct ieee802154_local *local = sdata->local; struct ieee802154_sub_if_data *nsdata; /* we hold the RTNL here so can safely walk the list */ list_for_each_entry(nsdata, &local->interfaces, list) { if (nsdata != sdata && ieee802154_sdata_running(nsdata)) { int ret; /* TODO multiple node/coord types are currently not supported; * that would require running skb_clone in the rx path. Check * whether there really is a use case for supporting multiple * node/coord types at the same time. */ if (sdata->wpan_dev.iftype != NL802154_IFTYPE_MONITOR && nsdata->wpan_dev.iftype != NL802154_IFTYPE_MONITOR) return -EBUSY; /* check that all phy mac sublayer settings are the same. * We have only one phy; different values make trouble.
*/ ret = ieee802154_check_mac_settings(local, sdata, nsdata); if (ret < 0) return ret; } } return 0; } static int mac802154_wpan_open(struct net_device *dev) { int rc; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); struct wpan_dev *wpan_dev = &sdata->wpan_dev; rc = ieee802154_check_concurrent_iface(sdata, wpan_dev->iftype); if (rc < 0) return rc; return mac802154_slave_open(dev); } static int mac802154_slave_close(struct net_device *dev) { struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); struct ieee802154_local *local = sdata->local; ASSERT_RTNL(); if (mac802154_is_scanning(local)) mac802154_abort_scan_locked(local, sdata); if (mac802154_is_beaconing(local)) mac802154_stop_beacons_locked(local, sdata); netif_stop_queue(dev); local->open_count--; clear_bit(SDATA_STATE_RUNNING, &sdata->state); if (!local->open_count) ieee802154_stop_device(local); return 0; } static int mac802154_set_header_security(struct ieee802154_sub_if_data *sdata, struct ieee802154_hdr *hdr, const struct ieee802154_mac_cb *cb) { struct ieee802154_llsec_params params; u8 level; mac802154_llsec_get_params(&sdata->sec, &params); if (!params.enabled && cb->secen_override && cb->secen) return -EINVAL; if (!params.enabled || (cb->secen_override && !cb->secen) || !params.out_level) return 0; if (cb->seclevel_override && !cb->seclevel) return -EINVAL; level = cb->seclevel_override ? cb->seclevel : params.out_level; hdr->fc.security_enabled = 1; hdr->sec.level = level; hdr->sec.key_id_mode = params.out_key.mode; if (params.out_key.mode == IEEE802154_SCF_KEY_SHORT_INDEX) hdr->sec.short_src = params.out_key.short_source; else if (params.out_key.mode == IEEE802154_SCF_KEY_HW_INDEX) hdr->sec.extended_src = params.out_key.extended_source; hdr->sec.key_id = params.out_key.id; return 0; } static int ieee802154_header_create(struct sk_buff *skb, struct net_device *dev, const struct ieee802154_addr *daddr, const struct ieee802154_addr *saddr, unsigned len) { struct ieee802154_hdr hdr; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); struct wpan_dev *wpan_dev = &sdata->wpan_dev; struct ieee802154_mac_cb *cb = mac_cb(skb); int hlen; if (!daddr) return -EINVAL; memset(&hdr.fc, 0, sizeof(hdr.fc)); hdr.fc.type = cb->type; hdr.fc.security_enabled = cb->secen; hdr.fc.ack_request = cb->ackreq; hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF; if (mac802154_set_header_security(sdata, &hdr, cb) < 0) return -EINVAL; if (!saddr) { if (wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) || wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) || wpan_dev->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) { hdr.source.mode = IEEE802154_ADDR_LONG; hdr.source.extended_addr = wpan_dev->extended_addr; } else { hdr.source.mode = IEEE802154_ADDR_SHORT; hdr.source.short_addr = wpan_dev->short_addr; } hdr.source.pan_id = wpan_dev->pan_id; } else { hdr.source = *(const struct ieee802154_addr *)saddr; } hdr.dest = *(const struct ieee802154_addr *)daddr; hlen = ieee802154_hdr_push(skb, &hdr); if (hlen < 0) return -EINVAL; skb_reset_mac_header(skb); skb->mac_len = hlen; if (len > ieee802154_max_payload(&hdr)) return -EMSGSIZE; return hlen; } static const struct wpan_dev_header_ops ieee802154_header_ops = { .create = ieee802154_header_create, }; /* This header create functionality assumes an 8 byte array for * the source and destination pointer at maximum. To adapt this for * the 802.15.4 data frame header, we use extended address handling * and intra-PAN connections only here.
The fc fields mostly get * fallback handling. This provides dev_hard_header for dgram sockets. */ static int mac802154_header_create(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { struct ieee802154_hdr hdr; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); struct wpan_dev *wpan_dev = &sdata->wpan_dev; struct ieee802154_mac_cb cb = { }; int hlen; if (!daddr) return -EINVAL; memset(&hdr.fc, 0, sizeof(hdr.fc)); hdr.fc.type = IEEE802154_FC_TYPE_DATA; hdr.fc.ack_request = wpan_dev->ackreq; hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF; /* TODO currently a workaround: pass a zeroed cb block so that the * security parameters default according to the MIB. */ if (mac802154_set_header_security(sdata, &hdr, &cb) < 0) return -EINVAL; hdr.dest.pan_id = wpan_dev->pan_id; hdr.dest.mode = IEEE802154_ADDR_LONG; ieee802154_be64_to_le64(&hdr.dest.extended_addr, daddr); hdr.source.pan_id = hdr.dest.pan_id; hdr.source.mode = IEEE802154_ADDR_LONG; if (!saddr) hdr.source.extended_addr = wpan_dev->extended_addr; else ieee802154_be64_to_le64(&hdr.source.extended_addr, saddr); hlen = ieee802154_hdr_push(skb, &hdr); if (hlen < 0) return -EINVAL; skb_reset_mac_header(skb); skb->mac_len = hlen; if (len > ieee802154_max_payload(&hdr)) return -EMSGSIZE; return hlen; } static int mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr) { struct ieee802154_hdr hdr; if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) { pr_debug("malformed packet\n"); return 0; } if (hdr.source.mode == IEEE802154_ADDR_LONG) { ieee802154_le64_to_be64(haddr, &hdr.source.extended_addr); return IEEE802154_EXTENDED_ADDR_LEN; } return 0; } static const struct header_ops mac802154_header_ops = { .create = mac802154_header_create, .parse = mac802154_header_parse, }; static const struct net_device_ops mac802154_wpan_ops = { .ndo_open = mac802154_wpan_open, .ndo_stop = mac802154_slave_close, .ndo_start_xmit = ieee802154_subif_start_xmit, .ndo_do_ioctl = mac802154_wpan_ioctl, .ndo_set_mac_address = mac802154_wpan_mac_addr, }; static const struct net_device_ops mac802154_monitor_ops = { .ndo_open = mac802154_wpan_open, .ndo_stop = mac802154_slave_close, .ndo_start_xmit = ieee802154_monitor_start_xmit, }; static void mac802154_wpan_free(struct net_device *dev) { struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); mac802154_llsec_destroy(&sdata->sec); } static void ieee802154_if_setup(struct net_device *dev) { dev->addr_len = IEEE802154_EXTENDED_ADDR_LEN; memset(dev->broadcast, 0xff, IEEE802154_EXTENDED_ADDR_LEN); /* hard_header_len would normally be IEEE802154_MIN_HEADER_LEN. AF_PACKET * will not send frames without any payload, but ack frames * have no payload, so subtract one so that we can send a 3 byte * frame. The xmit callback assumes at least a hard header where the * two-byte fc and the sequence field are set. */ dev->hard_header_len = IEEE802154_MIN_HEADER_LEN - 1; /* The auth_tag header is for security and is placed in the private * payload room of the mac frame, which sits between the payload and * the FCS field. */ dev->needed_tailroom = IEEE802154_MAX_AUTH_TAG_LEN + IEEE802154_FCS_LEN; /* The mtu size is the payload without the mac header in this case. * We have a dynamic length header with a minimum header length * which is hard_header_len. In this case we let the mtu be the size * of the maximum payload, which is IEEE802154_MTU - IEEE802154_FCS_LEN - * hard_header_len.
The FCS which is set by hardware or ndo_start_xmit * and the minimum mac header which can be evaluated inside driver * layer. The rest of mac header will be part of payload if greater * than hard_header_len. */ dev->mtu = IEEE802154_MTU - IEEE802154_FCS_LEN - dev->hard_header_len; dev->tx_queue_len = 300; dev->flags = IFF_NOARP | IFF_BROADCAST; } static int ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, enum nl802154_iftype type) { struct wpan_dev *wpan_dev = &sdata->wpan_dev; int ret; u8 tmp; /* set some type-dependent values */ sdata->wpan_dev.iftype = type; get_random_bytes(&tmp, sizeof(tmp)); atomic_set(&wpan_dev->bsn, tmp); get_random_bytes(&tmp, sizeof(tmp)); atomic_set(&wpan_dev->dsn, tmp); /* defaults per 802.15.4-2011 */ wpan_dev->min_be = 3; wpan_dev->max_be = 5; wpan_dev->csma_retries = 4; wpan_dev->frame_retries = 3; wpan_dev->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST); wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST); switch (type) { case NL802154_IFTYPE_COORD: case NL802154_IFTYPE_NODE: ieee802154_be64_to_le64(&wpan_dev->extended_addr, sdata->dev->dev_addr); sdata->dev->header_ops = &mac802154_header_ops; sdata->dev->needs_free_netdev = true; sdata->dev->priv_destructor = mac802154_wpan_free; sdata->dev->netdev_ops = &mac802154_wpan_ops; sdata->dev->ml_priv = &mac802154_mlme_wpan; sdata->iface_default_filtering = IEEE802154_FILTERING_4_FRAME_FIELDS; wpan_dev->header_ops = &ieee802154_header_ops; mutex_init(&sdata->sec_mtx); mac802154_llsec_init(&sdata->sec); ret = mac802154_wpan_update_llsec(sdata->dev); if (ret < 0) return ret; break; case NL802154_IFTYPE_MONITOR: sdata->dev->needs_free_netdev = true; sdata->dev->netdev_ops = &mac802154_monitor_ops; sdata->iface_default_filtering = IEEE802154_FILTERING_NONE; break; default: BUG(); } return 0; } struct net_device * ieee802154_if_add(struct ieee802154_local *local, const char *name, unsigned char name_assign_type, enum nl802154_iftype type, __le64 extended_addr) { u8 addr[IEEE802154_EXTENDED_ADDR_LEN]; struct net_device *ndev = NULL; struct ieee802154_sub_if_data *sdata = NULL; int ret; ASSERT_RTNL(); ndev = alloc_netdev(sizeof(*sdata), name, name_assign_type, ieee802154_if_setup); if (!ndev) return ERR_PTR(-ENOMEM); ndev->needed_headroom = local->hw.extra_tx_headroom + IEEE802154_MAX_HEADER_LEN; ret = dev_alloc_name(ndev, ndev->name); if (ret < 0) goto err; ieee802154_le64_to_be64(ndev->perm_addr, &local->hw.phy->perm_extended_addr); switch (type) { case NL802154_IFTYPE_COORD: case NL802154_IFTYPE_NODE: ndev->type = ARPHRD_IEEE802154; if (ieee802154_is_valid_extended_unicast_addr(extended_addr)) { ieee802154_le64_to_be64(addr, &extended_addr); dev_addr_set(ndev, addr); } else { dev_addr_set(ndev, ndev->perm_addr); } break; case NL802154_IFTYPE_MONITOR: ndev->type = ARPHRD_IEEE802154_MONITOR; break; default: ret = -EINVAL; goto err; } /* TODO check this */ SET_NETDEV_DEV(ndev, &local->phy->dev); dev_net_set(ndev, wpan_phy_net(local->hw.phy)); sdata = netdev_priv(ndev); ndev->ieee802154_ptr = &sdata->wpan_dev; memcpy(sdata->name, ndev->name, IFNAMSIZ); sdata->dev = ndev; sdata->wpan_dev.wpan_phy = local->hw.phy; sdata->local = local; INIT_LIST_HEAD(&sdata->wpan_dev.list); /* setup type-dependent data */ ret = ieee802154_setup_sdata(sdata, type); if (ret) goto err; ret = register_netdevice(ndev); if (ret < 0) goto err; mutex_lock(&local->iflist_mtx); list_add_tail_rcu(&sdata->list, &local->interfaces); mutex_unlock(&local->iflist_mtx); return ndev; err: free_netdev(ndev); return ERR_PTR(ret); } 
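/*
 * Illustrative sketch (hypothetical caller, not in the original file):
 * ieee802154_if_add() expects the RTNL to be held and returns ERR_PTR() on
 * failure; passing an invalid extended address (here 0) falls back to the
 * phy's permanent address, as the switch above shows.
 */
static int __maybe_unused example_add_node_iface(struct ieee802154_local *local)
{
	struct net_device *ndev;

	rtnl_lock();
	ndev = ieee802154_if_add(local, "wpan%d", NET_NAME_ENUM,
				 NL802154_IFTYPE_NODE, cpu_to_le64(0));
	rtnl_unlock();

	return PTR_ERR_OR_ZERO(ndev);
}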
void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata) { ASSERT_RTNL(); mutex_lock(&sdata->local->iflist_mtx); if (list_empty(&sdata->local->interfaces)) { mutex_unlock(&sdata->local->iflist_mtx); return; } list_del_rcu(&sdata->list); mutex_unlock(&sdata->local->iflist_mtx); synchronize_rcu(); unregister_netdevice(sdata->dev); } void ieee802154_remove_interfaces(struct ieee802154_local *local) { struct ieee802154_sub_if_data *sdata, *tmp; mutex_lock(&local->iflist_mtx); list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { list_del(&sdata->list); unregister_netdevice(sdata->dev); } mutex_unlock(&local->iflist_mtx); } static int netdev_notify(struct notifier_block *nb, unsigned long state, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ieee802154_sub_if_data *sdata; if (state != NETDEV_CHANGENAME) return NOTIFY_DONE; if (!dev->ieee802154_ptr || !dev->ieee802154_ptr->wpan_phy) return NOTIFY_DONE; if (dev->ieee802154_ptr->wpan_phy->privid != mac802154_wpan_phy_privid) return NOTIFY_DONE; sdata = IEEE802154_DEV_TO_SUB_IF(dev); memcpy(sdata->name, dev->name, IFNAMSIZ); return NOTIFY_OK; } static struct notifier_block mac802154_netdev_notifier = { .notifier_call = netdev_notify, }; int ieee802154_iface_init(void) { return register_netdevice_notifier(&mac802154_netdev_notifier); } void ieee802154_iface_exit(void) { unregister_netdevice_notifier(&mac802154_netdev_notifier); }
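/*
 * Illustrative sketch (hypothetical caller): both removal helpers above end
 * in unregister_netdevice(), so the RTNL must be held around them; the
 * iflist_mtx taken inside only protects the interface list itself.
 */
static void __maybe_unused example_remove_all(struct ieee802154_local *local)
{
	rtnl_lock();
	ieee802154_remove_interfaces(local);
	rtnl_unlock();
}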
#undef TRACE_SYSTEM #define TRACE_SYSTEM qdisc #if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_QDISC_H #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/tracepoint.h> #include <linux/ftrace.h> #include <linux/pkt_sched.h> #include <net/sch_generic.h> TRACE_EVENT(qdisc_dequeue, TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, int packets, struct sk_buff *skb), TP_ARGS(qdisc, txq, packets, skb), TP_STRUCT__entry( __field( struct Qdisc *, qdisc ) __field(const struct netdev_queue *, txq ) __field( int, packets ) __field( void *, skbaddr ) __field( int, ifindex ) __field( u32, handle ) __field( u32, parent ) __field( unsigned long, txq_state) ), /* skb == NULL indicates that 0 packets were dequeued, even when packets==1 */ TP_fast_assign( __entry->qdisc = qdisc; __entry->txq = txq; __entry->packets = skb ? packets : 0; __entry->skbaddr = skb; __entry->ifindex = txq->dev ? txq->dev->ifindex : 0; __entry->handle = qdisc->handle; __entry->parent = qdisc->parent; __entry->txq_state = txq->state; ), TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%p", __entry->ifindex, __entry->handle, __entry->parent, __entry->txq_state, __entry->packets, __entry->skbaddr ) ); TRACE_EVENT(qdisc_enqueue, TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, struct sk_buff *skb), TP_ARGS(qdisc, txq, skb), TP_STRUCT__entry( __field(struct Qdisc *, qdisc) __field(const struct netdev_queue *, txq) __field(void *, skbaddr) __field(int, ifindex) __field(u32, handle) __field(u32, parent) ), TP_fast_assign( __entry->qdisc = qdisc; __entry->txq = txq; __entry->skbaddr = skb; __entry->ifindex = txq->dev ? txq->dev->ifindex : 0; __entry->handle = qdisc->handle; __entry->parent = qdisc->parent; ), TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%p", __entry->ifindex, __entry->handle, __entry->parent, __entry->skbaddr) ); TRACE_EVENT(qdisc_reset, TP_PROTO(struct Qdisc *q), TP_ARGS(q), TP_STRUCT__entry( __string( dev, qdisc_dev(q) ?
qdisc_dev(q)->name : "(null)" ) __string( kind, q->ops->id ) __field( u32, parent ) __field( u32, handle ) ), TP_fast_assign( __assign_str(dev); __assign_str(kind); __entry->parent = q->parent; __entry->handle = q->handle; ), TP_printk("dev=%s kind=%s parent=%x:%x handle=%x:%x", __get_str(dev), __get_str(kind), TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent), TC_H_MAJ(__entry->handle) >> 16, TC_H_MIN(__entry->handle)) ); TRACE_EVENT(qdisc_destroy, TP_PROTO(struct Qdisc *q), TP_ARGS(q), TP_STRUCT__entry( __string( dev, qdisc_dev(q)->name ) __string( kind, q->ops->id ) __field( u32, parent ) __field( u32, handle ) ), TP_fast_assign( __assign_str(dev); __assign_str(kind); __entry->parent = q->parent; __entry->handle = q->handle; ), TP_printk("dev=%s kind=%s parent=%x:%x handle=%x:%x", __get_str(dev), __get_str(kind), TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent), TC_H_MAJ(__entry->handle) >> 16, TC_H_MIN(__entry->handle)) ); TRACE_EVENT(qdisc_create, TP_PROTO(const struct Qdisc_ops *ops, struct net_device *dev, u32 parent), TP_ARGS(ops, dev, parent), TP_STRUCT__entry( __string( dev, dev->name ) __string( kind, ops->id ) __field( u32, parent ) ), TP_fast_assign( __assign_str(dev); __assign_str(kind); __entry->parent = parent; ), TP_printk("dev=%s kind=%s parent=%x:%x", __get_str(dev), __get_str(kind), TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent)) ); #endif /* _TRACE_QDISC_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
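/*
 * Illustrative sketch (not part of this header): TRACE_EVENT() generates a
 * trace_<name>() emitter for each event; the real call sites live in the
 * qdisc core. A dequeue of a single skb would be reported like this; the
 * wrapper function name is hypothetical.
 */
static inline void example_report_dequeue(struct Qdisc *q,
					  const struct netdev_queue *txq,
					  struct sk_buff *skb)
{
	/* packets == 1; the event records packets as 0 when skb is NULL */
	trace_qdisc_dequeue(q, txq, 1, skb);
}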
// SPDX-License-Identifier: GPL-2.0-or-later /* * Pixart PAC7302 driver * * Copyright (C) 2008-2012 Jean-Francois Moine <http://moinejf.free.fr> * Copyright (C) 2005 Thomas Kaiser thomas@kaiser-linux.li * * Separated from Pixart PAC7311 library by Márton Németh * Camera button input handling by Márton Németh <nm127@freemail.hu> * Copyright (C) 2009-2010 Márton Németh <nm127@freemail.hu> */ /* * Some documentation about various registers as determined by trial and error. * * Register page 0: * * Address Description * 0x01 Red balance control * 0x02 Green balance control * 0x03 Blue balance control * The Windows driver uses a quadratic approach to map * the settable values (0-200) on register values: * min=0x20, default=0x40, max=0x80 * 0x0f-0x20 Color and saturation control * 0xa2-0xab Brightness, contrast and gamma control * 0xb6 Sharpness control (bits 0-4) * * Register page 1: * * Address Description * 0x78 Global control, bit 6 controls the LED (inverted) * 0x80 Compression balance, 2 interesting settings: * 0x0f Default * 0x50 Values >= this switch the camera to a lower compression, * using the same table for both luminance and chrominance. * This gives a sharper picture. Only usable when running * at < 15 fps! Note currently the driver does not use this * as the quality gain is small and the generated JPG-s are * only understood by v4l-utils >= 0.8.9 * * Register page 3: * * Address Description * 0x02 Clock divider 3-63, fps = 90 / val. Must be a multiple of 3 on * the 7302, so one of 3, 6, 9, ..., except when between 6 and 12? * 0x03 Variable framerate ctrl reg2==3: 0 -> ~30 fps, 255 -> ~22fps * 0x04 Another var framerate ctrl reg2==3, reg3==0: 0 -> ~30 fps, * 63 -> ~27 fps, the 2 msb's must always be 1 !!
* 0x05 Another var framerate ctrl reg2==3, reg3==0, reg4==0xc0: * 1 -> ~30 fps, 2 -> ~20 fps * 0x0e Exposure bits 0-7, 0-448, 0 = use full frame time * 0x0f Exposure bit 8, 0-448, 448 = no exposure at all * 0x10 Gain 0-31 * 0x12 Another gain 0-31, unlike 0x10 this one seems to start with an * amplification value of 1 rather then 0 at its lowest setting * 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused * 0x80 Another framerate control, best left at 1, moving it from 1 to * 2 causes the framerate to become 3/4th of what it was, and * also seems to cause pixel averaging, resulting in an effective * resolution of 320x240 and thus a much blockier image * * The registers are accessed in the following functions: * * Page | Register | Function * -----+------------+--------------------------------------------------- * 0 | 0x01 | setredbalance() * 0 | 0x03 | setbluebalance() * 0 | 0x0f..0x20 | setcolors() * 0 | 0xa2..0xab | setbrightcont() * 0 | 0xb6 | setsharpness() * 0 | 0xc6 | setwhitebalance() * 0 | 0xdc | setbrightcont(), setcolors() * 3 | 0x02 | setexposure() * 3 | 0x10, 0x12 | setgain() * 3 | 0x11 | setcolors(), setgain(), setexposure(), sethvflip() * 3 | 0x21 | sethvflip() */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/input.h> #include "gspca.h" /* Include pac common sof detection functions */ #include "pac_common.h" #define PAC7302_RGB_BALANCE_MIN 0 #define PAC7302_RGB_BALANCE_MAX 200 #define PAC7302_RGB_BALANCE_DEFAULT 100 #define PAC7302_GAIN_DEFAULT 15 #define PAC7302_GAIN_KNEE 42 #define PAC7302_EXPOSURE_DEFAULT 66 /* 33 ms / 30 fps */ #define PAC7302_EXPOSURE_KNEE 133 /* 66 ms / 15 fps */ MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, Thomas Kaiser thomas@kaiser-linux.li"); MODULE_DESCRIPTION("Pixart PAC7302"); MODULE_LICENSE("GPL"); struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ struct { /* brightness / contrast cluster */ struct v4l2_ctrl *brightness; struct v4l2_ctrl *contrast; }; struct v4l2_ctrl *saturation; struct v4l2_ctrl *white_balance; struct v4l2_ctrl *red_balance; struct v4l2_ctrl *blue_balance; struct { /* flip cluster */ struct v4l2_ctrl *hflip; struct v4l2_ctrl *vflip; }; struct v4l2_ctrl *sharpness; u8 flags; #define FL_HFLIP 0x01 /* mirrored by default */ #define FL_VFLIP 0x02 /* vertical flipped by default */ u8 sof_read; s8 autogain_ignore_frames; atomic_t avg_lum; }; static const struct v4l2_pix_format vga_mode[] = { {640, 480, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, }, }; #define LOAD_PAGE3 255 #define END_OF_SEQUENCE 0 static const u8 init_7302[] = { /* index,value */ 0xff, 0x01, /* page 1 */ 0x78, 0x00, /* deactivate */ 0xff, 0x01, 0x78, 0x40, /* led off */ }; static const u8 start_7302[] = { /* index, len, [value]* */ 0xff, 1, 0x00, /* page 0 */ 0x00, 12, 0x01, 0x40, 0x40, 0x40, 0x01, 0xe0, 0x02, 0x80, 0x00, 0x00, 0x00, 0x00, 0x0d, 24, 0x03, 0x01, 0x00, 0xb5, 0x07, 0xcb, 0x00, 0x00, 0x07, 0xc8, 0x00, 0xea, 0x07, 0xcf, 0x07, 0xf7, 0x07, 0x7e, 0x01, 0x0b, 0x00, 0x00, 0x00, 0x11, 0x26, 2, 0xaa, 0xaa, 0x2e, 1, 0x31, 0x38, 1, 0x01, 0x3a, 3, 0x14, 0xff, 0x5a, 0x43, 11, 0x00, 0x0a, 0x18, 0x11, 0x01, 0x2c, 0x88, 0x11, 0x00, 0x54, 0x11, 0x55, 1, 0x00, 0x62, 4, 0x10, 0x1e, 0x1e, 0x18, 0x6b, 1, 0x00, 0x6e, 3, 0x08, 0x06, 0x00, 0x72, 3, 0x00, 0xff, 0x00, 0x7d, 23, 0x01, 0x01, 0x58, 0x46, 0x50, 0x3c, 0x50, 0x3c, 0x54, 0x46, 0x54, 0x56, 0x52, 0x50, 0x52, 0x50, 0x56, 0x64, 0xa4, 0x00, 0xda, 0x00, 0x00, 0xa2, 10, 0x22, 0x2c, 0x3c, 0x54, 0x69, 0x7c, 0x9c, 0xb9, 0xd2, 0xeb, 0xaf, 1, 0x02, 0xb5, 2, 0x08, 0x08, 0xb8, 2, 0x08, 0x88, 0xc4, 4, 0xae, 0x01, 0x04, 0x01, 0xcc, 1, 0x00, 0xd1, 11, 0x01, 0x30, 0x49, 0x5e, 0x6f, 0x7f, 0x8e, 0xa9, 0xc1, 0xd7, 0xec, 0xdc, 1, 0x01, 0xff, 1, 0x01, /* page 1 */ 0x12, 3, 0x02, 0x00, 0x01, 0x3e, 2, 0x00, 0x00, 0x76, 5, 0x01, 0x20, 0x40, 0x00, 0xf2, 0x7c, 1, 0x00, 0x7f, 10, 0x4b, 0x0f, 0x01, 0x2c, 0x02, 0x58, 0x03, 0x20, 0x02, 0x00, 0x96, 5, 0x01, 0x10, 0x04, 0x01, 0x04, 0xc8, 14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x07, 0x00, 0x01, 0x07, 0x04, 0x01, 0xd8, 1, 0x01, 0xdb, 2, 0x00, 0x01, 0xde, 7, 0x00, 0x01, 0x04, 0x04, 0x00, 0x00, 0x00, 0xe6, 4, 0x00, 0x00, 0x00, 0x01, 0xeb, 1, 0x00, 0xff, 1, 0x02, /* page 2 */ 0x22, 1, 0x00, 0xff, 1, 0x03, /* page 3 */ 0, LOAD_PAGE3, /* load the page 3 */ 0x11, 1, 0x01, 0xff, 1, 0x02, /* page 2 */ 0x13, 1, 0x00, 0x22, 4, 0x1f, 0xa4, 0xf0, 0x96, 0x27, 2, 0x14, 0x0c, 0x2a, 5, 0xc8, 0x00, 0x18, 0x12, 0x22, 0x64, 8, 0x00, 0x00, 0xf0, 0x01, 0x14, 0x44, 0x44, 0x44, 0x6e, 1, 0x08, 0xff, 1, 0x01, /* page 1 */ 0x78, 1, 0x00, 0, END_OF_SEQUENCE /* end of sequence */ }; #define SKIP 0xaa /* page 3 - the value SKIP says skip the index - see reg_w_page() */ static const u8 page3_7302[] = { 0x90, 0x40, 0x03, 0x00, 0xc0, 0x01, 0x14, 0x16, 0x14, 0x12, 0x00, 0x00, 0x00, 0x02, 0x33, 0x00, 0x0f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x01, 0xb3, 0x01, 0x00, 0x00, 0x08, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x54, 0xf4, 0x02, 0x52, 0x54, 0xa4, 0xb8, 0xe0, 0x2a, 0xf6, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x00, 0xf2, 0x1f, 0x04, 0x00, 0x00, SKIP, 0x00, 0x00, 0xc0, 0xc0, 0x10, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0xff, 0x03, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc8, 0xc8, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x08, 0x10, 0x24, 0x40, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xfa, 0x00, 0x64, 0x5a, 0x28, 0x00, 0x00 }; static void reg_w_buf(struct gspca_dev *gspca_dev, u8 index, const u8 *buffer, int len) { int ret; if (gspca_dev->usb_err < 0) return; memcpy(gspca_dev->usb_buf, buffer, len); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, len, 500); if (ret < 0) { pr_err("reg_w_buf failed i: %02x error %d\n", index, ret); gspca_dev->usb_err = ret; } } static void reg_w(struct gspca_dev *gspca_dev, u8 index, u8 value) { int ret; if (gspca_dev->usb_err < 0) return; gspca_dev->usb_buf[0] = value; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, gspca_dev->usb_buf, 1, 500); if (ret < 0) { pr_err("reg_w() failed i: %02x v: %02x error %d\n", index, value, ret); gspca_dev->usb_err = ret; } } static void reg_w_seq(struct gspca_dev *gspca_dev, const u8 *seq, int len) { while (--len >= 0) { reg_w(gspca_dev, seq[0], seq[1]); seq += 2; } } /* load the beginning of a page */ static void reg_w_page(struct gspca_dev *gspca_dev, const u8 *page, int len) { int index; int ret = 0; if (gspca_dev->usb_err < 0) return; for (index = 0; index < len; index++) { if (page[index] == SKIP) /* skip this index */ continue; gspca_dev->usb_buf[0] = page[index]; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, gspca_dev->usb_buf, 1, 500); if (ret < 0) { pr_err("reg_w_page() failed i: %02x v: %02x error %d\n", index, page[index], ret); gspca_dev->usb_err = ret; break; } } } /* output a variable sequence */ static void reg_w_var(struct gspca_dev *gspca_dev, const u8 *seq, const u8 *page3, unsigned int page3_len) { int index, len; for (;;) { index = *seq++; len = *seq++; switch (len) { case END_OF_SEQUENCE: return; case LOAD_PAGE3: reg_w_page(gspca_dev, page3, page3_len); break; default: if (len > USB_BUF_SZ) { gspca_err(gspca_dev, "Incorrect variable sequence\n"); return; } while (len > 0) { if (len < 8) { reg_w_buf(gspca_dev, index, seq, len); seq += len; break; } reg_w_buf(gspca_dev, index, seq, 8); seq += 8; index += 8; len -= 8; } } } /* not reached */ } /* this function is called at probe time for pac7302 */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; cam = &gspca_dev->cam; cam->cam_mode = vga_mode; /* only 640x480 */ cam->nmodes = ARRAY_SIZE(vga_mode); sd->flags = id->driver_info; return 0; } static void setbrightcont(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i, v; static const u8 max[10] = {0x29, 0x33, 0x42, 0x5a, 0x6e, 0x80, 0x9f, 0xbb, 0xd4, 0xec}; static const u8 delta[10] = {0x35, 0x33, 0x33, 0x2f, 0x2a, 0x25, 0x1e, 0x17, 0x11, 0x0b}; reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ for (i = 0; i < 10; i++) { v = max[i]; v += (sd->brightness->val - (s32)sd->brightness->maximum) * 150 / (s32)sd->brightness->maximum; /* 200 ? 
*/ v -= delta[i] * sd->contrast->val / (s32)sd->contrast->maximum; if (v < 0) v = 0; else if (v > 0xff) v = 0xff; reg_w(gspca_dev, 0xa2 + i, v); } reg_w(gspca_dev, 0xdc, 0x01); } static void setcolors(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i, v; static const int a[9] = {217, -212, 0, -101, 170, -67, -38, -315, 355}; static const int b[9] = {19, 106, 0, 19, 106, 1, 19, 106, 1}; reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ reg_w(gspca_dev, 0x11, 0x01); reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ for (i = 0; i < 9; i++) { v = a[i] * sd->saturation->val / (s32)sd->saturation->maximum; v += b[i]; reg_w(gspca_dev, 0x0f + 2 * i, (v >> 8) & 0x07); reg_w(gspca_dev, 0x0f + 2 * i + 1, v); } reg_w(gspca_dev, 0xdc, 0x01); } static void setwhitebalance(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ reg_w(gspca_dev, 0xc6, sd->white_balance->val); reg_w(gspca_dev, 0xdc, 0x01); } static u8 rgbbalance_ctrl_to_reg_value(s32 rgb_ctrl_val) { const unsigned int k = 1000; /* precision factor */ unsigned int norm; /* Normed value [0...k] */ norm = k * (rgb_ctrl_val - PAC7302_RGB_BALANCE_MIN) / (PAC7302_RGB_BALANCE_MAX - PAC7302_RGB_BALANCE_MIN); /* A quadratic approach improves control at small (register) values: */ return 64 * norm * norm / (k*k) + 32 * norm / k + 32; /* Y = 64*X*X + 32*X + 32 * => register values 0x20-0x80; Windows driver uses these limits */ /* NOTE: for full value range (0x00-0xff) use * Y = 254*X*X + X * => 254 * norm * norm / (k*k) + 1 * norm / k */ } static void setredbalance(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ reg_w(gspca_dev, 0x01, rgbbalance_ctrl_to_reg_value(sd->red_balance->val)); reg_w(gspca_dev, 0xdc, 0x01); } static void setbluebalance(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ reg_w(gspca_dev, 0x03, rgbbalance_ctrl_to_reg_value(sd->blue_balance->val)); reg_w(gspca_dev, 0xdc, 0x01); } static void setgain(struct gspca_dev *gspca_dev) { u8 reg10, reg12; if (gspca_dev->gain->val < 32) { reg10 = gspca_dev->gain->val; reg12 = 0; } else { reg10 = 31; reg12 = gspca_dev->gain->val - 31; } reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ reg_w(gspca_dev, 0x10, reg10); reg_w(gspca_dev, 0x12, reg12); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } static void setexposure(struct gspca_dev *gspca_dev) { u8 clockdiv; u16 exposure; /* * Register 2 of frame 3 contains the clock divider configuring the * fps according to the formula: 90 / reg. sd->exposure is the * desired exposure time in 0.5 ms. */ clockdiv = (90 * gspca_dev->exposure->val + 1999) / 2000; /* * Note clockdiv = 3 also works, but when running at 30 fps, depending * on the scene being recorded, the camera switches to another * quantization table for certain JPEG blocks, and we don't know how * to decompress these blocks. So we cap the framerate at 15 fps. */ if (clockdiv < 6) clockdiv = 6; else if (clockdiv > 63) clockdiv = 63; /* * Register 2 MUST be a multiple of 3, except when between 6 and 12? * Always round up, otherwise we cannot get the desired frametime * using the partial frame time exposure control.
*/ if (clockdiv < 6 || clockdiv > 12) clockdiv = ((clockdiv + 2) / 3) * 3; /* * frame exposure time in ms = 1000 * clockdiv / 90 -> * exposure = (sd->exposure / 2) * 448 / (1000 * clockdiv / 90) */ exposure = (gspca_dev->exposure->val * 45 * 448) / (1000 * clockdiv); /* 0 = use full frametime, 448 = no exposure, reverse it */ exposure = 448 - exposure; reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ reg_w(gspca_dev, 0x02, clockdiv); reg_w(gspca_dev, 0x0e, exposure & 0xff); reg_w(gspca_dev, 0x0f, exposure >> 8); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } static void sethvflip(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 data, hflip, vflip; hflip = sd->hflip->val; if (sd->flags & FL_HFLIP) hflip = !hflip; vflip = sd->vflip->val; if (sd->flags & FL_VFLIP) vflip = !vflip; reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ data = (hflip ? 0x08 : 0x00) | (vflip ? 0x04 : 0x00); reg_w(gspca_dev, 0x21, data); /* load registers to sensor (Bit 0, auto clear) */ reg_w(gspca_dev, 0x11, 0x01); } static void setsharpness(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ reg_w(gspca_dev, 0xb6, sd->sharpness->val); reg_w(gspca_dev, 0xdc, 0x01); } /* this function is called at probe and resume time for pac7302 */ static int sd_init(struct gspca_dev *gspca_dev) { reg_w_seq(gspca_dev, init_7302, sizeof(init_7302)/2); return gspca_dev->usb_err; } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (ctrl->id == V4L2_CID_AUTOGAIN && ctrl->is_new && ctrl->val) { /* when switching to autogain set defaults to make sure we are on a valid point of the autogain gain / exposure knee graph, and give this change time to take effect before doing autogain. 
*/ gspca_dev->exposure->val = PAC7302_EXPOSURE_DEFAULT; gspca_dev->gain->val = PAC7302_GAIN_DEFAULT; sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES; } if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightcont(gspca_dev); break; case V4L2_CID_SATURATION: setcolors(gspca_dev); break; case V4L2_CID_WHITE_BALANCE_TEMPERATURE: setwhitebalance(gspca_dev); break; case V4L2_CID_RED_BALANCE: setredbalance(gspca_dev); break; case V4L2_CID_BLUE_BALANCE: setbluebalance(gspca_dev); break; case V4L2_CID_AUTOGAIN: if (gspca_dev->exposure->is_new || (ctrl->is_new && ctrl->val)) setexposure(gspca_dev); if (gspca_dev->gain->is_new || (ctrl->is_new && ctrl->val)) setgain(gspca_dev); break; case V4L2_CID_HFLIP: sethvflip(gspca_dev); break; case V4L2_CID_SHARPNESS: setsharpness(gspca_dev); break; default: return -EINVAL; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; /* this function is called at probe time */ static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 12); sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 32, 1, 16); sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, 127); sd->saturation = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 255, 1, 127); sd->white_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_WHITE_BALANCE_TEMPERATURE, 0, 255, 1, 55); sd->red_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_RED_BALANCE, PAC7302_RGB_BALANCE_MIN, PAC7302_RGB_BALANCE_MAX, 1, PAC7302_RGB_BALANCE_DEFAULT); sd->blue_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BLUE_BALANCE, PAC7302_RGB_BALANCE_MIN, PAC7302_RGB_BALANCE_MAX, 1, PAC7302_RGB_BALANCE_DEFAULT); gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 0, 1023, 1, PAC7302_EXPOSURE_DEFAULT); gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 62, 1, PAC7302_GAIN_DEFAULT); sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); sd->sharpness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SHARPNESS, 0, 15, 1, 8); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_cluster(2, &sd->brightness); v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false); v4l2_ctrl_cluster(2, &sd->hflip); return 0; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w_var(gspca_dev, start_7302, page3_7302, sizeof(page3_7302)); sd->sof_read = 0; sd->autogain_ignore_frames = 0; atomic_set(&sd->avg_lum, 270 + sd->brightness->val); /* start stream */ reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x78, 0x01); return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { /* stop stream */ reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x78, 0x00); } /* called on streamoff with alt 0 and on disconnect for pac7302 */ static void sd_stop0(struct gspca_dev *gspca_dev) { if (!gspca_dev->present) return; reg_w(gspca_dev, 0xff, 0x01); reg_w(gspca_dev, 0x78, 0x40); } static void do_autogain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) 
gspca_dev; int avg_lum = atomic_read(&sd->avg_lum); int desired_lum; const int deadzone = 30; if (sd->autogain_ignore_frames < 0) return; if (sd->autogain_ignore_frames > 0) { sd->autogain_ignore_frames--; } else { desired_lum = 270 + sd->brightness->val; if (gspca_expo_autogain(gspca_dev, avg_lum, desired_lum, deadzone, PAC7302_GAIN_KNEE, PAC7302_EXPOSURE_KNEE)) sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES; } } /* JPEG header */ static const u8 jpeg_header[] = { 0xff, 0xd8, /* SOI: Start of Image */ 0xff, 0xc0, /* SOF0: Start of Frame (Baseline DCT) */ 0x00, 0x11, /* length = 17 bytes (including this length field) */ 0x08, /* Precision: 8 */ 0x02, 0x80, /* height = 640 (image rotated) */ 0x01, 0xe0, /* width = 480 */ 0x03, /* Number of image components: 3 */ 0x01, 0x21, 0x00, /* ID=1, Subsampling 1x1, Quantization table: 0 */ 0x02, 0x11, 0x01, /* ID=2, Subsampling 2x1, Quantization table: 1 */ 0x03, 0x11, 0x01, /* ID=3, Subsampling 2x1, Quantization table: 1 */ 0xff, 0xda, /* SOS: Start Of Scan */ 0x00, 0x0c, /* length = 12 bytes (including this length field) */ 0x03, /* number of components: 3 */ 0x01, 0x00, /* selector 1, table 0x00 */ 0x02, 0x11, /* selector 2, table 0x11 */ 0x03, 0x11, /* selector 3, table 0x11 */ 0x00, 0x3f, /* Spectral selection: 0 .. 63 */ 0x00 /* Successive approximation: 0 */ }; /* this function is run at interrupt level */ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; u8 *image; u8 *sof; sof = pac_find_sof(gspca_dev, &sd->sof_read, data, len); if (sof) { int n, lum_offset, footer_length; /* * Six bytes after the FF D9 EOF marker, a number of luminance * bytes are sent corresponding to different parts of the * image; the 14th and 15th bytes after the EOF seem to * correspond to the center of the image. */ lum_offset = 61 + sizeof pac_sof_marker; footer_length = 74; /* Finish decoding current frame */ n = (sof - data) - (footer_length + sizeof pac_sof_marker); if (n < 0) { gspca_dev->image_len += n; } else { gspca_frame_add(gspca_dev, INTER_PACKET, data, n); } image = gspca_dev->image; if (image != NULL && image[gspca_dev->image_len - 2] == 0xff && image[gspca_dev->image_len - 1] == 0xd9) gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); n = sof - data; len -= n; data = sof; /* Get average luminance */ if (gspca_dev->last_packet_type == LAST_PACKET && n >= lum_offset) atomic_set(&sd->avg_lum, data[-lum_offset] + data[-lum_offset + 1]); /* Start the new frame with the jpeg header */ /* The PAC7302 has the image rotated 90 degrees */ gspca_frame_add(gspca_dev, FIRST_PACKET, jpeg_header, sizeof jpeg_header); } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } #ifdef CONFIG_VIDEO_ADV_DEBUG static int sd_dbg_s_register(struct gspca_dev *gspca_dev, const struct v4l2_dbg_register *reg) { u8 index; u8 value; /* * reg->reg: bit0..15: reserved for register index (wIndex is 16bit * long on the USB bus) */ if (reg->match.addr == 0 && (reg->reg < 0x000000ff) && (reg->val <= 0x000000ff) ) { /* Currently only writing to page 0 is supported. */ /* reg_w() only supports 8bit index */ index = reg->reg; value = reg->val; /* * Note that there shall be no access to another page * by any other function between the page switch and * the actual register write.
*/ reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ reg_w(gspca_dev, index, value); reg_w(gspca_dev, 0xdc, 0x01); } return gspca_dev->usb_err; } #endif #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ int len) /* interrupt packet length */ { int ret = -EINVAL; u8 data0, data1; if (len == 2) { data0 = data[0]; data1 = data[1]; if ((data0 == 0x00 && data1 == 0x11) || (data0 == 0x22 && data1 == 0x33) || (data0 == 0x44 && data1 == 0x55) || (data0 == 0x66 && data1 == 0x77) || (data0 == 0x88 && data1 == 0x99) || (data0 == 0xaa && data1 == 0xbb) || (data0 == 0xcc && data1 == 0xdd) || (data0 == 0xee && data1 == 0xff)) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); input_sync(gspca_dev->input_dev); input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); ret = 0; } } return ret; } #endif /* sub-driver description for pac7302 */ static const struct sd_desc sd_desc = { .name = KBUILD_MODNAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .dq_callback = do_autogain, #ifdef CONFIG_VIDEO_ADV_DEBUG .set_register = sd_dbg_s_register, #endif #if IS_ENABLED(CONFIG_INPUT) .int_pkt_scan = sd_int_pkt_scan, #endif }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x06f8, 0x3009)}, {USB_DEVICE(0x06f8, 0x301b)}, {USB_DEVICE(0x093a, 0x2620)}, {USB_DEVICE(0x093a, 0x2621)}, {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2625)}, {USB_DEVICE(0x093a, 0x2626)}, {USB_DEVICE(0x093a, 0x2627), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2628)}, {USB_DEVICE(0x093a, 0x2629), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x262a)}, {USB_DEVICE(0x093a, 0x262c)}, {USB_DEVICE(0x145f, 0x013c)}, {USB_DEVICE(0x1ae7, 0x2001)}, /* SpeedLink Snappy Mic SL-6825-SBK */ {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = KBUILD_MODNAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
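/*
 * Illustrative sketch (not part of the driver): the clockdiv/exposure
 * register arithmetic used by setexposure() above, factored out as a
 * standalone worked example. The function name is hypothetical. For the
 * default control value 66 (33 ms): clockdiv = (90*66 + 1999)/2000 = 3,
 * clamped to the 15 fps minimum of 6, and exposure = 66*45*448/(1000*6)
 * = 221, so register 0x02 gets 6 and 0x0e/0x0f encode 448 - 221 = 227.
 */
static u16 __maybe_unused example_exposure_regs(int val, u8 *clockdiv)
{
	u8 div = (90 * val + 1999) / 2000;	/* desired frame divider */

	div = clamp_t(u8, div, 6, 63);		/* cap framerate at 15 fps */
	if (div < 6 || div > 12)
		div = ((div + 2) / 3) * 3;	/* must be a multiple of 3 */

	*clockdiv = div;
	/* 0 = use the full frame time, 448 = no exposure; so reverse it */
	return 448 - (val * 45 * 448) / (1000 * div);
}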
/* SPDX-License-Identifier: GPL-2.0 */ /* * This header provides generic wrappers for memory access instrumentation that * the compiler cannot emit for: KASAN, KCSAN, KMSAN. */ #ifndef _LINUX_INSTRUMENTED_H #define _LINUX_INSTRUMENTED_H #include <linux/compiler.h> #include <linux/kasan-checks.h> #include <linux/kcsan-checks.h> #include <linux/kmsan-checks.h> #include <linux/types.h> /** * instrument_read - instrument regular read access * @v: address of access * @size: size of access * * Instrument a regular read access. The instrumentation should be inserted * before the actual read happens. */ static __always_inline void instrument_read(const volatile void *v, size_t size) { kasan_check_read(v, size); kcsan_check_read(v, size); } /** * instrument_write - instrument regular write access * @v: address of access * @size: size of access * * Instrument a regular write access. The instrumentation should be inserted * before the actual write happens. */ static __always_inline void instrument_write(const volatile void *v, size_t size) { kasan_check_write(v, size); kcsan_check_write(v, size); } /** * instrument_read_write - instrument regular read-write access * @v: address of access * @size: size of access * * Instrument a regular read-write access. The instrumentation should be * inserted before the actual write happens. */ static __always_inline void instrument_read_write(const volatile void *v, size_t size) { kasan_check_write(v, size); kcsan_check_read_write(v, size); } /** * instrument_atomic_read - instrument atomic read access * @v: address of access * @size: size of access * * Instrument an atomic read access. The instrumentation should be inserted * before the actual read happens. */ static __always_inline void instrument_atomic_read(const volatile void *v, size_t size) { kasan_check_read(v, size); kcsan_check_atomic_read(v, size); } /** * instrument_atomic_write - instrument atomic write access * @v: address of access * @size: size of access * * Instrument an atomic write access. The instrumentation should be inserted * before the actual write happens. */ static __always_inline void instrument_atomic_write(const volatile void *v, size_t size) { kasan_check_write(v, size); kcsan_check_atomic_write(v, size); } /** * instrument_atomic_read_write - instrument atomic read-write access * @v: address of access * @size: size of access * * Instrument an atomic read-write access. The instrumentation should be * inserted before the actual write happens.
 */
static __always_inline void instrument_atomic_read_write(const volatile void *v, size_t size)
{
    kasan_check_write(v, size);
    kcsan_check_atomic_read_write(v, size);
}

/**
 * instrument_copy_to_user - instrument reads of copy_to_user
 * @to: destination address
 * @from: source address
 * @n: number of bytes to copy
 *
 * Instrument reads from kernel memory that are due to copy_to_user (and
 * variants). The instrumentation must be inserted before the accesses.
 */
static __always_inline void instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
{
    kasan_check_read(from, n);
    kcsan_check_read(from, n);
    kmsan_copy_to_user(to, from, n, 0);
}

/**
 * instrument_copy_from_user_before - add instrumentation before copy_from_user
 * @to: destination address
 * @from: source address
 * @n: number of bytes to copy
 *
 * Instrument writes to kernel memory that are due to copy_from_user (and
 * variants). The instrumentation should be inserted before the accesses.
 */
static __always_inline void instrument_copy_from_user_before(const void *to, const void __user *from, unsigned long n)
{
    kasan_check_write(to, n);
    kcsan_check_write(to, n);
}

/**
 * instrument_copy_from_user_after - add instrumentation after copy_from_user
 * @to: destination address
 * @from: source address
 * @n: number of bytes to copy
 * @left: number of bytes not copied (as returned by copy_from_user)
 *
 * Instrument writes to kernel memory that are due to copy_from_user (and
 * variants). The instrumentation should be inserted after the accesses.
 */
static __always_inline void instrument_copy_from_user_after(const void *to, const void __user *from, unsigned long n, unsigned long left)
{
    kmsan_unpoison_memory(to, n - left);
}

/**
 * instrument_memcpy_before - add instrumentation before non-instrumented memcpy
 * @to: destination address
 * @from: source address
 * @n: number of bytes to copy
 *
 * Instrument memory accesses that happen in custom memcpy implementations. The
 * instrumentation should be inserted before the memcpy call.
 */
static __always_inline void instrument_memcpy_before(void *to, const void *from, unsigned long n)
{
    kasan_check_write(to, n);
    kasan_check_read(from, n);
    kcsan_check_write(to, n);
    kcsan_check_read(from, n);
}

/**
 * instrument_memcpy_after - add instrumentation after non-instrumented memcpy
 * @to: destination address
 * @from: source address
 * @n: number of bytes to copy
 * @left: number of bytes not copied (if known)
 *
 * Instrument memory accesses that happen in custom memcpy implementations. The
 * instrumentation should be inserted after the memcpy call.
 */
static __always_inline void instrument_memcpy_after(void *to, const void *from, unsigned long n, unsigned long left)
{
    kmsan_memmove(to, from, n - left);
}

/**
 * instrument_get_user() - add instrumentation to get_user()-like macros
 * @to: destination variable, may not be address-taken
 *
 * get_user() and friends are fragile, so it may depend on the implementation
 * whether the instrumentation happens before or after the data is copied from
 * userspace.
 */
#define instrument_get_user(to)                             \
({                                                          \
    u64 __tmp = (u64)(to);                                  \
    kmsan_unpoison_memory(&__tmp, sizeof(__tmp));           \
    to = __tmp;                                             \
})

/**
 * instrument_put_user() - add instrumentation to put_user()-like macros
 * @from: source address
 * @ptr: userspace pointer to copy to
 * @size: number of bytes to copy
 *
 * put_user() and friends are fragile, so it may depend on the implementation
 * whether the instrumentation happens before or after the data is copied to
 * userspace.
 */
#define instrument_put_user(from, ptr, size)                \
({                                                          \
    kmsan_copy_to_user(ptr, &from, sizeof(from), 0);        \
})

#endif /* _LINUX_INSTRUMENTED_H */
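/*
 * Illustrative sketch (not part of this header): per the contract documented
 * above, a custom non-instrumented copy routine would be bracketed by the
 * before/after helpers. The wrapper name and the raw __memcpy() are
 * assumptions for the example; the point is only the call placement.
 */
static __always_inline void example_instrumented_copy(void *dst, const void *src, unsigned long n)
{
    instrument_memcpy_before(dst, src, n);  /* checks emitted before the accesses */
    __memcpy(dst, src, n);                  /* raw, non-instrumented copy (assumed to exist) */
    instrument_memcpy_after(dst, src, n, 0);/* 0 == no bytes were left uncopied */
}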
/* SPDX-License-Identifier: GPL-2.0 */
/*
  File: linux/xattr.h

  Extended attributes handling.

  Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org>
  Copyright (c) 2001-2002 Silicon Graphics, Inc.  All Rights Reserved.
  Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
*/
#ifndef _LINUX_XATTR_H
#define _LINUX_XATTR_H

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/user_namespace.h>
#include <uapi/linux/xattr.h>

/* List of all xattr_args "versions". */
#define XATTR_ARGS_SIZE_VER0    16 /* sizeof first published struct */
#define XATTR_ARGS_SIZE_LATEST  XATTR_ARGS_SIZE_VER0

struct inode;
struct dentry;

static inline bool is_posix_acl_xattr(const char *name)
{
    return (strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
           (strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT) == 0);
}

/*
 * struct xattr_handler: When @name is set, match attributes with exactly that
 * name. When @prefix is set instead, match attributes with that prefix and
 * with a non-empty suffix.
 */
struct xattr_handler {
    const char *name;
    const char *prefix;
    int flags;      /* fs private flags */
    bool (*list)(struct dentry *dentry);
    int (*get)(const struct xattr_handler *, struct dentry *dentry,
               struct inode *inode, const char *name, void *buffer,
               size_t size);
    int (*set)(const struct xattr_handler *, struct mnt_idmap *idmap,
               struct dentry *dentry, struct inode *inode, const char *name,
               const void *buffer, size_t size, int flags);
};

/**
 * xattr_handler_can_list - check whether xattr can be listed
 * @handler: handler for this type of xattr
 * @dentry: dentry whose inode xattr to list
 *
 * Determine whether the xattr associated with @dentry can be listed given
 * @handler.
 *
 * Return: true if xattr can be listed, false if not.
 */
static inline bool xattr_handler_can_list(const struct xattr_handler *handler,
                                          struct dentry *dentry)
{
    return handler && (!handler->list || handler->list(dentry));
}

const char *xattr_full_name(const struct xattr_handler *, const char *);

struct xattr {
    const char *name;
    void *value;
    size_t value_len;
};

ssize_t __vfs_getxattr(struct dentry *, struct inode *, const char *, void *, size_t);
ssize_t vfs_getxattr(struct mnt_idmap *, struct dentry *, const char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
int __vfs_setxattr(struct mnt_idmap *, struct dentry *, struct inode *, const char *, const void *, size_t, int);
int __vfs_setxattr_noperm(struct mnt_idmap *, struct dentry *, const char *, const void *, size_t, int);
int __vfs_setxattr_locked(struct mnt_idmap *, struct dentry *, const char *, const void *, size_t, int, struct inode **);
int vfs_setxattr(struct mnt_idmap *, struct dentry *, const char *, const void *, size_t, int);
int __vfs_removexattr(struct mnt_idmap *, struct dentry *, const char *);
int __vfs_removexattr_locked(struct mnt_idmap *, struct dentry *, const char *, struct inode **);
int vfs_removexattr(struct mnt_idmap *, struct dentry *, const char *);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
int vfs_getxattr_alloc(struct mnt_idmap *idmap, struct dentry *dentry, const char *name, char **xattr_value, size_t size, gfp_t flags);
int xattr_supports_user_prefix(struct inode *inode);

static inline const char *xattr_prefix(const struct xattr_handler *handler)
{
    return handler->prefix ?: handler->name;
}

struct simple_xattrs {
    struct rb_root rb_root;
    rwlock_t lock;
};

struct simple_xattr {
    struct rb_node rb_node;
    char *name;
    size_t size;
    char value[];
};

void simple_xattrs_init(struct simple_xattrs *xattrs);
void simple_xattrs_free(struct simple_xattrs *xattrs, size_t *freed_space);
size_t simple_xattr_space(const char *name, size_t size);
struct simple_xattr *simple_xattr_alloc(const void *value, size_t size);
void simple_xattr_free(struct simple_xattr *xattr);
int simple_xattr_get(struct simple_xattrs *xattrs, const char *name, void *buffer, size_t size);
struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs, const char *name, const void *value, size_t size, int flags);
ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, char *buffer, size_t size);
void simple_xattr_add(struct simple_xattrs *xattrs, struct simple_xattr *new_xattr);
int xattr_list_one(char **buffer, ssize_t *remaining_size, const char *name);

#endif  /* _LINUX_XATTR_H */
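/*
 * Illustrative sketch (not part of this header): a minimal prefix-based
 * handler. With .prefix set, the VFS matches any "example.*" attribute with
 * a non-empty suffix and passes only the suffix as @name; xattr_full_name()
 * above recovers the complete string. All names here are hypothetical.
 */
static int example_xattr_get(const struct xattr_handler *handler,
                             struct dentry *dentry, struct inode *inode,
                             const char *name, void *buffer, size_t size)
{
    /* @name arrives with the "example." prefix already stripped */
    const char *full = xattr_full_name(handler, name);

    (void)full;             /* a real handler would look the value up here */
    return -EOPNOTSUPP;     /* stub */
}

static const struct xattr_handler example_xattr_handler = {
    .prefix = "example.",   /* matches "example.<non-empty suffix>" */
    .get    = example_xattr_get,
};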
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sonix sn9c102 (bayer) library
 *
 * Copyright (C) 2009-2011 Jean-François Moine <http://moinejf.free.fr>
 * Copyright (C) 2003 2004 Michel Xhaard mxhaard@magic.fr
 * Add Pas106 Stefano Mozzi (C) 2004
 */

/* Some documentation on known sonixb registers:

Reg     Use
sn9c101 / sn9c102:
0x10    high nibble red gain low nibble blue gain
0x11    low nibble green gain
sn9c103:
0x05    red gain 0-127
0x06    blue gain 0-127
0x07    green gain 0-127
all:
0x08-0x0f i2c / 3wire registers
0x12    hstart
0x13    vstart
0x15    hsize (hsize = register-value * 16)
0x16    vsize (vsize = register-value * 16)
0x17    bit 0 toggle compression quality (according to sn9c102 driver)
0x18    bit 7 enables compression, bit 4-5 set image down scaling:
        00 scale 1, 01 scale 1/2, 10 scale 1/4
0x19
        high-nibble is sensor clock divider, changes exposure on sensors which
        use a clock generated by the bridge. Some sensors have their own clock.
0x1c    auto_exposure area (for avg_lum) startx (startx = register-value * 32)
0x1d    auto_exposure area (for avg_lum) starty (starty = register-value * 32)
0x1e    auto_exposure area (for avg_lum) stopx (hsize = (0x1e - 0x1c) * 32)
0x1f    auto_exposure area (for avg_lum) stopy (vsize = (0x1f - 0x1d) * 32)
*/

#define MODULE_NAME "sonixb"

#include <linux/input.h>
#include "gspca.h"

MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA/SN9C102 USB Camera Driver");
MODULE_LICENSE("GPL");

/* specific webcam descriptor */
struct sd {
    struct gspca_dev gspca_dev; /* !! must be the first item */

    struct v4l2_ctrl *brightness;
    struct v4l2_ctrl *plfreq;

    atomic_t avg_lum;
    int prev_avg_lum;
    int exposure_knee;
    int header_read;
    u8 header[12]; /* Header without sof marker */

    unsigned char autogain_ignore_frames;
    unsigned char frames_to_drop;

    __u8 bridge;                /* Type of bridge */
#define BRIDGE_101 0
#define BRIDGE_102 0 /* We make no difference between 101 and 102 */
#define BRIDGE_103 1

    __u8 sensor;                /* Type of image sensor chip */
#define SENSOR_HV7131D 0
#define SENSOR_HV7131R 1
#define SENSOR_OV6650 2
#define SENSOR_OV7630 3
#define SENSOR_PAS106 4
#define SENSOR_PAS202 5
#define SENSOR_TAS5110C 6
#define SENSOR_TAS5110D 7
#define SENSOR_TAS5130CXX 8
    __u8 reg11;
};

typedef const __u8 sensor_init_t[8];

struct sensor_data {
    const __u8 *bridge_init;
    sensor_init_t *sensor_init;
    int sensor_init_size;
    int flags;
    __u8 sensor_addr;
};

/* sensor_data flags */
#define F_SIF           0x01    /* sif or vga */

/* priv field of struct v4l2_pix_format flags (do not use low nibble!) */
#define MODE_RAW 0x10           /* raw bayer mode */
#define MODE_REDUCED_SIF 0x20   /* vga mode (320x240 / 160x120) on sif cam */

#define COMP 0xc7               /* 0x87 //0x07 */
#define COMP1 0xc9              /* 0x89 //0x09 */

#define MCK_INIT 0x63
#define MCK_INIT1 0x20          /*fixme: Bayer - 0x50 for JPEG ??*/

#define SYS_CLK 0x04

#define SENS(bridge, sensor, _flags, _sensor_addr) \
{ \
    .bridge_init = bridge, \
    .sensor_init = sensor, \
    .sensor_init_size = sizeof(sensor), \
    .flags = _flags, .sensor_addr = _sensor_addr \
}

/* We calculate the autogain at the end of the transfer of a frame; at this
   moment a frame with the old settings is being captured and transmitted. So
   if we adjust the gain or exposure we must ignore at least the next frame for
   the new settings to come into effect before doing any other adjustments.
*/
#define AUTOGAIN_IGNORE_FRAMES 1

static const struct v4l2_pix_format vga_mode[] = {
    {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
        .bytesperline = 160,
        .sizeimage = 160 * 120,
        .colorspace = V4L2_COLORSPACE_SRGB,
        .priv = 2 | MODE_RAW},
    {160, 120, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
        .bytesperline = 160,
        .sizeimage = 160 * 120 * 5 / 4,
        .colorspace = V4L2_COLORSPACE_SRGB,
        .priv = 2},
    {320, 240, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
        .bytesperline = 320,
        .sizeimage = 320 * 240 * 5 / 4,
        .colorspace = V4L2_COLORSPACE_SRGB,
        .priv = 1},
    {640, 480, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
        .bytesperline = 640,
        .sizeimage = 640 * 480 * 5 / 4,
        .colorspace = V4L2_COLORSPACE_SRGB,
        .priv = 0},
};
static const struct v4l2_pix_format sif_mode[] = {
    {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
        .bytesperline = 160,
        .sizeimage = 160 * 120,
        .colorspace = V4L2_COLORSPACE_SRGB,
        .priv = 1 | MODE_RAW | MODE_REDUCED_SIF},
    {160, 120, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
        .bytesperline = 160,
        .sizeimage = 160 * 120 * 5 / 4,
        .colorspace = V4L2_COLORSPACE_SRGB,
        .priv = 1 | MODE_REDUCED_SIF},
    {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
        .bytesperline = 176,
        .sizeimage = 176 * 144,
        .colorspace = V4L2_COLORSPACE_SRGB,
        .priv = 1 | MODE_RAW},
    {176, 144, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
        .bytesperline = 176,
        .sizeimage = 176 * 144 * 5 / 4,
        .colorspace = V4L2_COLORSPACE_SRGB,
        .priv = 1},
    {320, 240, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
        .bytesperline = 320,
        .sizeimage = 320 * 240 * 5 / 4,
        .colorspace = V4L2_COLORSPACE_SRGB,
        .priv = 0 | MODE_REDUCED_SIF},
    {352, 288, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
        .bytesperline = 352,
        .sizeimage = 352 * 288 * 5 / 4,
        .colorspace = V4L2_COLORSPACE_SRGB,
        .priv = 0},
};

static const __u8 initHv7131d[] = {
    0x04, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00, 0x80,
    0x11, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x02, 0x02, 0x00,
    0x28, 0x1e, 0x60, 0x8e, 0x42,
};
static const __u8 hv7131d_sensor_init[][8] = {
    {0xa0, 0x11, 0x01, 0x04, 0x00, 0x00, 0x00, 0x17},
    {0xa0, 0x11, 0x02, 0x00, 0x00, 0x00, 0x00, 0x17},
    {0xa0, 0x11, 0x28, 0x00, 0x00, 0x00, 0x00, 0x17},
    {0xa0, 0x11, 0x30, 0x30, 0x00, 0x00, 0x00, 0x17}, /* reset level */
    {0xa0, 0x11, 0x34, 0x02, 0x00, 0x00, 0x00, 0x17}, /* pixel bias volt */
};

static const __u8 initHv7131r[] = {
    0x46, 0x77, 0x00, 0x04, 0x00, 0x00, 0x00, 0x80,
    0x11, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x02, 0x01, 0x00,
    0x28, 0x1e, 0x60, 0x8a, 0x20,
};
static const __u8 hv7131r_sensor_init[][8] = {
    {0xc0, 0x11, 0x31, 0x38, 0x2a, 0x2e, 0x00, 0x10},
    {0xa0, 0x11, 0x01, 0x08, 0x2a, 0x2e, 0x00, 0x10},
    {0xb0, 0x11, 0x20, 0x00, 0xd0, 0x2e, 0x00, 0x10},
    {0xc0, 0x11, 0x25, 0x03, 0x0e, 0x28, 0x00, 0x16},
    {0xa0, 0x11, 0x30, 0x10, 0x0e, 0x28, 0x00, 0x15},
};
static const __u8 initOv6650[] = {
    0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
    0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x01, 0x01, 0x0a, 0x16, 0x12, 0x68, 0x8b, 0x10,
};
static const __u8 ov6650_sensor_init[][8] = {
    /* Brightness, contrast, etc are set through the SCCB interface.
     * AVCAP on win2 does not send any data for these controls.
     */
    /* Anyway, some registers appear to alter brightness and contrast */

    /* Reset sensor */
    {0xa0, 0x60, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10},
    /* Set clock register 0x11 low nibble is clock divider */
    {0xd0, 0x60, 0x11, 0xc0, 0x1b, 0x18, 0xc1, 0x10},
    /* Next some unknown stuff */
    {0xb0, 0x60, 0x15, 0x00, 0x02, 0x18, 0xc1, 0x10},
/*  {0xa0, 0x60, 0x1b, 0x01, 0x02, 0x18, 0xc1, 0x10},
     * THIS SETS A GREEN SCREEN
     * (pixels could be inverted in decode, kind of "brg",
     * but blue won't be there. Avoid this data ... */
    {0xd0, 0x60, 0x26, 0x01, 0x14, 0xd8, 0xa4, 0x10}, /* format out? */
    {0xd0, 0x60, 0x26, 0x01, 0x14, 0xd8, 0xa4, 0x10},
    {0xa0, 0x60, 0x30, 0x3d, 0x0a, 0xd8, 0xa4, 0x10},
    /* Enable rgb brightness control */
    {0xa0, 0x60, 0x61, 0x08, 0x00, 0x00, 0x00, 0x10},
    /* HDG: Note windows uses the line below, which sets both registers 0x60
       and 0x61. I believe these registers of the ov6650 are identical to
       those of the ov7630; if this is true the windows settings add a bit of
       additional red gain and a lot of additional blue gain, which matches
       my findings that the windows settings make blue much too blue and red
       a little too red.
    {0xb0, 0x60, 0x60, 0x66, 0x68, 0xd8, 0xa4, 0x10}, */
    /* Some more unknown stuff */
    {0xa0, 0x60, 0x68, 0x04, 0x68, 0xd8, 0xa4, 0x10},
    {0xd0, 0x60, 0x17, 0x24, 0xd6, 0x04, 0x94, 0x10}, /* Clipreg */
};

static const __u8 initOv7630[] = {
    0x04, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,     /* r01 .. r08 */
    0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,     /* r09 .. r10 */
    0x00, 0x01, 0x01, 0x0a,                             /* r11 .. r14 */
    0x28, 0x1e,                 /* H & V sizes r15 .. r16 */
    0x68, 0x8f, MCK_INIT1,                              /* r17 .. r19 */
};
static const __u8 ov7630_sensor_init[][8] = {
    {0xa0, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10},
    {0xb0, 0x21, 0x01, 0x77, 0x3a, 0x00, 0x00, 0x10},
/*  {0xd0, 0x21, 0x12, 0x7c, 0x01, 0x80, 0x34, 0x10},     jfm */
    {0xd0, 0x21, 0x12, 0x5c, 0x00, 0x80, 0x34, 0x10},  /* jfm */
    {0xa0, 0x21, 0x1b, 0x04, 0x00, 0x80, 0x34, 0x10},
    {0xa0, 0x21, 0x20, 0x44, 0x00, 0x80, 0x34, 0x10},
    {0xa0, 0x21, 0x23, 0xee, 0x00, 0x80, 0x34, 0x10},
    {0xd0, 0x21, 0x26, 0xa0, 0x9a, 0xa0, 0x30, 0x10},
    {0xb0, 0x21, 0x2a, 0x80, 0x00, 0xa0, 0x30, 0x10},
    {0xb0, 0x21, 0x2f, 0x3d, 0x24, 0xa0, 0x30, 0x10},
    {0xa0, 0x21, 0x32, 0x86, 0x24, 0xa0, 0x30, 0x10},
    {0xb0, 0x21, 0x60, 0xa9, 0x4a, 0xa0, 0x30, 0x10},
/*  {0xb0, 0x21, 0x60, 0xa9, 0x42, 0xa0, 0x30, 0x10},   * jfm */
    {0xa0, 0x21, 0x65, 0x00, 0x42, 0xa0, 0x30, 0x10},
    {0xa0, 0x21, 0x69, 0x38, 0x42, 0xa0, 0x30, 0x10},
    {0xc0, 0x21, 0x6f, 0x88, 0x0b, 0x00, 0x30, 0x10},
    {0xc0, 0x21, 0x74, 0x21, 0x8e, 0x00, 0x30, 0x10},
    {0xa0, 0x21, 0x7d, 0xf7, 0x8e, 0x00, 0x30, 0x10},
    {0xd0, 0x21, 0x17, 0x1c, 0xbd, 0x06, 0xf6, 0x10},
};

static const __u8 initPas106[] = {
    0x04, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81,
    0x40, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x04, 0x01, 0x00,
    0x16, 0x12, 0x24, COMP1, MCK_INIT1,
};
/* compression 0x86 mckinit1 0x2b */

/* "Known" PAS106B registers:
   0x02 clock divider
   0x03 Variable framerate bits 4-11
   0x04 Var framerate bits 0-3, one must leave the 4 msb's at 0 !!
        The variable framerate control must never be set lower than 300,
        which sets the framerate at 90 / reg02, otherwise vsync is lost.
   0x05 Shutter Time Line Offset, this can be used as an exposure control:
        0 = use full frame time, 255 = no exposure at all
        Note this may never be larger than "var-framerate control" / 2 - 2.
        When var-framerate control is < 514, no exposure is reached at the max
        allowed value for the framerate control value, rather than at 255.
   0x06 Shutter Time Pixel Offset, like reg05 this influences exposure, but
        only a very little bit, leave at 0xcd
   0x07 offset sign bit (bit0 1 > negative offset)
   0x08 offset
   0x09 Blue Gain
   0x0a Green1 Gain
   0x0b Green2 Gain
   0x0c Red Gain
   0x0e Global gain
   0x13 Write 1 to commit settings to sensor
*/

static const __u8 pas106_sensor_init[][8] = {
    /* Pixel Clock Divider 6 */
    { 0xa1, 0x40, 0x02, 0x04, 0x00, 0x00, 0x00, 0x14 },
    /* Frame Time MSB (also seen as 0x12) */
    { 0xa1, 0x40, 0x03, 0x13, 0x00, 0x00, 0x00, 0x14 },
    /* Frame Time LSB (also seen as 0x05) */
    { 0xa1, 0x40, 0x04, 0x06, 0x00, 0x00, 0x00, 0x14 },
    /* Shutter Time Line Offset (also seen as 0x6d) */
    { 0xa1, 0x40, 0x05, 0x65, 0x00, 0x00, 0x00, 0x14 },
    /* Shutter Time Pixel Offset (also seen as 0xb1) */
    { 0xa1, 0x40, 0x06, 0xcd, 0x00, 0x00, 0x00, 0x14 },
    /* Black Level Subtract Sign (also seen 0x00) */
    { 0xa1, 0x40, 0x07, 0xc1, 0x00, 0x00, 0x00, 0x14 },
    /* Black Level Subtract Level (also seen 0x01) */
    { 0xa1, 0x40, 0x08, 0x06, 0x00, 0x00, 0x00, 0x14 },
    { 0xa1, 0x40, 0x08, 0x06, 0x00, 0x00, 0x00, 0x14 },
    /* Color Gain B Pixel 5 a */
    { 0xa1, 0x40, 0x09, 0x05, 0x00, 0x00, 0x00, 0x14 },
    /* Color Gain G1 Pixel 1 5 */
    { 0xa1, 0x40, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x14 },
    /* Color Gain G2 Pixel 1 0 5 */
    { 0xa1, 0x40, 0x0b, 0x04, 0x00, 0x00, 0x00, 0x14 },
    /* Color Gain R Pixel 3 1 */
    { 0xa1, 0x40, 0x0c, 0x05, 0x00, 0x00, 0x00, 0x14 },
    /* Color GainH Pixel */
    { 0xa1, 0x40, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x14 },
    /* Global Gain */
    { 0xa1, 0x40, 0x0e, 0x0e, 0x00, 0x00, 0x00, 0x14 },
    /* Contrast */
    { 0xa1, 0x40, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x14 },
    /* H&V synchro polarity */
    { 0xa1, 0x40, 0x10, 0x06, 0x00, 0x00, 0x00, 0x14 },
    /* ?default */
    { 0xa1, 0x40, 0x11, 0x06, 0x00, 0x00, 0x00, 0x14 },
    /* DAC scale */
    { 0xa1, 0x40, 0x12, 0x06, 0x00, 0x00, 0x00, 0x14 },
    /* ?default */
    { 0xa1, 0x40, 0x14, 0x02, 0x00, 0x00, 0x00, 0x14 },
    /* Validate Settings */
    { 0xa1, 0x40, 0x13, 0x01, 0x00, 0x00, 0x00, 0x14 },
};

static const __u8 initPas202[] = {
    0x44, 0x44, 0x21, 0x30, 0x00, 0x00, 0x00, 0x80,
    0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x06, 0x03, 0x0a,
    0x28, 0x1e, 0x20, 0x89, 0x20,
};

/* "Known" PAS202BCB registers:
   0x02 clock divider
   0x04 Variable framerate bits 6-11 (*)
   0x05 Var framerate bits 0-5, one must leave the 2 msb's at 0 !!
   0x07 Blue Gain
   0x08 Green Gain
   0x09 Red Gain
   0x0b offset sign bit (bit0 1 > negative offset)
   0x0c offset
   0x0e Unknown; the image is slightly brighter when bit 0 is 0. If reg0f
        is 0 too, leave at 1, otherwise we get a jump in our exposure control
   0x0f Exposure 0-255, 0 = use full frame time, 255 = no exposure at all
   0x10 Master gain 0 - 31
   0x11 write 1 to apply changes
   (*) The variable framerate control must never be set lower than 500,
        which sets the framerate at 30 / reg02, otherwise vsync is lost.
*/
static const __u8 pas202_sensor_init[][8] = {
    /* Set the clock divider to 4 -> 30 / 4 = 7.5 fps; we would like to set
       it lower, but for some reason the bridge starts missing vsyncs then */
    {0xa0, 0x40, 0x02, 0x04, 0x00, 0x00, 0x00, 0x10},
    {0xd0, 0x40, 0x04, 0x07, 0x34, 0x00, 0x09, 0x10},
    {0xd0, 0x40, 0x08, 0x01, 0x00, 0x00, 0x01, 0x10},
    {0xd0, 0x40, 0x0c, 0x00, 0x0c, 0x01, 0x32, 0x10},
    {0xd0, 0x40, 0x10, 0x00, 0x01, 0x00, 0x63, 0x10},
    {0xa0, 0x40, 0x15, 0x70, 0x01, 0x00, 0x63, 0x10},
    {0xa0, 0x40, 0x18, 0x00, 0x01, 0x00, 0x63, 0x10},
    {0xa0, 0x40, 0x11, 0x01, 0x01, 0x00, 0x63, 0x10},
    {0xa0, 0x40, 0x03, 0x56, 0x01, 0x00, 0x63, 0x10},
    {0xa0, 0x40, 0x11, 0x01, 0x01, 0x00, 0x63, 0x10},
};

static const __u8 initTas5110c[] = {
    0x44, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
    0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x45, 0x09, 0x0a,
    0x16, 0x12, 0x60, 0x86, 0x2b,
};
/* Same as above, except a different hstart */
static const __u8 initTas5110d[] = {
    0x44, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
    0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x41, 0x09, 0x0a,
    0x16, 0x12, 0x60, 0x86, 0x2b,
};
/* tas5110c is 3 wire, tas5110d is 2 wire (regular i2c) */
static const __u8 tas5110c_sensor_init[][8] = {
    {0x30, 0x11, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x10},
    {0x30, 0x11, 0x02, 0x20, 0xa9, 0x00, 0x00, 0x10},
};
/* Known TAS5110D registers
 * reg02: gain, bit order reversed!! 0 == max gain, 255 == min gain
 * reg03: bit3: vflip, bit4: ~hflip, bit7: ~gainboost (~ == inverted)
 * Note: writing reg03 seems to only work when written together with 02
 */
static const __u8 tas5110d_sensor_init[][8] = {
    {0xa0, 0x61, 0x9a, 0xca, 0x00, 0x00, 0x00, 0x17}, /* reset */
};

static const __u8 initTas5130[] = {
    0x04, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
    0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x68, 0x0c, 0x0a,
    0x28, 0x1e, 0x60, COMP, MCK_INIT,
};
static const __u8 tas5130_sensor_init[][8] = {
/*  {0x30, 0x11, 0x00, 0x40, 0x47, 0x00, 0x00, 0x10},
     * shutter 0x47 short exposure? */
    {0x30, 0x11, 0x00, 0x40, 0x01, 0x00, 0x00, 0x10},
     /* shutter 0x01 long exposure */
    {0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10},
};

static const struct sensor_data sensor_data[] = {
    SENS(initHv7131d, hv7131d_sensor_init, 0, 0),
    SENS(initHv7131r, hv7131r_sensor_init, 0, 0),
    SENS(initOv6650, ov6650_sensor_init, F_SIF, 0x60),
    SENS(initOv7630, ov7630_sensor_init, 0, 0x21),
    SENS(initPas106, pas106_sensor_init, F_SIF, 0),
    SENS(initPas202, pas202_sensor_init, 0, 0),
    SENS(initTas5110c, tas5110c_sensor_init, F_SIF, 0),
    SENS(initTas5110d, tas5110d_sensor_init, F_SIF, 0),
    SENS(initTas5130, tas5130_sensor_init, 0, 0),
};

/* get one byte in gspca_dev->usb_buf */
static void reg_r(struct gspca_dev *gspca_dev, __u16 value)
{
    int res;

    if (gspca_dev->usb_err < 0)
        return;

    res = usb_control_msg(gspca_dev->dev,
            usb_rcvctrlpipe(gspca_dev->dev, 0),
            0,                  /* request */
            USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
            value,
            0,                  /* index */
            gspca_dev->usb_buf, 1,
            500);

    if (res < 0) {
        dev_err(gspca_dev->v4l2_dev.dev,
            "Error reading register %02x: %d\n", value, res);
        gspca_dev->usb_err = res;
        /*
         * Make sure the result is zeroed to avoid uninitialized
         * values.
         */
        gspca_dev->usb_buf[0] = 0;
    }
}

static void reg_w(struct gspca_dev *gspca_dev,
        __u16 value,
        const __u8 *buffer,
        int len)
{
    int res;

    if (gspca_dev->usb_err < 0)
        return;

    memcpy(gspca_dev->usb_buf, buffer, len);
    res = usb_control_msg(gspca_dev->dev,
            usb_sndctrlpipe(gspca_dev->dev, 0),
            0x08,               /* request */
            USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
            value,
            0,                  /* index */
            gspca_dev->usb_buf, len,
            500);

    if (res < 0) {
        dev_err(gspca_dev->v4l2_dev.dev,
            "Error writing register %02x: %d\n", value, res);
        gspca_dev->usb_err = res;
    }
}

static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf)
{
    int retry = 60;

    if (gspca_dev->usb_err < 0)
        return;

    /* is i2c ready */
    reg_w(gspca_dev, 0x08, buf, 8);
    while (retry--) {
        if (gspca_dev->usb_err < 0)
            return;
        msleep(1);
        reg_r(gspca_dev, 0x08);
        if (gspca_dev->usb_buf[0] & 0x04) {
            if (gspca_dev->usb_buf[0] & 0x08) {
                dev_err(gspca_dev->v4l2_dev.dev,
                    "i2c error writing %8ph\n", buf);
                gspca_dev->usb_err = -EIO;
            }
            return;
        }
    }

    dev_err(gspca_dev->v4l2_dev.dev, "i2c write timeout\n");
    gspca_dev->usb_err = -EIO;
}

static void i2c_w_vector(struct gspca_dev *gspca_dev,
            const __u8 buffer[][8], int len)
{
    for (;;) {
        if (gspca_dev->usb_err < 0)
            return;
        i2c_w(gspca_dev, *buffer);
        len -= 8;
        if (len <= 0)
            break;
        buffer++;
    }
}

static void setbrightness(struct gspca_dev *gspca_dev)
{
    struct sd *sd = (struct sd *) gspca_dev;

    switch (sd->sensor) {
    case SENSOR_OV6650:
    case SENSOR_OV7630: {
        __u8 i2cOV[] =
            {0xa0, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x10};

        /* change reg 0x06 */
        i2cOV[1] = sensor_data[sd->sensor].sensor_addr;
        i2cOV[3] = sd->brightness->val;
        i2c_w(gspca_dev, i2cOV);
        break;
    }
    case SENSOR_PAS106:
    case SENSOR_PAS202: {
        __u8 i2cpbright[] =
            {0xb0, 0x40, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x16};
        __u8 i2cpdoit[] =
            {0xa0, 0x40, 0x11, 0x01, 0x00, 0x00, 0x00, 0x16};

        /* PAS106 uses reg 7 and 8 instead of b and c */
        if (sd->sensor == SENSOR_PAS106) {
            i2cpbright[2] = 7;
            i2cpdoit[2] = 0x13;
        }

        if (sd->brightness->val < 127) {
            /* change reg 0x0b, signreg */
            i2cpbright[3] = 0x01;
            /* set reg 0x0c, offset */
            i2cpbright[4] = 127 - sd->brightness->val;
        } else
            i2cpbright[4] = sd->brightness->val - 127;

        i2c_w(gspca_dev, i2cpbright);
        i2c_w(gspca_dev, i2cpdoit);
        break;
    }
    default:
        break;
    }
}

static void setgain(struct gspca_dev *gspca_dev)
{
    struct sd *sd = (struct sd *) gspca_dev;
    u8 gain = gspca_dev->gain->val;

    switch (sd->sensor) {
    case SENSOR_HV7131D: {
        __u8 i2c[] =
            {0xc0, 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x17};

        i2c[3] = 0x3f - gain;
        i2c[4] = 0x3f - gain;
        i2c[5] = 0x3f - gain;

        i2c_w(gspca_dev, i2c);
        break;
    }
    case SENSOR_TAS5110C:
    case SENSOR_TAS5130CXX: {
        __u8 i2c[] =
            {0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10};

        i2c[4] = 255 - gain;
        i2c_w(gspca_dev, i2c);
        break;
    }
    case SENSOR_TAS5110D: {
        __u8 i2c[] = {
            0xb0, 0x61, 0x02, 0x00, 0x10, 0x00, 0x00, 0x17 };
        gain = 255 - gain;
        /* The bits in the register are the wrong way around!! */
        i2c[3] |= (gain & 0x80) >> 7;
        i2c[3] |= (gain & 0x40) >> 5;
        i2c[3] |= (gain & 0x20) >> 3;
        i2c[3] |= (gain & 0x10) >> 1;
        i2c[3] |= (gain & 0x08) << 1;
        i2c[3] |= (gain & 0x04) << 3;
        i2c[3] |= (gain & 0x02) << 5;
        i2c[3] |= (gain & 0x01) << 7;
        i2c_w(gspca_dev, i2c);
        break;
    }
    case SENSOR_OV6650:
    case SENSOR_OV7630: {
        __u8 i2c[] = {0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10};

        /*
         * The ov7630's gain is weird, at 32 the gain drops to the
         * same level as at 16, so skip 32-47 (of the 0-63 scale).
         */
        if (sd->sensor == SENSOR_OV7630 && gain >= 32)
            gain += 16;

        i2c[1] = sensor_data[sd->sensor].sensor_addr;
        i2c[3] = gain;
        i2c_w(gspca_dev, i2c);
        break;
    }
    case SENSOR_PAS106:
    case SENSOR_PAS202: {
        __u8 i2cpgain[] =
            {0xa0, 0x40, 0x10, 0x00, 0x00, 0x00, 0x00, 0x15};
        __u8 i2cpcolorgain[] =
            {0xc0, 0x40, 0x07, 0x00, 0x00, 0x00, 0x00, 0x15};
        __u8 i2cpdoit[] =
            {0xa0, 0x40, 0x11, 0x01, 0x00, 0x00, 0x00, 0x16};

        /* PAS106 uses different regs (and has split green gains) */
        if (sd->sensor == SENSOR_PAS106) {
            i2cpgain[2] = 0x0e;
            i2cpcolorgain[0] = 0xd0;
            i2cpcolorgain[2] = 0x09;
            i2cpdoit[2] = 0x13;
        }

        i2cpgain[3] = gain;
        i2cpcolorgain[3] = gain >> 1;
        i2cpcolorgain[4] = gain >> 1;
        i2cpcolorgain[5] = gain >> 1;
        i2cpcolorgain[6] = gain >> 1;

        i2c_w(gspca_dev, i2cpgain);
        i2c_w(gspca_dev, i2cpcolorgain);
        i2c_w(gspca_dev, i2cpdoit);
        break;
    }
    default:
        if (sd->bridge == BRIDGE_103) {
            u8 buf[3] = { gain, gain, gain }; /* R, G, B */
            reg_w(gspca_dev, 0x05, buf, 3);
        } else {
            u8 buf[2];
            buf[0] = gain << 4 | gain; /* Red and blue */
            buf[1] = gain;             /* Green */
            reg_w(gspca_dev, 0x10, buf, 2);
        }
    }
}

static void setexposure(struct gspca_dev *gspca_dev)
{
    struct sd *sd = (struct sd *) gspca_dev;

    switch (sd->sensor) {
    case SENSOR_HV7131D: {
        /* Note: the datasheet wrongly says line mode exposure uses reg
           0x26 and 0x27; testing has shown 0x25 + 0x26 */
        __u8 i2c[] = {0xc0, 0x11, 0x25, 0x00, 0x00, 0x00, 0x00, 0x17};
        u16 reg = gspca_dev->exposure->val;

        i2c[3] = reg >> 8;
        i2c[4] = reg & 0xff;
        i2c_w(gspca_dev, i2c);
        break;
    }
    case SENSOR_TAS5110C:
    case SENSOR_TAS5110D: {
        /* register 19's high nibble contains the sn9c10x clock divider.
           The high nibble configures the number of fps according to the
           formula: 60 / high_nibble, with a maximum of 30 fps */
        u8 reg = gspca_dev->exposure->val;

        reg = (reg << 4) | 0x0b;
        reg_w(gspca_dev, 0x19, &reg, 1);
        break;
    }
    case SENSOR_OV6650:
    case SENSOR_OV7630: {
        /* The ov6650 / ov7630 have 2 registers which both influence
           exposure: register 11, whose low nibble sets the number of fps
           according to: fps = 30 / (low_nibble + 1)

           The fps configures the maximum exposure setting, but it is
           possible to use less exposure than what the fps maximum allows
           by setting register 10. Register 10 configures the actual
           exposure as a quotient of the full exposure, with 0 being no
           exposure at all (not very useful) and reg10_max being the max
           exposure possible at that framerate.

           The code maps our 0 - 510 ms exposure ctrl to these 2
           registers, trying to keep fps as high as possible.
        */
        __u8 i2c[] = {0xb0, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10};
        int reg10, reg11, reg10_max;

        /* ov6645 datasheet says reg10_max is 9a, but that uses
           tline * 2 * reg10 as the formula for calculating texpo; the
           ov6650 probably uses the same formula as the 7730, which uses
           tline * 4 * reg10, which explains why the reg10_max we've
           found experimentally for the ov6650 is exactly half that of
           the ov6645. The ov7630 datasheet says the max is 0x41. */
        if (sd->sensor == SENSOR_OV6650) {
            reg10_max = 0x4d;
            i2c[4] = 0xc0; /* OV6650 needs non default vsync pol */
        } else
            reg10_max = 0x41;

        reg11 = (15 * gspca_dev->exposure->val + 999) / 1000;
        if (reg11 < 1)
            reg11 = 1;
        else if (reg11 > 16)
            reg11 = 16;

        /* In 640x480, if reg11 is less than 4, the image is unstable
           (the bridge goes into a higher compression mode which we have
           not reverse engineered yet).
        */
        if (gspca_dev->pixfmt.width == 640 && reg11 < 4)
            reg11 = 4;

        /* frame exposure time in ms = 1000 * reg11 / 30    ->
        reg10 = (gspca_dev->exposure->val / 2) * reg10_max
                / (1000 * reg11 / 30) */
        reg10 = (gspca_dev->exposure->val * 15 * reg10_max)
                / (1000 * reg11);

        /* Don't allow this to get below 10 when using autogain; the
           steps become very large (relatively) when below 10, causing
           the image to oscillate from much too dark to much too bright
           and back again. */
        if (gspca_dev->autogain->val && reg10 < 10)
            reg10 = 10;
        else if (reg10 > reg10_max)
            reg10 = reg10_max;

        /* Write reg 10 and reg11 low nibble */
        i2c[1] = sensor_data[sd->sensor].sensor_addr;
        i2c[3] = reg10;
        i2c[4] |= reg11 - 1;

        /* If register 11 didn't change, don't write it again */
        if (sd->reg11 == reg11)
            i2c[0] = 0xa0;

        i2c_w(gspca_dev, i2c);
        if (gspca_dev->usb_err == 0)
            sd->reg11 = reg11;
        break;
    }
    case SENSOR_PAS202: {
        __u8 i2cpframerate[] =
            {0xb0, 0x40, 0x04, 0x00, 0x00, 0x00, 0x00, 0x16};
        __u8 i2cpexpo[] =
            {0xa0, 0x40, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x16};
        const __u8 i2cpdoit[] =
            {0xa0, 0x40, 0x11, 0x01, 0x00, 0x00, 0x00, 0x16};
        int framerate_ctrl;

        /* The exposure knee for the autogain algorithm is 200
           (100 ms / 10 fps on other sensors); for values below this use
           the control for setting the partial frame exposure time,
           above that use the variable framerate. This way we run at max
           framerate (640x480@7.5 fps, 320x240@10fps) until the knee is
           reached. Using the variable framerate control above 200 is
           better than playing around with both clockdiv + partial frame
           exposure times (like we are doing with the ov chips), as that
           sometimes leads to jumps in the exposure control, which are
           bad for auto exposure. */
        if (gspca_dev->exposure->val < 200) {
            i2cpexpo[3] = 255 - (gspca_dev->exposure->val * 255)
                        / 200;
            framerate_ctrl = 500;
        } else {
            /* The PAS202's exposure control goes from 0 - 4095,
               but anything below 500 causes vsync issues, so scale
               our 200-1023 to 500-4095 */
            framerate_ctrl = (gspca_dev->exposure->val - 200)
                        * 1000 / 229 + 500;
        }

        i2cpframerate[3] = framerate_ctrl >> 6;
        i2cpframerate[4] = framerate_ctrl & 0x3f;
        i2c_w(gspca_dev, i2cpframerate);
        i2c_w(gspca_dev, i2cpexpo);
        i2c_w(gspca_dev, i2cpdoit);
        break;
    }
    case SENSOR_PAS106: {
        __u8 i2cpframerate[] =
            {0xb1, 0x40, 0x03, 0x00, 0x00, 0x00, 0x00, 0x14};
        __u8 i2cpexpo[] =
            {0xa1, 0x40, 0x05, 0x00, 0x00, 0x00, 0x00, 0x14};
        const __u8 i2cpdoit[] =
            {0xa1, 0x40, 0x13, 0x01, 0x00, 0x00, 0x00, 0x14};
        int framerate_ctrl;

        /* For values below 150 use partial frame exposure, above
           that use framerate ctrl */
        if (gspca_dev->exposure->val < 150) {
            i2cpexpo[3] = 150 - gspca_dev->exposure->val;
            framerate_ctrl = 300;
        } else {
            /* The PAS106's exposure control goes from 0 - 4095,
               but anything below 300 causes vsync issues, so scale
               our 150-1023 to 300-4095 */
            framerate_ctrl = (gspca_dev->exposure->val - 150)
                        * 1000 / 230 + 300;
        }

        i2cpframerate[3] = framerate_ctrl >> 4;
        i2cpframerate[4] = framerate_ctrl & 0x0f;
        i2c_w(gspca_dev, i2cpframerate);
        i2c_w(gspca_dev, i2cpexpo);
        i2c_w(gspca_dev, i2cpdoit);
        break;
    }
    default:
        break;
    }
}

static void setfreq(struct gspca_dev *gspca_dev)
{
    struct sd *sd = (struct sd *) gspca_dev;

    if (sd->sensor == SENSOR_OV6650 || sd->sensor == SENSOR_OV7630) {
        /* Framerate adjust register for artificial light 50 Hz flicker
           compensation; for the ov6650 this is identical to the ov6630
           0x2b register, see the ov6630 datasheet.
           0x4f / 0x8a -> (30 fps -> 25 fps), 0x00 -> no adjustment */
        __u8 i2c[] = {0xa0, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10};

        switch (sd->plfreq->val) {
        default:
/*          case 0:          * no filter */
/*          case 2:          * 60 Hz */
            i2c[3] = 0;
            break;
        case 1:             /* 50 Hz */
            i2c[3] = (sd->sensor == SENSOR_OV6650)
                    ? 0x4f : 0x8a;
            break;
        }
        i2c[1] = sensor_data[sd->sensor].sensor_addr;
        i2c_w(gspca_dev, i2c);
    }
}

static void do_autogain(struct gspca_dev *gspca_dev)
{
    struct sd *sd = (struct sd *) gspca_dev;
    int deadzone, desired_avg_lum, avg_lum;

    avg_lum = atomic_read(&sd->avg_lum);
    if (avg_lum == -1)
        return;

    if (sd->autogain_ignore_frames > 0) {
        sd->autogain_ignore_frames--;
        return;
    }

    /* SIF / VGA sensors have a different autoexposure area and thus
       different avg_lum values for the same picture brightness */
    if (sensor_data[sd->sensor].flags & F_SIF) {
        deadzone = 500;
        /* SIF sensors tend to overexpose, so keep this small */
        desired_avg_lum = 5000;
    } else {
        deadzone = 1500;
        desired_avg_lum = 13000;
    }

    if (sd->brightness)
        desired_avg_lum = sd->brightness->val * desired_avg_lum / 127;

    if (gspca_dev->exposure->maximum < 500) {
        if (gspca_coarse_grained_expo_autogain(gspca_dev, avg_lum,
                desired_avg_lum, deadzone))
            sd->autogain_ignore_frames = AUTOGAIN_IGNORE_FRAMES;
    } else {
        int gain_knee = (s32)gspca_dev->gain->maximum * 9 / 10;
        if (gspca_expo_autogain(gspca_dev, avg_lum, desired_avg_lum,
                deadzone, gain_knee, sd->exposure_knee))
            sd->autogain_ignore_frames = AUTOGAIN_IGNORE_FRAMES;
    }
}

/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
            const struct usb_device_id *id)
{
    struct sd *sd = (struct sd *) gspca_dev;
    struct cam *cam;

    reg_r(gspca_dev, 0x00);
    if (gspca_dev->usb_buf[0] != 0x10)
        return -ENODEV;

    /* copy the webcam info from the device id */
    sd->sensor = id->driver_info >> 8;
    sd->bridge = id->driver_info & 0xff;

    cam = &gspca_dev->cam;
    if (!(sensor_data[sd->sensor].flags & F_SIF)) {
        cam->cam_mode = vga_mode;
        cam->nmodes = ARRAY_SIZE(vga_mode);
    } else {
        cam->cam_mode = sif_mode;
        cam->nmodes = ARRAY_SIZE(sif_mode);
    }
    cam->npkt = 36;             /* 36 packets per ISOC message */

    return 0;
}

/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
    const __u8 stop = 0x09; /* Disable stream, turn off LED */

    reg_w(gspca_dev, 0x01, &stop, 1);

    return gspca_dev->usb_err;
}

static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
    struct gspca_dev *gspca_dev =
        container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
    struct sd *sd = (struct sd *)gspca_dev;

    gspca_dev->usb_err = 0;

    if (ctrl->id == V4L2_CID_AUTOGAIN && ctrl->is_new && ctrl->val) {
        /* when switching to autogain, set defaults to make sure we are
           on a valid point of the autogain gain / exposure knee graph,
           and give this change time to take effect before doing
           autogain.
        */
        gspca_dev->gain->val = gspca_dev->gain->default_value;
        gspca_dev->exposure->val = gspca_dev->exposure->default_value;
        sd->autogain_ignore_frames = AUTOGAIN_IGNORE_FRAMES;
    }

    if (!gspca_dev->streaming)
        return 0;

    switch (ctrl->id) {
    case V4L2_CID_BRIGHTNESS:
        setbrightness(gspca_dev);
        break;
    case V4L2_CID_AUTOGAIN:
        if (gspca_dev->exposure->is_new || (ctrl->is_new && ctrl->val))
            setexposure(gspca_dev);
        if (gspca_dev->gain->is_new || (ctrl->is_new && ctrl->val))
            setgain(gspca_dev);
        break;
    case V4L2_CID_POWER_LINE_FREQUENCY:
        setfreq(gspca_dev);
        break;
    default:
        return -EINVAL;
    }
    return gspca_dev->usb_err;
}

static const struct v4l2_ctrl_ops sd_ctrl_ops = {
    .s_ctrl = sd_s_ctrl,
};

/* this function is called at probe time */
static int sd_init_controls(struct gspca_dev *gspca_dev)
{
    struct sd *sd = (struct sd *) gspca_dev;
    struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;

    gspca_dev->vdev.ctrl_handler = hdl;
    v4l2_ctrl_handler_init(hdl, 5);

    if (sd->sensor == SENSOR_OV6650 || sd->sensor == SENSOR_OV7630 ||
        sd->sensor == SENSOR_PAS106 || sd->sensor == SENSOR_PAS202)
        sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                    V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);

    /* Gain range is sensor dependent */
    switch (sd->sensor) {
    case SENSOR_OV6650:
    case SENSOR_PAS106:
    case SENSOR_PAS202:
        gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                    V4L2_CID_GAIN, 0, 31, 1, 15);
        break;
    case SENSOR_OV7630:
        gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                    V4L2_CID_GAIN, 0, 47, 1, 31);
        break;
    case SENSOR_HV7131D:
        gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                    V4L2_CID_GAIN, 0, 63, 1, 31);
        break;
    case SENSOR_TAS5110C:
    case SENSOR_TAS5110D:
    case SENSOR_TAS5130CXX:
        gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                    V4L2_CID_GAIN, 0, 255, 1, 127);
        break;
    default:
        if (sd->bridge == BRIDGE_103) {
            gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_GAIN, 0, 127, 1, 63);
        } else {
            gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_GAIN, 0, 15, 1, 7);
        }
    }

    /* Exposure range is sensor dependent, and not all have exposure */
    switch (sd->sensor) {
    case SENSOR_HV7131D:
        gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                    V4L2_CID_EXPOSURE, 0, 8191, 1, 482);
        sd->exposure_knee = 964;
        break;
    case SENSOR_OV6650:
    case SENSOR_OV7630:
    case SENSOR_PAS106:
    case SENSOR_PAS202:
        gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                    V4L2_CID_EXPOSURE, 0, 1023, 1, 66);
        sd->exposure_knee = 200;
        break;
    case SENSOR_TAS5110C:
    case SENSOR_TAS5110D:
        gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                    V4L2_CID_EXPOSURE, 2, 15, 1, 2);
        break;
    }

    if (gspca_dev->exposure) {
        gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                    V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
    }

    if (sd->sensor == SENSOR_OV6650 || sd->sensor == SENSOR_OV7630)
        sd->plfreq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
            V4L2_CID_POWER_LINE_FREQUENCY,
            V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0,
            V4L2_CID_POWER_LINE_FREQUENCY_DISABLED);

    if (hdl->error) {
        pr_err("Could not initialize controls\n");
        return hdl->error;
    }

    if (gspca_dev->autogain)
        v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false);

    return 0;
}

/* -- start the camera -- */
static int sd_start(struct gspca_dev *gspca_dev)
{
    struct sd *sd = (struct sd *) gspca_dev;
    struct cam *cam = &gspca_dev->cam;
    int i, mode;
    __u8 regs[0x31];

    mode = cam->cam_mode[gspca_dev->curr_mode].priv & 0x07;
    /* Copy registers 0x01 - 0x19 from the template */
    memcpy(&regs[0x01], sensor_data[sd->sensor].bridge_init, 0x19);
    /* Set the mode */
    regs[0x18] |= mode << 4;

    /* Set bridge gain to 1.0 */
    if (sd->bridge
== BRIDGE_103) {
        regs[0x05] = 0x20; /* Red */
        regs[0x06] = 0x20; /* Green */
        regs[0x07] = 0x20; /* Blue */
    } else {
        regs[0x10] = 0x00; /* Red and blue */
        regs[0x11] = 0x00; /* Green */
    }

    /* Setup pixel numbers and auto exposure window */
    if (sensor_data[sd->sensor].flags & F_SIF) {
        regs[0x1a] = 0x14; /* HO_SIZE 640, makes no sense */
        regs[0x1b] = 0x0a; /* VO_SIZE 320, makes no sense */
        regs[0x1c] = 0x02; /* AE H-start 64 */
        regs[0x1d] = 0x02; /* AE V-start 64 */
        regs[0x1e] = 0x09; /* AE H-end 288 */
        regs[0x1f] = 0x07; /* AE V-end 224 */
    } else {
        regs[0x1a] = 0x1d; /* HO_SIZE 960, makes no sense */
        regs[0x1b] = 0x10; /* VO_SIZE 512, makes no sense */
        regs[0x1c] = 0x05; /* AE H-start 160 */
        regs[0x1d] = 0x03; /* AE V-start 96 */
        regs[0x1e] = 0x0f; /* AE H-end 480 */
        regs[0x1f] = 0x0c; /* AE V-end 384 */
    }

    /* Setup the gamma table (only used with the sn9c103 bridge) */
    for (i = 0; i < 16; i++)
        regs[0x20 + i] = i * 16;
    regs[0x20 + i] = 255;

    /* Special cases where some regs depend on mode or bridge */
    switch (sd->sensor) {
    case SENSOR_TAS5130CXX:
        /* FIXME / TESTME: probably not mode specific at all; most
           likely the upper nibble of 0x19 is exposure (clock divider)
           just as with the tas5110. We need someone to test this. */
        regs[0x19] = mode ? 0x23 : 0x43;
        break;
    case SENSOR_OV7630:
        /* FIXME / TESTME: for some reason with the 101/102 bridge the
           clock is set to 12 MHz (reg1 == 0x04), rather than 24.
           Also the hstart needs to go from 1 to 2 when using a 103,
           which is likely related. This does not seem right. */
        if (sd->bridge == BRIDGE_103) {
            regs[0x01] = 0x44; /* Select 24 MHz clock */
            regs[0x12] = 0x02; /* Set hstart to 2 */
        }
        break;
    case SENSOR_PAS202:
        /* For some unknown reason we need to increase hstart by 1 on
           the sn9c103, otherwise we get wrong colors (bayer shift). */
        if (sd->bridge == BRIDGE_103)
            regs[0x12] += 1;
        break;
    }

    /* Disable compression when the raw bayer format has been selected */
    if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW)
        regs[0x18] &= ~0x80;

    /* Vga mode emulation on SIF sensor? */
    if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_REDUCED_SIF) {
        regs[0x12] += 16;       /* hstart adjust */
        regs[0x13] += 24;       /* vstart adjust */
        regs[0x15] = 320 / 16;  /* hsize */
        regs[0x16] = 240 / 16;  /* vsize */
    }

    /* reg 0x01 bit 2 video transfer on */
    reg_w(gspca_dev, 0x01, &regs[0x01], 1);
    /* reg 0x17 SensorClk enable inv Clk 0x60 */
    reg_w(gspca_dev, 0x17, &regs[0x17], 1);
    /* Set the registers from the template */
    reg_w(gspca_dev, 0x01, &regs[0x01],
          (sd->bridge == BRIDGE_103) ? 0x30 : 0x1f);

    /* Init the sensor */
    i2c_w_vector(gspca_dev, sensor_data[sd->sensor].sensor_init,
            sensor_data[sd->sensor].sensor_init_size);

    /* Mode / bridge specific sensor setup */
    switch (sd->sensor) {
    case SENSOR_PAS202: {
        const __u8 i2cpclockdiv[] =
            {0xa0, 0x40, 0x02, 0x03, 0x00, 0x00, 0x00, 0x10};
        /* clockdiv from 4 to 3 (7.5 -> 10 fps) when in low res mode */
        if (mode)
            i2c_w(gspca_dev, i2cpclockdiv);
        break;
    }
    case SENSOR_OV7630:
        /* FIXME / TESTME: we should be able to handle this identically
           for the 101/102 and the 103 case */
        if (sd->bridge == BRIDGE_103) {
            const __u8 i2c[] = { 0xa0, 0x21, 0x13,
                         0x80, 0x00, 0x00, 0x00, 0x10 };
            i2c_w(gspca_dev, i2c);
        }
        break;
    }

    /* H_size V_size 0x28, 0x1e -> 640x480.
       0x16, 0x12 -> 352x288 */
    reg_w(gspca_dev, 0x15, &regs[0x15], 2);
    /* compression register */
    reg_w(gspca_dev, 0x18, &regs[0x18], 1);
    /* H_start */
    reg_w(gspca_dev, 0x12, &regs[0x12], 1);
    /* V_START */
    reg_w(gspca_dev, 0x13, &regs[0x13], 1);
    /* reset 0x17 SensorClk enable inv Clk 0x60 */
    /*fixme: ov7630 [17]=68 8f (+20 if 102)*/
    reg_w(gspca_dev, 0x17, &regs[0x17], 1);
    /*MCKSIZE ->3 */    /*fixme: not ov7630*/
    reg_w(gspca_dev, 0x19, &regs[0x19], 1);
    /* AE_STRX AE_STRY AE_ENDX AE_ENDY */
    reg_w(gspca_dev, 0x1c, &regs[0x1c], 4);
    /* Enable video transfer */
    reg_w(gspca_dev, 0x01, &regs[0x01], 1);
    /* Compression */
    reg_w(gspca_dev, 0x18, &regs[0x18], 2);
    msleep(20);

    sd->reg11 = -1;

    setgain(gspca_dev);
    setbrightness(gspca_dev);
    setexposure(gspca_dev);
    setfreq(gspca_dev);

    sd->frames_to_drop = 0;
    sd->autogain_ignore_frames = 0;
    gspca_dev->exp_too_high_cnt = 0;
    gspca_dev->exp_too_low_cnt = 0;
    atomic_set(&sd->avg_lum, -1);
    return gspca_dev->usb_err;
}

static void sd_stopN(struct gspca_dev *gspca_dev)
{
    sd_init(gspca_dev);
}

static u8 *find_sof(struct gspca_dev *gspca_dev, u8 *data, int len)
{
    struct sd *sd = (struct sd *) gspca_dev;
    int i, header_size = (sd->bridge == BRIDGE_103) ? 18 : 12;

    /* frames start with:
     *  ff ff 00 c4 c4 96   synchro
     *  00                  (unknown)
     *  xx                  (frame sequence / size / compression)
     *  (xx)                (idem - extra byte for sn9c103)
     *  ll mm               brightness sum inside auto exposure
     *  ll mm               brightness sum outside auto exposure
     *  (xx xx xx xx xx)    audio values for sn9c103
     */
    for (i = 0; i < len; i++) {
        switch (sd->header_read) {
        case 0:
            if (data[i] == 0xff)
                sd->header_read++;
            break;
        case 1:
            if (data[i] == 0xff)
                sd->header_read++;
            else
                sd->header_read = 0;
            break;
        case 2:
            if (data[i] == 0x00)
                sd->header_read++;
            else if (data[i] != 0xff)
                sd->header_read = 0;
            break;
        case 3:
            if (data[i] == 0xc4)
                sd->header_read++;
            else if (data[i] == 0xff)
                sd->header_read = 1;
            else
                sd->header_read = 0;
            break;
        case 4:
            if (data[i] == 0xc4)
                sd->header_read++;
            else if (data[i] == 0xff)
                sd->header_read = 1;
            else
                sd->header_read = 0;
            break;
        case 5:
            if (data[i] == 0x96)
                sd->header_read++;
            else if (data[i] == 0xff)
                sd->header_read = 1;
            else
                sd->header_read = 0;
            break;
        default:
            sd->header[sd->header_read - 6] = data[i];
            sd->header_read++;
            if (sd->header_read == header_size) {
                sd->header_read = 0;
                return data + i + 1;
            }
        }
    }
    return NULL;
}

static void sd_pkt_scan(struct gspca_dev *gspca_dev,
            u8 *data,           /* isoc packet */
            int len)            /* iso packet length */
{
    int fr_h_sz = 0, lum_offset = 0, len_after_sof = 0;
    struct sd *sd = (struct sd *) gspca_dev;
    struct cam *cam = &gspca_dev->cam;
    u8 *sof;

    sof = find_sof(gspca_dev, data, len);
    if (sof) {
        if (sd->bridge == BRIDGE_103) {
            fr_h_sz = 18;
            lum_offset = 3;
        } else {
            fr_h_sz = 12;
            lum_offset = 2;
        }

        len_after_sof = len - (sof - data);
        len = (sof - data) - fr_h_sz;
        if (len < 0)
            len = 0;
    }

    if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW) {
        /* In raw mode we sometimes get some garbage after the frame;
           ignore this */
        int used;
        int size = cam->cam_mode[gspca_dev->curr_mode].sizeimage;

        used = gspca_dev->image_len;
        if (used + len > size)
            len = size - used;
    }

    gspca_frame_add(gspca_dev, INTER_PACKET, data, len);

    if (sof) {
        int lum = sd->header[lum_offset] +
                (sd->header[lum_offset + 1] << 8);

        /* When exposure changes midway through a frame we get a lum of
           0; in this case drop 2 frames, as the frames directly after
           an exposure change have an unstable image. Sometimes lum
           *really* is 0 (cam used in low light with low exposure
           setting), so do not drop frames if the previous lum was 0
           too.
        */
        if (lum == 0 && sd->prev_avg_lum != 0) {
            lum = -1;
            sd->frames_to_drop = 2;
            sd->prev_avg_lum = 0;
        } else
            sd->prev_avg_lum = lum;
        atomic_set(&sd->avg_lum, lum);

        if (sd->frames_to_drop)
            sd->frames_to_drop--;
        else
            gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);

        gspca_frame_add(gspca_dev, FIRST_PACKET, sof, len_after_sof);
    }
}

#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
            u8 *data,           /* interrupt packet data */
            int len)            /* interrupt packet length */
{
    int ret = -EINVAL;

    if (len == 1 && data[0] == 1) {
        input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
        input_sync(gspca_dev->input_dev);
        input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
        input_sync(gspca_dev->input_dev);
        ret = 0;
    }

    return ret;
}
#endif

/* sub-driver description */
static const struct sd_desc sd_desc = {
    .name = MODULE_NAME,
    .config = sd_config,
    .init = sd_init,
    .init_controls = sd_init_controls,
    .start = sd_start,
    .stopN = sd_stopN,
    .pkt_scan = sd_pkt_scan,
    .dq_callback = do_autogain,
#if IS_ENABLED(CONFIG_INPUT)
    .int_pkt_scan = sd_int_pkt_scan,
#endif
};

/* -- module initialisation -- */
#define SB(sensor, bridge) \
    .driver_info = (SENSOR_ ## sensor << 8) | BRIDGE_ ## bridge

static const struct usb_device_id device_table[] = {
    {USB_DEVICE(0x0c45, 0x6001), SB(TAS5110C, 102)}, /* TAS5110C1B */
    {USB_DEVICE(0x0c45, 0x6005), SB(TAS5110C, 101)}, /* TAS5110C1B */
    {USB_DEVICE(0x0c45, 0x6007), SB(TAS5110D, 101)}, /* TAS5110D */
    {USB_DEVICE(0x0c45, 0x6009), SB(PAS106, 101)},
    {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
    {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
    {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
    {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
    {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
    {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */
    {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)},
    {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
    {USB_DEVICE(0x0c45, 0x602a), SB(HV7131D, 102)},
    /* {USB_DEVICE(0x0c45, 0x602b), SB(MI0343, 102)}, */
    {USB_DEVICE(0x0c45, 0x602c), SB(OV7630, 102)},
    {USB_DEVICE(0x0c45, 0x602d), SB(HV7131R, 102)},
    {USB_DEVICE(0x0c45, 0x602e), SB(OV7630, 102)},
    /* {USB_DEVICE(0x0c45, 0x6030), SB(MI03XX, 102)}, */ /* MI0343 MI0360 MI0330 */
    /* {USB_DEVICE(0x0c45, 0x6082), SB(MI03XX, 103)}, */ /* MI0343 MI0360 */
    {USB_DEVICE(0x0c45, 0x6083), SB(HV7131D, 103)},
    {USB_DEVICE(0x0c45, 0x608c), SB(HV7131R, 103)},
    /* {USB_DEVICE(0x0c45, 0x608e), SB(CISVF10, 103)}, */
    {USB_DEVICE(0x0c45, 0x608f), SB(OV7630, 103)},
    {USB_DEVICE(0x0c45, 0x60a8), SB(PAS106, 103)},
    {USB_DEVICE(0x0c45, 0x60aa), SB(TAS5130CXX, 103)},
    {USB_DEVICE(0x0c45, 0x60af), SB(PAS202, 103)},
    {USB_DEVICE(0x0c45, 0x60b0), SB(OV7630, 103)},
    {}
};
MODULE_DEVICE_TABLE(usb, device_table);

/* -- device connect -- */
static int sd_probe(struct usb_interface *intf,
            const struct usb_device_id *id)
{
    return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
                THIS_MODULE);
}

static struct usb_driver sd_driver = {
    .name = MODULE_NAME,
    .id_table = device_table,
    .probe = sd_probe,
    .disconnect = gspca_disconnect,
#ifdef CONFIG_PM
    .suspend = gspca_suspend,
    .resume = gspca_resume,
    .reset_resume = gspca_resume,
#endif
};

module_usb_driver(sd_driver);
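/*
 * Editorial note (not from the driver): the TAS5110D branch of setgain()
 * above reverses the gain byte's bit order by hand with eight mask-and-shift
 * steps. The kernel already provides bitrev8() in <linux/bitrev.h>, so an
 * equivalent, more compact form would be the sketch below (illustrative
 * only; the helper name is hypothetical).
 */
#include <linux/bitrev.h>

static u8 tas5110d_gain_byte(u8 gain)
{
    /* the register wants 0 == max gain, with the bit order reversed */
    return bitrev8(255 - gain);
}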
// SPDX-License-Identifier: GPL-2.0-only /* Common methods for dibusb-based-receivers. * * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #include "dibusb.h" MODULE_DESCRIPTION("Common methods for DIB3000MC"); MODULE_LICENSE("GPL"); /* 3000MC/P stuff */ // Config Adjacent channels Perf -cal22 static struct dibx000_agc_config dib3000p_mt2060_agc_config = { .band_caps = BAND_VHF | BAND_UHF, .setup = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0), .agc1_max = 48497, .agc1_min = 23593, .agc2_max = 46531, .agc2_min = 24904, .agc1_pt1 = 0x65, .agc1_pt2 = 0x69, .agc1_slope1 = 0x51, .agc1_slope2 = 0x27, .agc2_pt1 = 0, .agc2_pt2 = 0x33, .agc2_slope1 = 0x35, .agc2_slope2 = 0x37, }; static struct dib3000mc_config stk3000p_dib3000p_config = { &dib3000p_mt2060_agc_config, .max_time = 0x196, .ln_adc_level = 0x1cc7, .output_mpeg2_in_188_bytes = 1, .agc_command1 = 1, .agc_command2 = 1, }; static struct dibx000_agc_config dib3000p_panasonic_agc_config = { .band_caps = BAND_VHF | BAND_UHF, .setup = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0), .agc1_max = 56361, .agc1_min = 22282, .agc2_max = 47841, .agc2_min = 36045, .agc1_pt1 = 0x3b, .agc1_pt2 = 0x6b, .agc1_slope1 = 0x55, .agc1_slope2 = 0x1d, .agc2_pt1 = 0, .agc2_pt2 = 0x0a, .agc2_slope1 = 0x95, .agc2_slope2 = 0x1e, }; static struct dib3000mc_config mod3000p_dib3000p_config = { &dib3000p_panasonic_agc_config, .max_time = 0x51, .ln_adc_level = 0x1cc7, .output_mpeg2_in_188_bytes = 1, .agc_command1 = 1, .agc_command2 = 1, }; int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap) { if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON && le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_LITEON_DVB_T_WARM) { msleep(1000); } adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, DEFAULT_DIB3000P_I2C_ADDRESS, &mod3000p_dib3000p_config); if ((adap->fe_adap[0].fe) == NULL) adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, DEFAULT_DIB3000MC_I2C_ADDRESS, &mod3000p_dib3000p_config); if ((adap->fe_adap[0].fe) != NULL) { if (adap->priv != NULL) { struct dibusb_state *st = adap->priv; st->ops.pid_parse = dib3000mc_pid_parse; st->ops.pid_ctrl = dib3000mc_pid_control; } return 0; } return -ENODEV; } EXPORT_SYMBOL(dibusb_dib3000mc_frontend_attach); static struct mt2060_config stk3000p_mt2060_config = { 0x60 }; int dibusb_dib3000mc_tuner_attach(struct dvb_usb_adapter *adap) { struct dibusb_state *st = adap->priv; u8 a,b; u16 if1 = 1220; struct i2c_adapter *tun_i2c; // First IF calibration for Liteon Sticks if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON && le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_LITEON_DVB_T_WARM) { dibusb_read_eeprom_byte(adap->dev,0x7E,&a); dibusb_read_eeprom_byte(adap->dev,0x7F,&b); if (a == 0x00) if1 += b; else if (a == 0x80)
if1 -= b; else warn("LITE-ON DVB-T: Strange IF1 calibration :%2X %2X\n", a, b); } else if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_DIBCOM && le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_DIBCOM_MOD3001_WARM) { u8 desc; dibusb_read_eeprom_byte(adap->dev, 7, &desc); if (desc == 2) { a = 127; do { dibusb_read_eeprom_byte(adap->dev, a, &desc); a--; } while (a > 7 && (desc == 0xff || desc == 0x00)); if (desc & 0x80) if1 -= (0xff - desc); else if1 += desc; } } tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe_adap[0].fe, 1); if (dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c, &stk3000p_mt2060_config, if1) == NULL) { /* not found - use panasonic pll parameters */ if (dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, tun_i2c, DVB_PLL_ENV57H1XD5) == NULL) return -ENOMEM; } else { st->mt2060_present = 1; /* set the correct parameters for the dib3000p */ dib3000mc_set_config(adap->fe_adap[0].fe, &stk3000p_dib3000p_config); } return 0; } EXPORT_SYMBOL(dibusb_dib3000mc_tuner_attach);
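/*
 * Editor's illustration (not part of the driver above): the tuner attach
 * code corrects the nominal IF1 value of 1220 with a per-stick offset
 * stored in EEPROM. For LITE-ON sticks, byte 0x7e selects the sign
 * (0x00 = add, 0x80 = subtract) and byte 0x7f the magnitude; for the
 * DiBcom MOD3001, the last non-empty EEPROM byte encodes the offset with
 * bit 7 as the sign. A stand-alone sketch of just that arithmetic
 * (function names here are made up for the example):
 */
#include <stdio.h>

static unsigned int liteon_if1(unsigned char a, unsigned char b)
{
	unsigned int if1 = 1220;

	if (a == 0x00)
		if1 += b;
	else if (a == 0x80)
		if1 -= b;
	/* any other value of 'a' is ignored; the driver only warns */
	return if1;
}

static unsigned int mod3001_if1(unsigned char desc)
{
	unsigned int if1 = 1220;

	if (desc & 0x80)		/* bit 7 set: negative offset */
		if1 -= 0xff - desc;
	else
		if1 += desc;
	return if1;
}

int main(void)
{
	printf("LITE-ON a=0x00 b=25 -> if1=%u\n", liteon_if1(0x00, 25)); /* 1245 */
	printf("LITE-ON a=0x80 b=25 -> if1=%u\n", liteon_if1(0x80, 25)); /* 1195 */
	printf("MOD3001 desc=0xf0   -> if1=%u\n", mod3001_if1(0xf0));    /* 1205 */
	return 0;
}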
// SPDX-License-Identifier: GPL-2.0-or-later /* * Roccat Ryos driver for Linux * * Copyright (c) 2013 Stefan Achatz <erazor_de@users.sourceforge.net> */ /* */ #include <linux/types.h> #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/hid-roccat.h> #include "hid-ids.h" #include "hid-roccat-common.h" enum { RYOS_REPORT_NUMBER_SPECIAL = 3, RYOS_USB_INTERFACE_PROTOCOL = 0, }; struct ryos_report_special { uint8_t number; /* RYOS_REPORT_NUMBER_SPECIAL */ uint8_t data[4]; } __packed; ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x04, 0x03); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile, 0x05, 0x03); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_primary, 0x06, 0x7d); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_function, 0x07, 0x5f); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_macro, 0x08, 0x23); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_thumbster, 0x09, 0x17); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_extra, 0x0a, 0x08); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_easyzone, 0x0b, 0x126); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(key_mask, 0x0c, 0x06); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(light, 0x0d, 0x10); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(macro, 0x0e, 0x7d2); ROCCAT_COMMON2_BIN_ATTRIBUTE_R(info, 0x0f, 0x08); ROCCAT_COMMON2_BIN_ATTRIBUTE_W(reset, 0x11, 0x03); ROCCAT_COMMON2_BIN_ATTRIBUTE_W(light_control, 0x13, 0x08); ROCCAT_COMMON2_BIN_ATTRIBUTE_W(talk, 0x16, 0x10); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(stored_lights, 0x17, 0x0566); ROCCAT_COMMON2_BIN_ATTRIBUTE_W(custom_lights, 0x18, 0x14); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(light_macro, 0x19, 0x07d2); static const struct bin_attribute *const ryos_bin_attrs[] = { &bin_attr_control, &bin_attr_profile, &bin_attr_keys_primary, &bin_attr_keys_function, &bin_attr_keys_macro, &bin_attr_keys_thumbster, &bin_attr_keys_extra, &bin_attr_keys_easyzone, &bin_attr_key_mask, &bin_attr_light, &bin_attr_macro, &bin_attr_info, &bin_attr_reset, &bin_attr_light_control, &bin_attr_talk, &bin_attr_stored_lights, &bin_attr_custom_lights, &bin_attr_light_macro, NULL, }; static const struct attribute_group ryos_group = { .bin_attrs_new = ryos_bin_attrs, }; static const struct attribute_group *ryos_groups[] = { &ryos_group, NULL, }; static const struct class ryos_class = { .name = "ryos", .dev_groups = ryos_groups, }; static int ryos_init_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct usb_device *usb_dev = interface_to_usbdev(intf); struct roccat_common2_device *ryos; int retval; if (intf->cur_altsetting->desc.bInterfaceProtocol != RYOS_USB_INTERFACE_PROTOCOL) { hid_set_drvdata(hdev, NULL); return 0; } ryos
= kzalloc(sizeof(*ryos), GFP_KERNEL); if (!ryos) { hid_err(hdev, "can't alloc device descriptor\n"); return -ENOMEM; } hid_set_drvdata(hdev, ryos); retval = roccat_common2_device_init_struct(usb_dev, ryos); if (retval) { hid_err(hdev, "couldn't init Ryos device\n"); goto exit_free; } retval = roccat_connect(&ryos_class, hdev, sizeof(struct ryos_report_special)); if (retval < 0) { hid_err(hdev, "couldn't init char dev\n"); } else { ryos->chrdev_minor = retval; ryos->roccat_claimed = 1; } return 0; exit_free: kfree(ryos); return retval; } static void ryos_remove_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct roccat_common2_device *ryos; if (intf->cur_altsetting->desc.bInterfaceProtocol != RYOS_USB_INTERFACE_PROTOCOL) return; ryos = hid_get_drvdata(hdev); if (ryos->roccat_claimed) roccat_disconnect(ryos->chrdev_minor); kfree(ryos); } static int ryos_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; if (!hid_is_usb(hdev)) return -EINVAL; retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); goto exit; } retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (retval) { hid_err(hdev, "hw start failed\n"); goto exit; } retval = ryos_init_specials(hdev); if (retval) { hid_err(hdev, "couldn't install mouse\n"); goto exit_stop; } return 0; exit_stop: hid_hw_stop(hdev); exit: return retval; } static void ryos_remove(struct hid_device *hdev) { ryos_remove_specials(hdev); hid_hw_stop(hdev); } static int ryos_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct roccat_common2_device *ryos = hid_get_drvdata(hdev); if (intf->cur_altsetting->desc.bInterfaceProtocol != RYOS_USB_INTERFACE_PROTOCOL) return 0; if (data[0] != RYOS_REPORT_NUMBER_SPECIAL) return 0; if (ryos != NULL && ryos->roccat_claimed) roccat_report_event(ryos->chrdev_minor, data); return 0; } static const struct hid_device_id ryos_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) }, { } }; MODULE_DEVICE_TABLE(hid, ryos_devices); static struct hid_driver ryos_driver = { .name = "ryos", .id_table = ryos_devices, .probe = ryos_probe, .remove = ryos_remove, .raw_event = ryos_raw_event }; static int __init ryos_init(void) { int retval; retval = class_register(&ryos_class); if (retval) return retval; retval = hid_register_driver(&ryos_driver); if (retval) class_unregister(&ryos_class); return retval; } static void __exit ryos_exit(void) { hid_unregister_driver(&ryos_driver); class_unregister(&ryos_class); } module_init(ryos_init); module_exit(ryos_exit); MODULE_AUTHOR("Stefan Achatz"); MODULE_DESCRIPTION("USB Roccat Ryos MK/Glow/Pro driver"); MODULE_LICENSE("GPL v2");
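/*
 * Editor's illustration (not part of the driver above): each
 * ROCCAT_COMMON2_BIN_ATTRIBUTE_* line exposes one fixed-size binary sysfs
 * attribute; "info", for example, is read-only and 8 (0x08) bytes long.
 * User space can fetch it with a plain open()/read() of exactly the
 * attribute size. The path below is an assumption for illustration only;
 * the real location depends on how the "ryos" class device is enumerated
 * on a given system.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path; look under /sys/class/ryos/ on a real system */
	const char *path = "/sys/class/ryos/ryos0/info";
	unsigned char info[8];	/* "info" attribute is 0x08 bytes */
	ssize_t n;
	int i, fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* read the whole attribute in one call, from offset 0 */
	n = read(fd, info, sizeof(info));
	close(fd);
	if (n != (ssize_t)sizeof(info)) {
		fprintf(stderr, "short read: %zd\n", n);
		return 1;
	}
	for (i = 0; i < (int)sizeof(info); i++)
		printf("%02x ", info[i]);
	printf("\n");
	return 0;
}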
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2006 - 2007 Ivo van Doorn * Copyright (C) 2007 Dmitry Torokhov * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/workqueue.h> #include <linux/capability.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/rfkill.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/wait.h> #include <linux/poll.h> #include <linux/fs.h> #include <linux/slab.h> #include "rfkill.h" #define POLL_INTERVAL (5 * HZ) #define RFKILL_BLOCK_HW BIT(0) #define RFKILL_BLOCK_SW BIT(1) #define RFKILL_BLOCK_SW_PREV BIT(2) #define RFKILL_BLOCK_ANY
(RFKILL_BLOCK_HW |\ RFKILL_BLOCK_SW |\ RFKILL_BLOCK_SW_PREV) #define RFKILL_BLOCK_SW_SETCALL BIT(31) struct rfkill { spinlock_t lock; enum rfkill_type type; unsigned long state; unsigned long hard_block_reasons; u32 idx; bool registered; bool persistent; bool polling_paused; bool suspended; bool need_sync; const struct rfkill_ops *ops; void *data; #ifdef CONFIG_RFKILL_LEDS struct led_trigger led_trigger; const char *ledtrigname; #endif struct device dev; struct list_head node; struct delayed_work poll_work; struct work_struct uevent_work; struct work_struct sync_work; char name[]; }; #define to_rfkill(d) container_of(d, struct rfkill, dev) struct rfkill_int_event { struct list_head list; struct rfkill_event_ext ev; }; struct rfkill_data { struct list_head list; struct list_head events; struct mutex mtx; wait_queue_head_t read_wait; bool input_handler; u8 max_size; }; MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>"); MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); MODULE_DESCRIPTION("RF switch support"); MODULE_LICENSE("GPL"); /* * The locking here should be made much smarter, we currently have * a bit of a stupid situation because drivers might want to register * the rfkill struct under their own lock, and take this lock during * rfkill method calls -- which will cause an AB-BA deadlock situation. * * To fix that, we need to rework this code here to be mostly lock-free * and only use the mutex for list manipulations, not to protect the * various other global variables. Then we can avoid holding the mutex * around driver operations, and all is happy. */ static LIST_HEAD(rfkill_list); /* list of registered rf switches */ static DEFINE_MUTEX(rfkill_global_mutex); static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */ static unsigned int rfkill_default_state = 1; module_param_named(default_state, rfkill_default_state, uint, 0444); MODULE_PARM_DESC(default_state, "Default initial state for all radio types, 0 = radio off"); static struct { bool cur, sav; } rfkill_global_states[NUM_RFKILL_TYPES]; static bool rfkill_epo_lock_active; #ifdef CONFIG_RFKILL_LEDS static void rfkill_led_trigger_event(struct rfkill *rfkill) { struct led_trigger *trigger; if (!rfkill->registered) return; trigger = &rfkill->led_trigger; if (rfkill->state & RFKILL_BLOCK_ANY) led_trigger_event(trigger, LED_OFF); else led_trigger_event(trigger, LED_FULL); } static int rfkill_led_trigger_activate(struct led_classdev *led) { struct rfkill *rfkill; rfkill = container_of(led->trigger, struct rfkill, led_trigger); rfkill_led_trigger_event(rfkill); return 0; } const char *rfkill_get_led_trigger_name(struct rfkill *rfkill) { return rfkill->led_trigger.name; } EXPORT_SYMBOL(rfkill_get_led_trigger_name); void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name) { BUG_ON(!rfkill); rfkill->ledtrigname = name; } EXPORT_SYMBOL(rfkill_set_led_trigger_name); static int rfkill_led_trigger_register(struct rfkill *rfkill) { rfkill->led_trigger.name = rfkill->ledtrigname ? 
: dev_name(&rfkill->dev); rfkill->led_trigger.activate = rfkill_led_trigger_activate; return led_trigger_register(&rfkill->led_trigger); } static void rfkill_led_trigger_unregister(struct rfkill *rfkill) { led_trigger_unregister(&rfkill->led_trigger); } static struct led_trigger rfkill_any_led_trigger; static struct led_trigger rfkill_none_led_trigger; static struct work_struct rfkill_global_led_trigger_work; static void rfkill_global_led_trigger_worker(struct work_struct *work) { enum led_brightness brightness = LED_OFF; struct rfkill *rfkill; mutex_lock(&rfkill_global_mutex); list_for_each_entry(rfkill, &rfkill_list, node) { if (!(rfkill->state & RFKILL_BLOCK_ANY)) { brightness = LED_FULL; break; } } mutex_unlock(&rfkill_global_mutex); led_trigger_event(&rfkill_any_led_trigger, brightness); led_trigger_event(&rfkill_none_led_trigger, brightness == LED_OFF ? LED_FULL : LED_OFF); } static void rfkill_global_led_trigger_event(void) { schedule_work(&rfkill_global_led_trigger_work); } static int rfkill_global_led_trigger_register(void) { int ret; INIT_WORK(&rfkill_global_led_trigger_work, rfkill_global_led_trigger_worker); rfkill_any_led_trigger.name = "rfkill-any"; ret = led_trigger_register(&rfkill_any_led_trigger); if (ret) return ret; rfkill_none_led_trigger.name = "rfkill-none"; ret = led_trigger_register(&rfkill_none_led_trigger); if (ret) led_trigger_unregister(&rfkill_any_led_trigger); else /* Delay activation until all global triggers are registered */ rfkill_global_led_trigger_event(); return ret; } static void rfkill_global_led_trigger_unregister(void) { led_trigger_unregister(&rfkill_none_led_trigger); led_trigger_unregister(&rfkill_any_led_trigger); cancel_work_sync(&rfkill_global_led_trigger_work); } #else static void rfkill_led_trigger_event(struct rfkill *rfkill) { } static inline int rfkill_led_trigger_register(struct rfkill *rfkill) { return 0; } static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill) { } static void rfkill_global_led_trigger_event(void) { } static int rfkill_global_led_trigger_register(void) { return 0; } static void rfkill_global_led_trigger_unregister(void) { } #endif /* CONFIG_RFKILL_LEDS */ static void rfkill_fill_event(struct rfkill_event_ext *ev, struct rfkill *rfkill, enum rfkill_operation op) { unsigned long flags; ev->idx = rfkill->idx; ev->type = rfkill->type; ev->op = op; spin_lock_irqsave(&rfkill->lock, flags); ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW); ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW | RFKILL_BLOCK_SW_PREV)); ev->hard_block_reasons = rfkill->hard_block_reasons; spin_unlock_irqrestore(&rfkill->lock, flags); } static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op) { struct rfkill_data *data; struct rfkill_int_event *ev; list_for_each_entry(data, &rfkill_fds, list) { ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) continue; rfkill_fill_event(&ev->ev, rfkill, op); mutex_lock(&data->mtx); list_add_tail(&ev->list, &data->events); mutex_unlock(&data->mtx); wake_up_interruptible(&data->read_wait); } } static void rfkill_event(struct rfkill *rfkill) { if (!rfkill->registered) return; kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE); /* also send event to /dev/rfkill */ rfkill_send_events(rfkill, RFKILL_OP_CHANGE); } /** * rfkill_set_block - wrapper for set_block method * * @rfkill: the rfkill struct to use * @blocked: the new software state * * Calls the set_block method (when applicable) and handles notifications * etc. as well. 
*/ static void rfkill_set_block(struct rfkill *rfkill, bool blocked) { unsigned long flags; bool prev, curr; int err; if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) return; /* * Some platforms (...!) generate input events which affect the * _hard_ kill state -- whenever something tries to change the * current software state query the hardware state too. */ if (rfkill->ops->query) rfkill->ops->query(rfkill, rfkill->data); spin_lock_irqsave(&rfkill->lock, flags); prev = rfkill->state & RFKILL_BLOCK_SW; if (prev) rfkill->state |= RFKILL_BLOCK_SW_PREV; else rfkill->state &= ~RFKILL_BLOCK_SW_PREV; if (blocked) rfkill->state |= RFKILL_BLOCK_SW; else rfkill->state &= ~RFKILL_BLOCK_SW; rfkill->state |= RFKILL_BLOCK_SW_SETCALL; spin_unlock_irqrestore(&rfkill->lock, flags); err = rfkill->ops->set_block(rfkill->data, blocked); spin_lock_irqsave(&rfkill->lock, flags); if (err) { /* * Failed -- reset status to _PREV, which may be different * from what we have set _PREV to earlier in this function * if rfkill_set_sw_state was invoked. */ if (rfkill->state & RFKILL_BLOCK_SW_PREV) rfkill->state |= RFKILL_BLOCK_SW; else rfkill->state &= ~RFKILL_BLOCK_SW; } rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL; rfkill->state &= ~RFKILL_BLOCK_SW_PREV; curr = rfkill->state & RFKILL_BLOCK_SW; spin_unlock_irqrestore(&rfkill->lock, flags); rfkill_led_trigger_event(rfkill); rfkill_global_led_trigger_event(); if (prev != curr) rfkill_event(rfkill); } static void rfkill_sync(struct rfkill *rfkill) { lockdep_assert_held(&rfkill_global_mutex); if (!rfkill->need_sync) return; rfkill_set_block(rfkill, rfkill_global_states[rfkill->type].cur); rfkill->need_sync = false; } static void rfkill_update_global_state(enum rfkill_type type, bool blocked) { int i; if (type != RFKILL_TYPE_ALL) { rfkill_global_states[type].cur = blocked; return; } for (i = 0; i < NUM_RFKILL_TYPES; i++) rfkill_global_states[i].cur = blocked; } #ifdef CONFIG_RFKILL_INPUT static atomic_t rfkill_input_disabled = ATOMIC_INIT(0); /** * __rfkill_switch_all - Toggle state of all switches of given type * @type: type of interfaces to be affected * @blocked: the new state * * This function sets the state of all switches of given type, * unless a specific switch is suspended. * * Caller must have acquired rfkill_global_mutex. */ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked) { struct rfkill *rfkill; rfkill_update_global_state(type, blocked); list_for_each_entry(rfkill, &rfkill_list, node) { if (rfkill->type != type && type != RFKILL_TYPE_ALL) continue; rfkill_set_block(rfkill, blocked); } } /** * rfkill_switch_all - Toggle state of all switches of given type * @type: type of interfaces to be affected * @blocked: the new state * * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state). * Please refer to __rfkill_switch_all() for details. * * Does nothing if the EPO lock is active. */ void rfkill_switch_all(enum rfkill_type type, bool blocked) { if (atomic_read(&rfkill_input_disabled)) return; mutex_lock(&rfkill_global_mutex); if (!rfkill_epo_lock_active) __rfkill_switch_all(type, blocked); mutex_unlock(&rfkill_global_mutex); } /** * rfkill_epo - emergency power off all transmitters * * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex. * * The global state before the EPO is saved and can be restored later * using rfkill_restore_states(). 
*/ void rfkill_epo(void) { struct rfkill *rfkill; int i; if (atomic_read(&rfkill_input_disabled)) return; mutex_lock(&rfkill_global_mutex); rfkill_epo_lock_active = true; list_for_each_entry(rfkill, &rfkill_list, node) rfkill_set_block(rfkill, true); for (i = 0; i < NUM_RFKILL_TYPES; i++) { rfkill_global_states[i].sav = rfkill_global_states[i].cur; rfkill_global_states[i].cur = true; } mutex_unlock(&rfkill_global_mutex); } /** * rfkill_restore_states - restore global states * * Restore (and sync switches to) the global state from the * states saved in rfkill_global_states. This can undo the effects of * a call to rfkill_epo(). */ void rfkill_restore_states(void) { int i; if (atomic_read(&rfkill_input_disabled)) return; mutex_lock(&rfkill_global_mutex); rfkill_epo_lock_active = false; for (i = 0; i < NUM_RFKILL_TYPES; i++) __rfkill_switch_all(i, rfkill_global_states[i].sav); mutex_unlock(&rfkill_global_mutex); } /** * rfkill_remove_epo_lock - unlock state changes * * Used by rfkill-input to manually unlock state changes when * the EPO switch is deactivated. */ void rfkill_remove_epo_lock(void) { if (atomic_read(&rfkill_input_disabled)) return; mutex_lock(&rfkill_global_mutex); rfkill_epo_lock_active = false; mutex_unlock(&rfkill_global_mutex); } /** * rfkill_is_epo_lock_active - returns true if EPO is active * * Returns 0 (false) if there is NOT an active EPO condition, * and 1 (true) if there is an active EPO condition, which * locks all radios in one of the BLOCKED states. * * Can be called in atomic context. */ bool rfkill_is_epo_lock_active(void) { return rfkill_epo_lock_active; } /** * rfkill_get_global_sw_state - returns global state for a type * @type: the type to get the global state of * * Returns the current global state for a given wireless * device type.
*/ bool rfkill_get_global_sw_state(const enum rfkill_type type) { return rfkill_global_states[type].cur; } #endif bool rfkill_set_hw_state_reason(struct rfkill *rfkill, bool blocked, enum rfkill_hard_block_reasons reason) { unsigned long flags; bool ret, prev; BUG_ON(!rfkill); spin_lock_irqsave(&rfkill->lock, flags); prev = !!(rfkill->hard_block_reasons & reason); if (blocked) { rfkill->state |= RFKILL_BLOCK_HW; rfkill->hard_block_reasons |= reason; } else { rfkill->hard_block_reasons &= ~reason; if (!rfkill->hard_block_reasons) rfkill->state &= ~RFKILL_BLOCK_HW; } ret = !!(rfkill->state & RFKILL_BLOCK_ANY); spin_unlock_irqrestore(&rfkill->lock, flags); rfkill_led_trigger_event(rfkill); rfkill_global_led_trigger_event(); if (rfkill->registered && prev != blocked) schedule_work(&rfkill->uevent_work); return ret; } EXPORT_SYMBOL(rfkill_set_hw_state_reason); static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) { u32 bit = RFKILL_BLOCK_SW; /* if in a ops->set_block right now, use other bit */ if (rfkill->state & RFKILL_BLOCK_SW_SETCALL) bit = RFKILL_BLOCK_SW_PREV; if (blocked) rfkill->state |= bit; else rfkill->state &= ~bit; } bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) { unsigned long flags; bool prev, hwblock; BUG_ON(!rfkill); spin_lock_irqsave(&rfkill->lock, flags); prev = !!(rfkill->state & RFKILL_BLOCK_SW); __rfkill_set_sw_state(rfkill, blocked); hwblock = !!(rfkill->state & RFKILL_BLOCK_HW); blocked = blocked || hwblock; spin_unlock_irqrestore(&rfkill->lock, flags); if (!rfkill->registered) return blocked; if (prev != blocked && !hwblock) schedule_work(&rfkill->uevent_work); rfkill_led_trigger_event(rfkill); rfkill_global_led_trigger_event(); return blocked; } EXPORT_SYMBOL(rfkill_set_sw_state); void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked) { unsigned long flags; BUG_ON(!rfkill); BUG_ON(rfkill->registered); spin_lock_irqsave(&rfkill->lock, flags); __rfkill_set_sw_state(rfkill, blocked); rfkill->persistent = true; spin_unlock_irqrestore(&rfkill->lock, flags); } EXPORT_SYMBOL(rfkill_init_sw_state); void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) { unsigned long flags; bool swprev, hwprev; BUG_ON(!rfkill); spin_lock_irqsave(&rfkill->lock, flags); /* * No need to care about prev/setblock ... this is for uevent only * and that will get triggered by rfkill_set_block anyway. 
*/ swprev = !!(rfkill->state & RFKILL_BLOCK_SW); hwprev = !!(rfkill->state & RFKILL_BLOCK_HW); __rfkill_set_sw_state(rfkill, sw); if (hw) rfkill->state |= RFKILL_BLOCK_HW; else rfkill->state &= ~RFKILL_BLOCK_HW; spin_unlock_irqrestore(&rfkill->lock, flags); if (!rfkill->registered) { rfkill->persistent = true; } else { if (swprev != sw || hwprev != hw) schedule_work(&rfkill->uevent_work); rfkill_led_trigger_event(rfkill); rfkill_global_led_trigger_event(); } } EXPORT_SYMBOL(rfkill_set_states); static const char * const rfkill_types[] = { NULL, /* RFKILL_TYPE_ALL */ "wlan", "bluetooth", "ultrawideband", "wimax", "wwan", "gps", "fm", "nfc", }; enum rfkill_type rfkill_find_type(const char *name) { int i; BUILD_BUG_ON(ARRAY_SIZE(rfkill_types) != NUM_RFKILL_TYPES); if (!name) return RFKILL_TYPE_ALL; for (i = 1; i < NUM_RFKILL_TYPES; i++) if (!strcmp(name, rfkill_types[i])) return i; return RFKILL_TYPE_ALL; } EXPORT_SYMBOL(rfkill_find_type); static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rfkill *rfkill = to_rfkill(dev); return sysfs_emit(buf, "%s\n", rfkill->name); } static DEVICE_ATTR_RO(name); static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rfkill *rfkill = to_rfkill(dev); return sysfs_emit(buf, "%s\n", rfkill_types[rfkill->type]); } static DEVICE_ATTR_RO(type); static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rfkill *rfkill = to_rfkill(dev); return sysfs_emit(buf, "%d\n", rfkill->idx); } static DEVICE_ATTR_RO(index); static ssize_t persistent_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rfkill *rfkill = to_rfkill(dev); return sysfs_emit(buf, "%d\n", rfkill->persistent); } static DEVICE_ATTR_RO(persistent); static ssize_t hard_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rfkill *rfkill = to_rfkill(dev); return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0); } static DEVICE_ATTR_RO(hard); static ssize_t soft_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rfkill *rfkill = to_rfkill(dev); mutex_lock(&rfkill_global_mutex); rfkill_sync(rfkill); mutex_unlock(&rfkill_global_mutex); return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 
1 : 0); } static ssize_t soft_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct rfkill *rfkill = to_rfkill(dev); unsigned long state; int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; err = kstrtoul(buf, 0, &state); if (err) return err; if (state > 1 ) return -EINVAL; mutex_lock(&rfkill_global_mutex); rfkill_sync(rfkill); rfkill_set_block(rfkill, state); mutex_unlock(&rfkill_global_mutex); return count; } static DEVICE_ATTR_RW(soft); static ssize_t hard_block_reasons_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rfkill *rfkill = to_rfkill(dev); return sysfs_emit(buf, "0x%lx\n", rfkill->hard_block_reasons); } static DEVICE_ATTR_RO(hard_block_reasons); static u8 user_state_from_blocked(unsigned long state) { if (state & RFKILL_BLOCK_HW) return RFKILL_USER_STATE_HARD_BLOCKED; if (state & RFKILL_BLOCK_SW) return RFKILL_USER_STATE_SOFT_BLOCKED; return RFKILL_USER_STATE_UNBLOCKED; } static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rfkill *rfkill = to_rfkill(dev); mutex_lock(&rfkill_global_mutex); rfkill_sync(rfkill); mutex_unlock(&rfkill_global_mutex); return sysfs_emit(buf, "%d\n", user_state_from_blocked(rfkill->state)); } static ssize_t state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct rfkill *rfkill = to_rfkill(dev); unsigned long state; int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; err = kstrtoul(buf, 0, &state); if (err) return err; if (state != RFKILL_USER_STATE_SOFT_BLOCKED && state != RFKILL_USER_STATE_UNBLOCKED) return -EINVAL; mutex_lock(&rfkill_global_mutex); rfkill_sync(rfkill); rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED); mutex_unlock(&rfkill_global_mutex); return count; } static DEVICE_ATTR_RW(state); static struct attribute *rfkill_dev_attrs[] = { &dev_attr_name.attr, &dev_attr_type.attr, &dev_attr_index.attr, &dev_attr_persistent.attr, &dev_attr_state.attr, &dev_attr_soft.attr, &dev_attr_hard.attr, &dev_attr_hard_block_reasons.attr, NULL, }; ATTRIBUTE_GROUPS(rfkill_dev); static void rfkill_release(struct device *dev) { struct rfkill *rfkill = to_rfkill(dev); kfree(rfkill); } static int rfkill_dev_uevent(const struct device *dev, struct kobj_uevent_env *env) { struct rfkill *rfkill = to_rfkill(dev); unsigned long flags; unsigned long reasons; u32 state; int error; error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name); if (error) return error; error = add_uevent_var(env, "RFKILL_TYPE=%s", rfkill_types[rfkill->type]); if (error) return error; spin_lock_irqsave(&rfkill->lock, flags); state = rfkill->state; reasons = rfkill->hard_block_reasons; spin_unlock_irqrestore(&rfkill->lock, flags); error = add_uevent_var(env, "RFKILL_STATE=%d", user_state_from_blocked(state)); if (error) return error; return add_uevent_var(env, "RFKILL_HW_BLOCK_REASON=0x%lx", reasons); } void rfkill_pause_polling(struct rfkill *rfkill) { BUG_ON(!rfkill); if (!rfkill->ops->poll) return; rfkill->polling_paused = true; cancel_delayed_work_sync(&rfkill->poll_work); } EXPORT_SYMBOL(rfkill_pause_polling); void rfkill_resume_polling(struct rfkill *rfkill) { BUG_ON(!rfkill); if (!rfkill->ops->poll) return; rfkill->polling_paused = false; if (rfkill->suspended) return; queue_delayed_work(system_power_efficient_wq, &rfkill->poll_work, 0); } EXPORT_SYMBOL(rfkill_resume_polling); #ifdef CONFIG_PM_SLEEP static int rfkill_suspend(struct device *dev) { struct rfkill *rfkill = to_rfkill(dev); rfkill->suspended = 
true; cancel_delayed_work_sync(&rfkill->poll_work); return 0; } static int rfkill_resume(struct device *dev) { struct rfkill *rfkill = to_rfkill(dev); bool cur; rfkill->suspended = false; if (!rfkill->registered) return 0; if (!rfkill->persistent) { cur = !!(rfkill->state & RFKILL_BLOCK_SW); rfkill_set_block(rfkill, cur); } if (rfkill->ops->poll && !rfkill->polling_paused) queue_delayed_work(system_power_efficient_wq, &rfkill->poll_work, 0); return 0; } static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume); #define RFKILL_PM_OPS (&rfkill_pm_ops) #else #define RFKILL_PM_OPS NULL #endif static struct class rfkill_class = { .name = "rfkill", .dev_release = rfkill_release, .dev_groups = rfkill_dev_groups, .dev_uevent = rfkill_dev_uevent, .pm = RFKILL_PM_OPS, }; bool rfkill_blocked(struct rfkill *rfkill) { unsigned long flags; u32 state; spin_lock_irqsave(&rfkill->lock, flags); state = rfkill->state; spin_unlock_irqrestore(&rfkill->lock, flags); return !!(state & RFKILL_BLOCK_ANY); } EXPORT_SYMBOL(rfkill_blocked); bool rfkill_soft_blocked(struct rfkill *rfkill) { unsigned long flags; u32 state; spin_lock_irqsave(&rfkill->lock, flags); state = rfkill->state; spin_unlock_irqrestore(&rfkill->lock, flags); return !!(state & RFKILL_BLOCK_SW); } EXPORT_SYMBOL(rfkill_soft_blocked); struct rfkill * __must_check rfkill_alloc(const char *name, struct device *parent, const enum rfkill_type type, const struct rfkill_ops *ops, void *ops_data) { struct rfkill *rfkill; struct device *dev; if (WARN_ON(!ops)) return NULL; if (WARN_ON(!ops->set_block)) return NULL; if (WARN_ON(!name)) return NULL; if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) return NULL; rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL); if (!rfkill) return NULL; spin_lock_init(&rfkill->lock); INIT_LIST_HEAD(&rfkill->node); rfkill->type = type; strcpy(rfkill->name, name); rfkill->ops = ops; rfkill->data = ops_data; dev = &rfkill->dev; dev->class = &rfkill_class; dev->parent = parent; device_initialize(dev); return rfkill; } EXPORT_SYMBOL(rfkill_alloc); static void rfkill_poll(struct work_struct *work) { struct rfkill *rfkill; rfkill = container_of(work, struct rfkill, poll_work.work); /* * Poll hardware state -- driver will use one of the * rfkill_set{,_hw,_sw}_state functions and use its * return value to update the current status. 
*/ rfkill->ops->poll(rfkill, rfkill->data); queue_delayed_work(system_power_efficient_wq, &rfkill->poll_work, round_jiffies_relative(POLL_INTERVAL)); } static void rfkill_uevent_work(struct work_struct *work) { struct rfkill *rfkill; rfkill = container_of(work, struct rfkill, uevent_work); mutex_lock(&rfkill_global_mutex); rfkill_event(rfkill); mutex_unlock(&rfkill_global_mutex); } static void rfkill_sync_work(struct work_struct *work) { struct rfkill *rfkill = container_of(work, struct rfkill, sync_work); mutex_lock(&rfkill_global_mutex); rfkill_sync(rfkill); mutex_unlock(&rfkill_global_mutex); } int __must_check rfkill_register(struct rfkill *rfkill) { static unsigned long rfkill_no; struct device *dev; int error; if (!rfkill) return -EINVAL; dev = &rfkill->dev; mutex_lock(&rfkill_global_mutex); if (rfkill->registered) { error = -EALREADY; goto unlock; } rfkill->idx = rfkill_no; dev_set_name(dev, "rfkill%lu", rfkill_no); rfkill_no++; list_add_tail(&rfkill->node, &rfkill_list); error = device_add(dev); if (error) goto remove; error = rfkill_led_trigger_register(rfkill); if (error) goto devdel; rfkill->registered = true; INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll); INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work); INIT_WORK(&rfkill->sync_work, rfkill_sync_work); if (rfkill->ops->poll) queue_delayed_work(system_power_efficient_wq, &rfkill->poll_work, round_jiffies_relative(POLL_INTERVAL)); if (!rfkill->persistent || rfkill_epo_lock_active) { rfkill->need_sync = true; schedule_work(&rfkill->sync_work); } else { #ifdef CONFIG_RFKILL_INPUT bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW); if (!atomic_read(&rfkill_input_disabled)) __rfkill_switch_all(rfkill->type, soft_blocked); #endif } rfkill_global_led_trigger_event(); rfkill_send_events(rfkill, RFKILL_OP_ADD); mutex_unlock(&rfkill_global_mutex); return 0; devdel: device_del(&rfkill->dev); remove: list_del_init(&rfkill->node); unlock: mutex_unlock(&rfkill_global_mutex); return error; } EXPORT_SYMBOL(rfkill_register); void rfkill_unregister(struct rfkill *rfkill) { BUG_ON(!rfkill); if (rfkill->ops->poll) cancel_delayed_work_sync(&rfkill->poll_work); cancel_work_sync(&rfkill->uevent_work); cancel_work_sync(&rfkill->sync_work); rfkill->registered = false; device_del(&rfkill->dev); mutex_lock(&rfkill_global_mutex); rfkill_send_events(rfkill, RFKILL_OP_DEL); list_del_init(&rfkill->node); rfkill_global_led_trigger_event(); mutex_unlock(&rfkill_global_mutex); rfkill_led_trigger_unregister(rfkill); } EXPORT_SYMBOL(rfkill_unregister); void rfkill_destroy(struct rfkill *rfkill) { if (rfkill) put_device(&rfkill->dev); } EXPORT_SYMBOL(rfkill_destroy); static int rfkill_fop_open(struct inode *inode, struct file *file) { struct rfkill_data *data; struct rfkill *rfkill; struct rfkill_int_event *ev, *tmp; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->max_size = RFKILL_EVENT_SIZE_V1; INIT_LIST_HEAD(&data->events); mutex_init(&data->mtx); init_waitqueue_head(&data->read_wait); mutex_lock(&rfkill_global_mutex); /* * start getting events from elsewhere but hold mtx to get * startup events added first */ list_for_each_entry(rfkill, &rfkill_list, node) { ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) goto free; rfkill_sync(rfkill); rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD); mutex_lock(&data->mtx); list_add_tail(&ev->list, &data->events); mutex_unlock(&data->mtx); } list_add(&data->list, &rfkill_fds); mutex_unlock(&rfkill_global_mutex); file->private_data = data; return stream_open(inode, file); free: 
mutex_unlock(&rfkill_global_mutex); mutex_destroy(&data->mtx); list_for_each_entry_safe(ev, tmp, &data->events, list) kfree(ev); kfree(data); return -ENOMEM; } static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait) { struct rfkill_data *data = file->private_data; __poll_t res = EPOLLOUT | EPOLLWRNORM; poll_wait(file, &data->read_wait, wait); mutex_lock(&data->mtx); if (!list_empty(&data->events)) res = EPOLLIN | EPOLLRDNORM; mutex_unlock(&data->mtx); return res; } static ssize_t rfkill_fop_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct rfkill_data *data = file->private_data; struct rfkill_int_event *ev; unsigned long sz; int ret; mutex_lock(&data->mtx); while (list_empty(&data->events)) { if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; goto out; } mutex_unlock(&data->mtx); /* since we re-check and it just compares pointers, * using !list_empty() without locking isn't a problem */ ret = wait_event_interruptible(data->read_wait, !list_empty(&data->events)); mutex_lock(&data->mtx); if (ret) goto out; } ev = list_first_entry(&data->events, struct rfkill_int_event, list); sz = min_t(unsigned long, sizeof(ev->ev), count); sz = min_t(unsigned long, sz, data->max_size); ret = sz; if (copy_to_user(buf, &ev->ev, sz)) ret = -EFAULT; list_del(&ev->list); kfree(ev); out: mutex_unlock(&data->mtx); return ret; } static ssize_t rfkill_fop_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct rfkill_data *data = file->private_data; struct rfkill *rfkill; struct rfkill_event_ext ev; int ret; /* we don't need the 'hard' variable but accept it */ if (count < RFKILL_EVENT_SIZE_V1 - 1) return -EINVAL; /* * Copy as much data as we can accept into our 'ev' buffer, * but tell userspace how much we've copied so it can determine * our API version even in a write() call, if it cares. 
*/ count = min(count, sizeof(ev)); count = min_t(size_t, count, data->max_size); if (copy_from_user(&ev, buf, count)) return -EFAULT; if (ev.type >= NUM_RFKILL_TYPES) return -EINVAL; mutex_lock(&rfkill_global_mutex); switch (ev.op) { case RFKILL_OP_CHANGE_ALL: rfkill_update_global_state(ev.type, ev.soft); list_for_each_entry(rfkill, &rfkill_list, node) if (rfkill->type == ev.type || ev.type == RFKILL_TYPE_ALL) rfkill_set_block(rfkill, ev.soft); ret = 0; break; case RFKILL_OP_CHANGE: list_for_each_entry(rfkill, &rfkill_list, node) if (rfkill->idx == ev.idx && (rfkill->type == ev.type || ev.type == RFKILL_TYPE_ALL)) rfkill_set_block(rfkill, ev.soft); ret = 0; break; default: ret = -EINVAL; break; } mutex_unlock(&rfkill_global_mutex); return ret ?: count; } static int rfkill_fop_release(struct inode *inode, struct file *file) { struct rfkill_data *data = file->private_data; struct rfkill_int_event *ev, *tmp; mutex_lock(&rfkill_global_mutex); list_del(&data->list); mutex_unlock(&rfkill_global_mutex); mutex_destroy(&data->mtx); list_for_each_entry_safe(ev, tmp, &data->events, list) kfree(ev); #ifdef CONFIG_RFKILL_INPUT if (data->input_handler) if (atomic_dec_return(&rfkill_input_disabled) == 0) printk(KERN_DEBUG "rfkill: input handler enabled\n"); #endif kfree(data); return 0; } static long rfkill_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct rfkill_data *data = file->private_data; int ret = -ENOTTY; u32 size; if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC) return -ENOTTY; mutex_lock(&data->mtx); switch (_IOC_NR(cmd)) { #ifdef CONFIG_RFKILL_INPUT case RFKILL_IOC_NOINPUT: if (!data->input_handler) { if (atomic_inc_return(&rfkill_input_disabled) == 1) printk(KERN_DEBUG "rfkill: input handler disabled\n"); data->input_handler = true; } ret = 0; break; #endif case RFKILL_IOC_MAX_SIZE: if (get_user(size, (__u32 __user *)arg)) { ret = -EFAULT; break; } if (size < RFKILL_EVENT_SIZE_V1 || size > U8_MAX) { ret = -EINVAL; break; } data->max_size = size; ret = 0; break; default: break; } mutex_unlock(&data->mtx); return ret; } static const struct file_operations rfkill_fops = { .owner = THIS_MODULE, .open = rfkill_fop_open, .read = rfkill_fop_read, .write = rfkill_fop_write, .poll = rfkill_fop_poll, .release = rfkill_fop_release, .unlocked_ioctl = rfkill_fop_ioctl, .compat_ioctl = compat_ptr_ioctl, }; #define RFKILL_NAME "rfkill" static struct miscdevice rfkill_miscdev = { .fops = &rfkill_fops, .name = RFKILL_NAME, .minor = RFKILL_MINOR, }; static int __init rfkill_init(void) { int error; rfkill_update_global_state(RFKILL_TYPE_ALL, !rfkill_default_state); error = class_register(&rfkill_class); if (error) goto error_class; error = misc_register(&rfkill_miscdev); if (error) goto error_misc; error = rfkill_global_led_trigger_register(); if (error) goto error_led_trigger; #ifdef CONFIG_RFKILL_INPUT error = rfkill_handler_init(); if (error) goto error_input; #endif return 0; #ifdef CONFIG_RFKILL_INPUT error_input: rfkill_global_led_trigger_unregister(); #endif error_led_trigger: misc_deregister(&rfkill_miscdev); error_misc: class_unregister(&rfkill_class); error_class: return error; } subsys_initcall(rfkill_init); static void __exit rfkill_exit(void) { #ifdef CONFIG_RFKILL_INPUT rfkill_handler_exit(); #endif rfkill_global_led_trigger_unregister(); misc_deregister(&rfkill_miscdev); class_unregister(&rfkill_class); } module_exit(rfkill_exit); MODULE_ALIAS_MISCDEV(RFKILL_MINOR); MODULE_ALIAS("devname:" RFKILL_NAME);
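/*
 * Editor's illustration (not part of the code above): the character device
 * side implemented by rfkill_fop_open()/rfkill_fop_read() hands out one
 * struct rfkill_event per read() -- 8 bytes in the v1 layout
 * (RFKILL_EVENT_SIZE_V1), unless userspace raises the limit with the
 * RFKILL_IOC_MAX_SIZE ioctl. On open, an RFKILL_OP_ADD event for every
 * registered switch is queued first, so a reader sees the full current
 * state before live changes. A minimal user-space consumer:
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/rfkill.h>

int main(void)
{
	struct rfkill_event ev;
	ssize_t len;
	int fd = open("/dev/rfkill", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/rfkill");
		return 1;
	}
	/* read() blocks until an event is queued (O_NONBLOCK gives EAGAIN) */
	while ((len = read(fd, &ev, sizeof(ev))) > 0)
		printf("idx=%u type=%u op=%u soft=%u hard=%u\n",
		       ev.idx, ev.type, ev.op, ev.soft, ev.hard);
	close(fd);
	return 0;
}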
// SPDX-License-Identifier: GPL-2.0-or-later /* * HackRF driver * * Copyright (C) 2014 Antti Palosaari <crope@iki.fi> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <media/videobuf2-v4l2.h> #include
<media/videobuf2-vmalloc.h> /* * The Avago MGA-81563 RF amplifier used here can be destroyed quite easily * by too strong a signal or by transmitting into a bad antenna. * To be safe, keep the RF gain control in the 'grabbed' state by default. */ static bool hackrf_enable_rf_gain_ctrl; module_param_named(enable_rf_gain_ctrl, hackrf_enable_rf_gain_ctrl, bool, 0644); MODULE_PARM_DESC(enable_rf_gain_ctrl, "enable RX/TX RF amplifier control (warn: could damage amplifier)"); /* HackRF USB API commands (from HackRF Library) */ enum { CMD_SET_TRANSCEIVER_MODE = 0x01, CMD_SAMPLE_RATE_SET = 0x06, CMD_BASEBAND_FILTER_BANDWIDTH_SET = 0x07, CMD_BOARD_ID_READ = 0x0e, CMD_VERSION_STRING_READ = 0x0f, CMD_SET_FREQ = 0x10, CMD_AMP_ENABLE = 0x11, CMD_SET_LNA_GAIN = 0x13, CMD_SET_VGA_GAIN = 0x14, CMD_SET_TXVGA_GAIN = 0x15, }; /* * bEndpointAddress 0x81 EP 1 IN * Transfer Type Bulk * wMaxPacketSize 0x0200 1x 512 bytes */ #define MAX_BULK_BUFS (6) #define BULK_BUFFER_SIZE (128 * 512) static const struct v4l2_frequency_band bands_adc_dac[] = { { .tuner = 0, .type = V4L2_TUNER_SDR, .index = 0, .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = 200000, .rangehigh = 24000000, }, }; static const struct v4l2_frequency_band bands_rx_tx[] = { { .tuner = 1, .type = V4L2_TUNER_RF, .index = 0, .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = 1, .rangehigh = 4294967294LL, /* max u32, hw goes over 7GHz */ }, }; /* stream formats */ struct hackrf_format { u32 pixelformat; u32 buffersize; }; /* format descriptions for capture and preview */ static struct hackrf_format formats[] = { { .pixelformat = V4L2_SDR_FMT_CS8, .buffersize = BULK_BUFFER_SIZE, }, }; static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats); /* intermediate buffers with raw data from the USB device */ struct hackrf_buffer { struct vb2_v4l2_buffer vb; struct list_head list; }; struct hackrf_dev { #define USB_STATE_URB_BUF 1 /* XXX: set manually */ #define RX_ON 4 #define TX_ON 5 #define RX_ADC_FREQUENCY 11 #define TX_DAC_FREQUENCY 12 #define RX_BANDWIDTH 13 #define TX_BANDWIDTH 14 #define RX_RF_FREQUENCY 15 #define TX_RF_FREQUENCY 16 #define RX_RF_GAIN 17 #define TX_RF_GAIN 18 #define RX_IF_GAIN 19 #define RX_LNA_GAIN 20 #define TX_LNA_GAIN 21 unsigned long flags; struct usb_interface *intf; struct device *dev; struct usb_device *udev; struct video_device rx_vdev; struct video_device tx_vdev; struct v4l2_device v4l2_dev; /* videobuf2 queue and queued buffers list */ struct vb2_queue rx_vb2_queue; struct vb2_queue tx_vb2_queue; struct list_head rx_buffer_list; struct list_head tx_buffer_list; spinlock_t buffer_list_lock; /* Protects buffer_list */ unsigned int sequence; /* Buffer sequence counter */ unsigned int vb_full; /* vb is full and packets dropped */ unsigned int vb_empty; /* vb is empty and packets dropped */ /* Note: if taking both locks, v4l2_lock must always be locked first!
*/ struct mutex v4l2_lock; /* Protects everything else */ struct mutex vb_queue_lock; /* Protects vb_queue */ struct urb *urb_list[MAX_BULK_BUFS]; int buf_num; unsigned long buf_size; u8 *buf_list[MAX_BULK_BUFS]; dma_addr_t dma_addr[MAX_BULK_BUFS]; int urbs_initialized; int urbs_submitted; /* USB control message buffer */ #define BUF_SIZE 24 u8 buf[BUF_SIZE]; /* Current configuration */ unsigned int f_adc; unsigned int f_dac; unsigned int f_rx; unsigned int f_tx; u32 pixelformat; u32 buffersize; /* Controls */ struct v4l2_ctrl_handler rx_ctrl_handler; struct v4l2_ctrl *rx_bandwidth_auto; struct v4l2_ctrl *rx_bandwidth; struct v4l2_ctrl *rx_rf_gain; struct v4l2_ctrl *rx_lna_gain; struct v4l2_ctrl *rx_if_gain; struct v4l2_ctrl_handler tx_ctrl_handler; struct v4l2_ctrl *tx_bandwidth_auto; struct v4l2_ctrl *tx_bandwidth; struct v4l2_ctrl *tx_rf_gain; struct v4l2_ctrl *tx_lna_gain; /* Sample rate calc */ unsigned long jiffies_next; unsigned int sample; unsigned int sample_measured; }; #define hackrf_dbg_usb_control_msg(_dev, _r, _t, _v, _i, _b, _l) { \ char *_direction; \ if (_t & USB_DIR_IN) \ _direction = "<<<"; \ else \ _direction = ">>>"; \ dev_dbg(_dev, "%02x %02x %02x %02x %02x %02x %02x %02x %s %*ph\n", \ _t, _r, _v & 0xff, _v >> 8, _i & 0xff, \ _i >> 8, _l & 0xff, _l >> 8, _direction, _l, _b); \ } /* execute firmware command */ static int hackrf_ctrl_msg(struct hackrf_dev *dev, u8 request, u16 value, u16 index, u8 *data, u16 size) { int ret; unsigned int pipe; u8 requesttype; switch (request) { case CMD_SET_TRANSCEIVER_MODE: case CMD_SET_FREQ: case CMD_AMP_ENABLE: case CMD_SAMPLE_RATE_SET: case CMD_BASEBAND_FILTER_BANDWIDTH_SET: pipe = usb_sndctrlpipe(dev->udev, 0); requesttype = (USB_TYPE_VENDOR | USB_DIR_OUT); break; case CMD_BOARD_ID_READ: case CMD_VERSION_STRING_READ: case CMD_SET_LNA_GAIN: case CMD_SET_VGA_GAIN: case CMD_SET_TXVGA_GAIN: pipe = usb_rcvctrlpipe(dev->udev, 0); requesttype = (USB_TYPE_VENDOR | USB_DIR_IN); break; default: dev_err(dev->dev, "Unknown command %02x\n", request); ret = -EINVAL; goto err; } /* write request */ if (!(requesttype & USB_DIR_IN)) memcpy(dev->buf, data, size); ret = usb_control_msg(dev->udev, pipe, request, requesttype, value, index, dev->buf, size, 1000); hackrf_dbg_usb_control_msg(dev->dev, request, requesttype, value, index, dev->buf, size); if (ret < 0) { dev_err(dev->dev, "usb_control_msg() failed %d request %02x\n", ret, request); goto err; } /* read request */ if (requesttype & USB_DIR_IN) memcpy(data, dev->buf, size); return 0; err: return ret; } static int hackrf_set_params(struct hackrf_dev *dev) { struct usb_interface *intf = dev->intf; int ret, i; u8 buf[8], u8tmp; unsigned int uitmp, uitmp1, uitmp2; const bool rx = test_bit(RX_ON, &dev->flags); const bool tx = test_bit(TX_ON, &dev->flags); static const struct { u32 freq; } bandwidth_lut[] = { { 1750000}, /* 1.75 MHz */ { 2500000}, /* 2.5 MHz */ { 3500000}, /* 3.5 MHz */ { 5000000}, /* 5 MHz */ { 5500000}, /* 5.5 MHz */ { 6000000}, /* 6 MHz */ { 7000000}, /* 7 MHz */ { 8000000}, /* 8 MHz */ { 9000000}, /* 9 MHz */ {10000000}, /* 10 MHz */ {12000000}, /* 12 MHz */ {14000000}, /* 14 MHz */ {15000000}, /* 15 MHz */ {20000000}, /* 20 MHz */ {24000000}, /* 24 MHz */ {28000000}, /* 28 MHz */ }; if (!rx && !tx) { dev_dbg(&intf->dev, "device is sleeping\n"); return 0; } /* ADC / DAC frequency */ if (rx && test_and_clear_bit(RX_ADC_FREQUENCY, &dev->flags)) { dev_dbg(&intf->dev, "RX ADC frequency=%u Hz\n", dev->f_adc); uitmp1 = dev->f_adc; uitmp2 = 1; set_bit(TX_DAC_FREQUENCY, &dev->flags); } 
else if (tx && test_and_clear_bit(TX_DAC_FREQUENCY, &dev->flags)) { dev_dbg(&intf->dev, "TX DAC frequency=%u Hz\n", dev->f_dac); uitmp1 = dev->f_dac; uitmp2 = 1; set_bit(RX_ADC_FREQUENCY, &dev->flags); } else { uitmp1 = uitmp2 = 0; } if (uitmp1 || uitmp2) { buf[0] = (uitmp1 >> 0) & 0xff; buf[1] = (uitmp1 >> 8) & 0xff; buf[2] = (uitmp1 >> 16) & 0xff; buf[3] = (uitmp1 >> 24) & 0xff; buf[4] = (uitmp2 >> 0) & 0xff; buf[5] = (uitmp2 >> 8) & 0xff; buf[6] = (uitmp2 >> 16) & 0xff; buf[7] = (uitmp2 >> 24) & 0xff; ret = hackrf_ctrl_msg(dev, CMD_SAMPLE_RATE_SET, 0, 0, buf, 8); if (ret) goto err; } /* bandwidth */ if (rx && test_and_clear_bit(RX_BANDWIDTH, &dev->flags)) { if (dev->rx_bandwidth_auto->val == true) uitmp = dev->f_adc; else uitmp = dev->rx_bandwidth->val; for (i = 0; i < ARRAY_SIZE(bandwidth_lut); i++) { if (uitmp <= bandwidth_lut[i].freq) { uitmp = bandwidth_lut[i].freq; break; } } dev->rx_bandwidth->val = uitmp; dev->rx_bandwidth->cur.val = uitmp; dev_dbg(&intf->dev, "RX bandwidth selected=%u\n", uitmp); set_bit(TX_BANDWIDTH, &dev->flags); } else if (tx && test_and_clear_bit(TX_BANDWIDTH, &dev->flags)) { if (dev->tx_bandwidth_auto->val == true) uitmp = dev->f_dac; else uitmp = dev->tx_bandwidth->val; for (i = 0; i < ARRAY_SIZE(bandwidth_lut); i++) { if (uitmp <= bandwidth_lut[i].freq) { uitmp = bandwidth_lut[i].freq; break; } } dev->tx_bandwidth->val = uitmp; dev->tx_bandwidth->cur.val = uitmp; dev_dbg(&intf->dev, "TX bandwidth selected=%u\n", uitmp); set_bit(RX_BANDWIDTH, &dev->flags); } else { uitmp = 0; } if (uitmp) { uitmp1 = uitmp2 = 0; uitmp1 |= ((uitmp >> 0) & 0xff) << 0; uitmp1 |= ((uitmp >> 8) & 0xff) << 8; uitmp2 |= ((uitmp >> 16) & 0xff) << 0; uitmp2 |= ((uitmp >> 24) & 0xff) << 8; ret = hackrf_ctrl_msg(dev, CMD_BASEBAND_FILTER_BANDWIDTH_SET, uitmp1, uitmp2, NULL, 0); if (ret) goto err; } /* RX / TX RF frequency */ if (rx && test_and_clear_bit(RX_RF_FREQUENCY, &dev->flags)) { dev_dbg(&intf->dev, "RX RF frequency=%u Hz\n", dev->f_rx); uitmp1 = dev->f_rx / 1000000; uitmp2 = dev->f_rx % 1000000; set_bit(TX_RF_FREQUENCY, &dev->flags); } else if (tx && test_and_clear_bit(TX_RF_FREQUENCY, &dev->flags)) { dev_dbg(&intf->dev, "TX RF frequency=%u Hz\n", dev->f_tx); uitmp1 = dev->f_tx / 1000000; uitmp2 = dev->f_tx % 1000000; set_bit(RX_RF_FREQUENCY, &dev->flags); } else { uitmp1 = uitmp2 = 0; } if (uitmp1 || uitmp2) { buf[0] = (uitmp1 >> 0) & 0xff; buf[1] = (uitmp1 >> 8) & 0xff; buf[2] = (uitmp1 >> 16) & 0xff; buf[3] = (uitmp1 >> 24) & 0xff; buf[4] = (uitmp2 >> 0) & 0xff; buf[5] = (uitmp2 >> 8) & 0xff; buf[6] = (uitmp2 >> 16) & 0xff; buf[7] = (uitmp2 >> 24) & 0xff; ret = hackrf_ctrl_msg(dev, CMD_SET_FREQ, 0, 0, buf, 8); if (ret) goto err; } /* RX RF gain */ if (rx && test_and_clear_bit(RX_RF_GAIN, &dev->flags)) { dev_dbg(&intf->dev, "RX RF gain val=%d->%d\n", dev->rx_rf_gain->cur.val, dev->rx_rf_gain->val); u8tmp = (dev->rx_rf_gain->val) ? 1 : 0; ret = hackrf_ctrl_msg(dev, CMD_AMP_ENABLE, u8tmp, 0, NULL, 0); if (ret) goto err; set_bit(TX_RF_GAIN, &dev->flags); } /* TX RF gain */ if (tx && test_and_clear_bit(TX_RF_GAIN, &dev->flags)) { dev_dbg(&intf->dev, "TX RF gain val=%d->%d\n", dev->tx_rf_gain->cur.val, dev->tx_rf_gain->val); u8tmp = (dev->tx_rf_gain->val) ? 
1 : 0; ret = hackrf_ctrl_msg(dev, CMD_AMP_ENABLE, u8tmp, 0, NULL, 0); if (ret) goto err; set_bit(RX_RF_GAIN, &dev->flags); } /* RX LNA gain */ if (rx && test_and_clear_bit(RX_LNA_GAIN, &dev->flags)) { dev_dbg(dev->dev, "RX LNA gain val=%d->%d\n", dev->rx_lna_gain->cur.val, dev->rx_lna_gain->val); ret = hackrf_ctrl_msg(dev, CMD_SET_LNA_GAIN, 0, dev->rx_lna_gain->val, &u8tmp, 1); if (ret) goto err; } /* RX IF gain */ if (rx && test_and_clear_bit(RX_IF_GAIN, &dev->flags)) { dev_dbg(&intf->dev, "IF gain val=%d->%d\n", dev->rx_if_gain->cur.val, dev->rx_if_gain->val); ret = hackrf_ctrl_msg(dev, CMD_SET_VGA_GAIN, 0, dev->rx_if_gain->val, &u8tmp, 1); if (ret) goto err; } /* TX LNA gain */ if (tx && test_and_clear_bit(TX_LNA_GAIN, &dev->flags)) { dev_dbg(&intf->dev, "TX LNA gain val=%d->%d\n", dev->tx_lna_gain->cur.val, dev->tx_lna_gain->val); ret = hackrf_ctrl_msg(dev, CMD_SET_TXVGA_GAIN, 0, dev->tx_lna_gain->val, &u8tmp, 1); if (ret) goto err; } return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } /* Private functions */ static struct hackrf_buffer *hackrf_get_next_buffer(struct hackrf_dev *dev, struct list_head *buffer_list) { unsigned long flags; struct hackrf_buffer *buffer = NULL; spin_lock_irqsave(&dev->buffer_list_lock, flags); if (list_empty(buffer_list)) goto leave; buffer = list_entry(buffer_list->next, struct hackrf_buffer, list); list_del(&buffer->list); leave: spin_unlock_irqrestore(&dev->buffer_list_lock, flags); return buffer; } static void hackrf_copy_stream(struct hackrf_dev *dev, void *dst, void *src, unsigned int src_len) { memcpy(dst, src, src_len); /* calculate sample rate and output it in 10 seconds intervals */ if (unlikely(time_is_before_jiffies(dev->jiffies_next))) { #define MSECS 10000UL unsigned int msecs = jiffies_to_msecs(jiffies - dev->jiffies_next + msecs_to_jiffies(MSECS)); unsigned int samples = dev->sample - dev->sample_measured; dev->jiffies_next = jiffies + msecs_to_jiffies(MSECS); dev->sample_measured = dev->sample; dev_dbg(dev->dev, "slen=%u samples=%u msecs=%u sample rate=%lu\n", src_len, samples, msecs, samples * 1000UL / msecs); } /* total number of samples */ dev->sample += src_len / 2; } /* * This gets called for the bulk stream pipe. This is done in interrupt * time, so it has to be fast, not crash, and not stall. Neat. 
*/ static void hackrf_urb_complete_in(struct urb *urb) { struct hackrf_dev *dev = urb->context; struct usb_interface *intf = dev->intf; struct hackrf_buffer *buffer; unsigned int len; dev_dbg_ratelimited(&intf->dev, "status=%d length=%u/%u\n", urb->status, urb->actual_length, urb->transfer_buffer_length); switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ dev_err_ratelimited(&intf->dev, "URB failed %d\n", urb->status); goto exit_usb_submit_urb; } /* get buffer to write */ buffer = hackrf_get_next_buffer(dev, &dev->rx_buffer_list); if (unlikely(buffer == NULL)) { dev->vb_full++; dev_notice_ratelimited(&intf->dev, "buffer is full - %u packets dropped\n", dev->vb_full); goto exit_usb_submit_urb; } len = min_t(unsigned long, vb2_plane_size(&buffer->vb.vb2_buf, 0), urb->actual_length); hackrf_copy_stream(dev, vb2_plane_vaddr(&buffer->vb.vb2_buf, 0), urb->transfer_buffer, len); vb2_set_plane_payload(&buffer->vb.vb2_buf, 0, len); buffer->vb.sequence = dev->sequence++; buffer->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_DONE); exit_usb_submit_urb: usb_submit_urb(urb, GFP_ATOMIC); } static void hackrf_urb_complete_out(struct urb *urb) { struct hackrf_dev *dev = urb->context; struct usb_interface *intf = dev->intf; struct hackrf_buffer *buffer; unsigned int len; dev_dbg_ratelimited(&intf->dev, "status=%d length=%u/%u\n", urb->status, urb->actual_length, urb->transfer_buffer_length); switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ dev_err_ratelimited(&intf->dev, "URB failed %d\n", urb->status); } /* get buffer to read */ buffer = hackrf_get_next_buffer(dev, &dev->tx_buffer_list); if (unlikely(buffer == NULL)) { dev->vb_empty++; dev_notice_ratelimited(&intf->dev, "buffer is empty - %u packets dropped\n", dev->vb_empty); urb->actual_length = 0; goto exit_usb_submit_urb; } len = min_t(unsigned long, urb->transfer_buffer_length, vb2_get_plane_payload(&buffer->vb.vb2_buf, 0)); hackrf_copy_stream(dev, urb->transfer_buffer, vb2_plane_vaddr(&buffer->vb.vb2_buf, 0), len); urb->actual_length = len; buffer->vb.sequence = dev->sequence++; buffer->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_DONE); exit_usb_submit_urb: usb_submit_urb(urb, GFP_ATOMIC); } static int hackrf_kill_urbs(struct hackrf_dev *dev) { int i; for (i = dev->urbs_submitted - 1; i >= 0; i--) { dev_dbg(dev->dev, "kill urb=%d\n", i); /* stop the URB */ usb_kill_urb(dev->urb_list[i]); } dev->urbs_submitted = 0; return 0; } static int hackrf_submit_urbs(struct hackrf_dev *dev) { int i, ret; for (i = 0; i < dev->urbs_initialized; i++) { dev_dbg(dev->dev, "submit urb=%d\n", i); ret = usb_submit_urb(dev->urb_list[i], GFP_KERNEL); if (ret) { dev_err(dev->dev, "Could not submit URB no. 
%d - get them all back\n", i); hackrf_kill_urbs(dev); return ret; } dev->urbs_submitted++; } return 0; } static int hackrf_free_stream_bufs(struct hackrf_dev *dev) { if (dev->flags & USB_STATE_URB_BUF) { while (dev->buf_num) { dev->buf_num--; dev_dbg(dev->dev, "free buf=%d\n", dev->buf_num); usb_free_coherent(dev->udev, dev->buf_size, dev->buf_list[dev->buf_num], dev->dma_addr[dev->buf_num]); } } dev->flags &= ~USB_STATE_URB_BUF; return 0; } static int hackrf_alloc_stream_bufs(struct hackrf_dev *dev) { dev->buf_num = 0; dev->buf_size = BULK_BUFFER_SIZE; dev_dbg(dev->dev, "all in all I will use %u bytes for streaming\n", MAX_BULK_BUFS * BULK_BUFFER_SIZE); for (dev->buf_num = 0; dev->buf_num < MAX_BULK_BUFS; dev->buf_num++) { dev->buf_list[dev->buf_num] = usb_alloc_coherent(dev->udev, BULK_BUFFER_SIZE, GFP_KERNEL, &dev->dma_addr[dev->buf_num]); if (!dev->buf_list[dev->buf_num]) { dev_dbg(dev->dev, "alloc buf=%d failed\n", dev->buf_num); hackrf_free_stream_bufs(dev); return -ENOMEM; } dev_dbg(dev->dev, "alloc buf=%d %p (dma %llu)\n", dev->buf_num, dev->buf_list[dev->buf_num], (long long)dev->dma_addr[dev->buf_num]); dev->flags |= USB_STATE_URB_BUF; } return 0; } static int hackrf_free_urbs(struct hackrf_dev *dev) { int i; hackrf_kill_urbs(dev); for (i = dev->urbs_initialized - 1; i >= 0; i--) { if (dev->urb_list[i]) { dev_dbg(dev->dev, "free urb=%d\n", i); /* free the URBs */ usb_free_urb(dev->urb_list[i]); } } dev->urbs_initialized = 0; return 0; } static int hackrf_alloc_urbs(struct hackrf_dev *dev, bool rcv) { int i, j; unsigned int pipe; usb_complete_t complete; if (rcv) { pipe = usb_rcvbulkpipe(dev->udev, 0x81); complete = &hackrf_urb_complete_in; } else { pipe = usb_sndbulkpipe(dev->udev, 0x02); complete = &hackrf_urb_complete_out; } /* allocate the URBs */ for (i = 0; i < MAX_BULK_BUFS; i++) { dev_dbg(dev->dev, "alloc urb=%d\n", i); dev->urb_list[i] = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urb_list[i]) { for (j = 0; j < i; j++) usb_free_urb(dev->urb_list[j]); return -ENOMEM; } usb_fill_bulk_urb(dev->urb_list[i], dev->udev, pipe, dev->buf_list[i], BULK_BUFFER_SIZE, complete, dev); dev->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP; dev->urb_list[i]->transfer_dma = dev->dma_addr[i]; dev->urbs_initialized++; } return 0; } /* The user yanked out the cable... 
*/ static void hackrf_disconnect(struct usb_interface *intf) { struct v4l2_device *v = usb_get_intfdata(intf); struct hackrf_dev *dev = container_of(v, struct hackrf_dev, v4l2_dev); dev_dbg(dev->dev, "\n"); mutex_lock(&dev->vb_queue_lock); mutex_lock(&dev->v4l2_lock); /* No need to keep the urbs around after disconnection */ dev->udev = NULL; v4l2_device_disconnect(&dev->v4l2_dev); video_unregister_device(&dev->tx_vdev); video_unregister_device(&dev->rx_vdev); mutex_unlock(&dev->v4l2_lock); mutex_unlock(&dev->vb_queue_lock); v4l2_device_put(&dev->v4l2_dev); } /* Videobuf2 operations */ static void hackrf_return_all_buffers(struct vb2_queue *vq, enum vb2_buffer_state state) { struct hackrf_dev *dev = vb2_get_drv_priv(vq); struct usb_interface *intf = dev->intf; struct hackrf_buffer *buffer, *node; struct list_head *buffer_list; unsigned long flags; dev_dbg(&intf->dev, "\n"); if (vq->type == V4L2_BUF_TYPE_SDR_CAPTURE) buffer_list = &dev->rx_buffer_list; else buffer_list = &dev->tx_buffer_list; spin_lock_irqsave(&dev->buffer_list_lock, flags); list_for_each_entry_safe(buffer, node, buffer_list, list) { dev_dbg(&intf->dev, "list_for_each_entry_safe\n"); vb2_buffer_done(&buffer->vb.vb2_buf, state); list_del(&buffer->list); } spin_unlock_irqrestore(&dev->buffer_list_lock, flags); } static int hackrf_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct hackrf_dev *dev = vb2_get_drv_priv(vq); unsigned int q_num_bufs = vb2_get_num_buffers(vq); dev_dbg(dev->dev, "nbuffers=%d\n", *nbuffers); /* Need at least 8 buffers */ if (q_num_bufs + *nbuffers < 8) *nbuffers = 8 - q_num_bufs; *nplanes = 1; sizes[0] = PAGE_ALIGN(dev->buffersize); dev_dbg(dev->dev, "nbuffers=%d sizes[0]=%d\n", *nbuffers, sizes[0]); return 0; } static void hackrf_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vb2_queue *vq = vb->vb2_queue; struct hackrf_dev *dev = vb2_get_drv_priv(vq); struct hackrf_buffer *buffer = container_of(vbuf, struct hackrf_buffer, vb); struct list_head *buffer_list; unsigned long flags; dev_dbg_ratelimited(&dev->intf->dev, "\n"); if (vq->type == V4L2_BUF_TYPE_SDR_CAPTURE) buffer_list = &dev->rx_buffer_list; else buffer_list = &dev->tx_buffer_list; spin_lock_irqsave(&dev->buffer_list_lock, flags); list_add_tail(&buffer->list, buffer_list); spin_unlock_irqrestore(&dev->buffer_list_lock, flags); } static int hackrf_start_streaming(struct vb2_queue *vq, unsigned int count) { struct hackrf_dev *dev = vb2_get_drv_priv(vq); struct usb_interface *intf = dev->intf; int ret; unsigned int mode; dev_dbg(&intf->dev, "count=%i\n", count); mutex_lock(&dev->v4l2_lock); /* Allow only RX or TX, not both same time */ if (vq->type == V4L2_BUF_TYPE_SDR_CAPTURE) { if (test_bit(TX_ON, &dev->flags)) { ret = -EBUSY; goto err_hackrf_return_all_buffers; } mode = 1; set_bit(RX_ON, &dev->flags); } else { if (test_bit(RX_ON, &dev->flags)) { ret = -EBUSY; goto err_hackrf_return_all_buffers; } mode = 2; set_bit(TX_ON, &dev->flags); } dev->sequence = 0; ret = hackrf_alloc_stream_bufs(dev); if (ret) goto err; ret = hackrf_alloc_urbs(dev, (mode == 1)); if (ret) goto err; ret = hackrf_submit_urbs(dev); if (ret) goto err; ret = hackrf_set_params(dev); if (ret) goto err; /* start hardware streaming */ ret = hackrf_ctrl_msg(dev, CMD_SET_TRANSCEIVER_MODE, mode, 0, NULL, 0); if (ret) goto err; mutex_unlock(&dev->v4l2_lock); return 0; err: hackrf_kill_urbs(dev); hackrf_free_urbs(dev); 
hackrf_free_stream_bufs(dev); clear_bit(RX_ON, &dev->flags); clear_bit(TX_ON, &dev->flags); err_hackrf_return_all_buffers: hackrf_return_all_buffers(vq, VB2_BUF_STATE_QUEUED); mutex_unlock(&dev->v4l2_lock); dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static void hackrf_stop_streaming(struct vb2_queue *vq) { struct hackrf_dev *dev = vb2_get_drv_priv(vq); struct usb_interface *intf = dev->intf; dev_dbg(&intf->dev, "\n"); mutex_lock(&dev->v4l2_lock); /* stop hardware streaming */ hackrf_ctrl_msg(dev, CMD_SET_TRANSCEIVER_MODE, 0, 0, NULL, 0); hackrf_kill_urbs(dev); hackrf_free_urbs(dev); hackrf_free_stream_bufs(dev); hackrf_return_all_buffers(vq, VB2_BUF_STATE_ERROR); if (vq->type == V4L2_BUF_TYPE_SDR_CAPTURE) clear_bit(RX_ON, &dev->flags); else clear_bit(TX_ON, &dev->flags); mutex_unlock(&dev->v4l2_lock); } static const struct vb2_ops hackrf_vb2_ops = { .queue_setup = hackrf_queue_setup, .buf_queue = hackrf_buf_queue, .start_streaming = hackrf_start_streaming, .stop_streaming = hackrf_stop_streaming, }; static int hackrf_querycap(struct file *file, void *fh, struct v4l2_capability *cap) { struct hackrf_dev *dev = video_drvdata(file); struct usb_interface *intf = dev->intf; dev_dbg(&intf->dev, "\n"); cap->capabilities = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER | V4L2_CAP_SDR_OUTPUT | V4L2_CAP_MODULATOR | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | V4L2_CAP_DEVICE_CAPS; strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver)); strscpy(cap->card, dev->rx_vdev.name, sizeof(cap->card)); usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); return 0; } static int hackrf_s_fmt_sdr(struct file *file, void *priv, struct v4l2_format *f) { struct hackrf_dev *dev = video_drvdata(file); struct video_device *vdev = video_devdata(file); struct vb2_queue *q; int i; dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n", (char *)&f->fmt.sdr.pixelformat); if (vdev->vfl_dir == VFL_DIR_RX) q = &dev->rx_vb2_queue; else q = &dev->tx_vb2_queue; if (vb2_is_busy(q)) return -EBUSY; for (i = 0; i < NUM_FORMATS; i++) { if (f->fmt.sdr.pixelformat == formats[i].pixelformat) { dev->pixelformat = formats[i].pixelformat; dev->buffersize = formats[i].buffersize; f->fmt.sdr.buffersize = formats[i].buffersize; return 0; } } dev->pixelformat = formats[0].pixelformat; dev->buffersize = formats[0].buffersize; f->fmt.sdr.pixelformat = formats[0].pixelformat; f->fmt.sdr.buffersize = formats[0].buffersize; return 0; } static int hackrf_g_fmt_sdr(struct file *file, void *priv, struct v4l2_format *f) { struct hackrf_dev *dev = video_drvdata(file); dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n", (char *)&dev->pixelformat); f->fmt.sdr.pixelformat = dev->pixelformat; f->fmt.sdr.buffersize = dev->buffersize; return 0; } static int hackrf_try_fmt_sdr(struct file *file, void *priv, struct v4l2_format *f) { struct hackrf_dev *dev = video_drvdata(file); int i; dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n", (char *)&f->fmt.sdr.pixelformat); for (i = 0; i < NUM_FORMATS; i++) { if (formats[i].pixelformat == f->fmt.sdr.pixelformat) { f->fmt.sdr.buffersize = formats[i].buffersize; return 0; } } f->fmt.sdr.pixelformat = formats[0].pixelformat; f->fmt.sdr.buffersize = formats[0].buffersize; return 0; } static int hackrf_enum_fmt_sdr(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct hackrf_dev *dev = video_drvdata(file); dev_dbg(dev->dev, "index=%d\n", f->index); if (f->index >= NUM_FORMATS) return -EINVAL; f->pixelformat = formats[f->index].pixelformat; return 0; } static int hackrf_s_tuner(struct file *file, void *priv, const 
struct v4l2_tuner *v) { struct hackrf_dev *dev = video_drvdata(file); int ret; dev_dbg(dev->dev, "index=%d\n", v->index); if (v->index == 0) ret = 0; else if (v->index == 1) ret = 0; else ret = -EINVAL; return ret; } static int hackrf_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v) { struct hackrf_dev *dev = video_drvdata(file); int ret; dev_dbg(dev->dev, "index=%d\n", v->index); if (v->index == 0) { strscpy(v->name, "HackRF ADC", sizeof(v->name)); v->type = V4L2_TUNER_SDR; v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS; v->rangelow = bands_adc_dac[0].rangelow; v->rangehigh = bands_adc_dac[0].rangehigh; ret = 0; } else if (v->index == 1) { strscpy(v->name, "HackRF RF", sizeof(v->name)); v->type = V4L2_TUNER_RF; v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS; v->rangelow = bands_rx_tx[0].rangelow; v->rangehigh = bands_rx_tx[0].rangehigh; ret = 0; } else { ret = -EINVAL; } return ret; } static int hackrf_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a) { struct hackrf_dev *dev = video_drvdata(file); dev_dbg(dev->dev, "index=%d\n", a->index); return a->index > 1 ? -EINVAL : 0; } static int hackrf_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a) { struct hackrf_dev *dev = video_drvdata(file); int ret; dev_dbg(dev->dev, "index=%d\n", a->index); if (a->index == 0) { strscpy(a->name, "HackRF DAC", sizeof(a->name)); a->type = V4L2_TUNER_SDR; a->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS; a->rangelow = bands_adc_dac[0].rangelow; a->rangehigh = bands_adc_dac[0].rangehigh; ret = 0; } else if (a->index == 1) { strscpy(a->name, "HackRF RF", sizeof(a->name)); a->type = V4L2_TUNER_RF; a->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS; a->rangelow = bands_rx_tx[0].rangelow; a->rangehigh = bands_rx_tx[0].rangehigh; ret = 0; } else { ret = -EINVAL; } return ret; } static int hackrf_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *f) { struct hackrf_dev *dev = video_drvdata(file); struct usb_interface *intf = dev->intf; struct video_device *vdev = video_devdata(file); int ret; unsigned int uitmp; dev_dbg(&intf->dev, "tuner=%d type=%d frequency=%u\n", f->tuner, f->type, f->frequency); if (f->tuner == 0) { uitmp = clamp(f->frequency, bands_adc_dac[0].rangelow, bands_adc_dac[0].rangehigh); if (vdev->vfl_dir == VFL_DIR_RX) { dev->f_adc = uitmp; set_bit(RX_ADC_FREQUENCY, &dev->flags); } else { dev->f_dac = uitmp; set_bit(TX_DAC_FREQUENCY, &dev->flags); } } else if (f->tuner == 1) { uitmp = clamp(f->frequency, bands_rx_tx[0].rangelow, bands_rx_tx[0].rangehigh); if (vdev->vfl_dir == VFL_DIR_RX) { dev->f_rx = uitmp; set_bit(RX_RF_FREQUENCY, &dev->flags); } else { dev->f_tx = uitmp; set_bit(TX_RF_FREQUENCY, &dev->flags); } } else { ret = -EINVAL; goto err; } ret = hackrf_set_params(dev); if (ret) goto err; return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int hackrf_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct hackrf_dev *dev = video_drvdata(file); struct usb_interface *intf = dev->intf; struct video_device *vdev = video_devdata(file); int ret; dev_dbg(dev->dev, "tuner=%d type=%d\n", f->tuner, f->type); if (f->tuner == 0) { f->type = V4L2_TUNER_SDR; if (vdev->vfl_dir == VFL_DIR_RX) f->frequency = dev->f_adc; else f->frequency = dev->f_dac; } else if (f->tuner == 1) { f->type = V4L2_TUNER_RF; if (vdev->vfl_dir == VFL_DIR_RX) f->frequency = dev->f_rx; else f->frequency = dev->f_tx; } else { ret = -EINVAL; goto err; } 
return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int hackrf_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band) { struct hackrf_dev *dev = video_drvdata(file); int ret; dev_dbg(dev->dev, "tuner=%d type=%d index=%d\n", band->tuner, band->type, band->index); if (band->tuner == 0) { if (band->index >= ARRAY_SIZE(bands_adc_dac)) { ret = -EINVAL; } else { *band = bands_adc_dac[band->index]; ret = 0; } } else if (band->tuner == 1) { if (band->index >= ARRAY_SIZE(bands_rx_tx)) { ret = -EINVAL; } else { *band = bands_rx_tx[band->index]; ret = 0; } } else { ret = -EINVAL; } return ret; } static const struct v4l2_ioctl_ops hackrf_ioctl_ops = { .vidioc_querycap = hackrf_querycap, .vidioc_s_fmt_sdr_cap = hackrf_s_fmt_sdr, .vidioc_g_fmt_sdr_cap = hackrf_g_fmt_sdr, .vidioc_enum_fmt_sdr_cap = hackrf_enum_fmt_sdr, .vidioc_try_fmt_sdr_cap = hackrf_try_fmt_sdr, .vidioc_s_fmt_sdr_out = hackrf_s_fmt_sdr, .vidioc_g_fmt_sdr_out = hackrf_g_fmt_sdr, .vidioc_enum_fmt_sdr_out = hackrf_enum_fmt_sdr, .vidioc_try_fmt_sdr_out = hackrf_try_fmt_sdr, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_s_tuner = hackrf_s_tuner, .vidioc_g_tuner = hackrf_g_tuner, .vidioc_s_modulator = hackrf_s_modulator, .vidioc_g_modulator = hackrf_g_modulator, .vidioc_s_frequency = hackrf_s_frequency, .vidioc_g_frequency = hackrf_g_frequency, .vidioc_enum_freq_bands = hackrf_enum_freq_bands, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, .vidioc_log_status = v4l2_ctrl_log_status, }; static const struct v4l2_file_operations hackrf_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, .read = vb2_fop_read, .write = vb2_fop_write, .poll = vb2_fop_poll, .mmap = vb2_fop_mmap, .unlocked_ioctl = video_ioctl2, }; static const struct video_device hackrf_template = { .name = "HackRF One", .release = video_device_release_empty, .fops = &hackrf_fops, .ioctl_ops = &hackrf_ioctl_ops, }; static void hackrf_video_release(struct v4l2_device *v) { struct hackrf_dev *dev = container_of(v, struct hackrf_dev, v4l2_dev); dev_dbg(dev->dev, "\n"); v4l2_ctrl_handler_free(&dev->rx_ctrl_handler); v4l2_ctrl_handler_free(&dev->tx_ctrl_handler); v4l2_device_unregister(&dev->v4l2_dev); kfree(dev); } static int hackrf_s_ctrl_rx(struct v4l2_ctrl *ctrl) { struct hackrf_dev *dev = container_of(ctrl->handler, struct hackrf_dev, rx_ctrl_handler); struct usb_interface *intf = dev->intf; int ret; switch (ctrl->id) { case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: case V4L2_CID_RF_TUNER_BANDWIDTH: set_bit(RX_BANDWIDTH, &dev->flags); break; case V4L2_CID_RF_TUNER_RF_GAIN: set_bit(RX_RF_GAIN, &dev->flags); break; case V4L2_CID_RF_TUNER_LNA_GAIN: set_bit(RX_LNA_GAIN, &dev->flags); break; case V4L2_CID_RF_TUNER_IF_GAIN: set_bit(RX_IF_GAIN, &dev->flags); break; default: dev_dbg(&intf->dev, "unknown ctrl: id=%d name=%s\n", ctrl->id, ctrl->name); ret = -EINVAL; goto err; } ret = hackrf_set_params(dev); if (ret) goto err; return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int hackrf_s_ctrl_tx(struct v4l2_ctrl *ctrl) { struct hackrf_dev *dev = container_of(ctrl->handler, struct hackrf_dev, tx_ctrl_handler); struct usb_interface *intf = 
dev->intf; int ret; switch (ctrl->id) { case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: case V4L2_CID_RF_TUNER_BANDWIDTH: set_bit(TX_BANDWIDTH, &dev->flags); break; case V4L2_CID_RF_TUNER_LNA_GAIN: set_bit(TX_LNA_GAIN, &dev->flags); break; case V4L2_CID_RF_TUNER_RF_GAIN: set_bit(TX_RF_GAIN, &dev->flags); break; default: dev_dbg(&intf->dev, "unknown ctrl: id=%d name=%s\n", ctrl->id, ctrl->name); ret = -EINVAL; goto err; } ret = hackrf_set_params(dev); if (ret) goto err; return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static const struct v4l2_ctrl_ops hackrf_ctrl_ops_rx = { .s_ctrl = hackrf_s_ctrl_rx, }; static const struct v4l2_ctrl_ops hackrf_ctrl_ops_tx = { .s_ctrl = hackrf_s_ctrl_tx, }; static int hackrf_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct hackrf_dev *dev; int ret; u8 u8tmp, buf[BUF_SIZE]; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto err; } mutex_init(&dev->v4l2_lock); mutex_init(&dev->vb_queue_lock); spin_lock_init(&dev->buffer_list_lock); INIT_LIST_HEAD(&dev->rx_buffer_list); INIT_LIST_HEAD(&dev->tx_buffer_list); dev->intf = intf; dev->dev = &intf->dev; dev->udev = interface_to_usbdev(intf); dev->pixelformat = formats[0].pixelformat; dev->buffersize = formats[0].buffersize; dev->f_adc = bands_adc_dac[0].rangelow; dev->f_dac = bands_adc_dac[0].rangelow; dev->f_rx = bands_rx_tx[0].rangelow; dev->f_tx = bands_rx_tx[0].rangelow; set_bit(RX_ADC_FREQUENCY, &dev->flags); set_bit(TX_DAC_FREQUENCY, &dev->flags); set_bit(RX_RF_FREQUENCY, &dev->flags); set_bit(TX_RF_FREQUENCY, &dev->flags); /* Detect device */ ret = hackrf_ctrl_msg(dev, CMD_BOARD_ID_READ, 0, 0, &u8tmp, 1); if (ret == 0) ret = hackrf_ctrl_msg(dev, CMD_VERSION_STRING_READ, 0, 0, buf, BUF_SIZE); if (ret) { dev_err(dev->dev, "Could not detect board\n"); goto err_kfree; } buf[BUF_SIZE - 1] = '\0'; dev_info(dev->dev, "Board ID: %02x\n", u8tmp); dev_info(dev->dev, "Firmware version: %s\n", buf); /* Init vb2 queue structure for receiver */ dev->rx_vb2_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE; dev->rx_vb2_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; dev->rx_vb2_queue.ops = &hackrf_vb2_ops; dev->rx_vb2_queue.mem_ops = &vb2_vmalloc_memops; dev->rx_vb2_queue.drv_priv = dev; dev->rx_vb2_queue.buf_struct_size = sizeof(struct hackrf_buffer); dev->rx_vb2_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; dev->rx_vb2_queue.lock = &dev->vb_queue_lock; ret = vb2_queue_init(&dev->rx_vb2_queue); if (ret) { dev_err(dev->dev, "Could not initialize rx vb2 queue\n"); goto err_kfree; } /* Init vb2 queue structure for transmitter */ dev->tx_vb2_queue.type = V4L2_BUF_TYPE_SDR_OUTPUT; dev->tx_vb2_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE; dev->tx_vb2_queue.ops = &hackrf_vb2_ops; dev->tx_vb2_queue.mem_ops = &vb2_vmalloc_memops; dev->tx_vb2_queue.drv_priv = dev; dev->tx_vb2_queue.buf_struct_size = sizeof(struct hackrf_buffer); dev->tx_vb2_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; dev->tx_vb2_queue.lock = &dev->vb_queue_lock; ret = vb2_queue_init(&dev->tx_vb2_queue); if (ret) { dev_err(dev->dev, "Could not initialize tx vb2 queue\n"); goto err_kfree; } /* Register controls for receiver */ v4l2_ctrl_handler_init(&dev->rx_ctrl_handler, 5); dev->rx_bandwidth_auto = v4l2_ctrl_new_std(&dev->rx_ctrl_handler, &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 0, 1); dev->rx_bandwidth = v4l2_ctrl_new_std(&dev->rx_ctrl_handler, &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_BANDWIDTH, 1750000, 28000000, 50000, 1750000); 
v4l2_ctrl_auto_cluster(2, &dev->rx_bandwidth_auto, 0, false); dev->rx_rf_gain = v4l2_ctrl_new_std(&dev->rx_ctrl_handler, &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_RF_GAIN, 0, 12, 12, 0); dev->rx_lna_gain = v4l2_ctrl_new_std(&dev->rx_ctrl_handler, &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_LNA_GAIN, 0, 40, 8, 0); dev->rx_if_gain = v4l2_ctrl_new_std(&dev->rx_ctrl_handler, &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_IF_GAIN, 0, 62, 2, 0); if (dev->rx_ctrl_handler.error) { ret = dev->rx_ctrl_handler.error; dev_err(dev->dev, "Could not initialize controls\n"); goto err_v4l2_ctrl_handler_free_rx; } v4l2_ctrl_grab(dev->rx_rf_gain, !hackrf_enable_rf_gain_ctrl); v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler); /* Register controls for transmitter */ v4l2_ctrl_handler_init(&dev->tx_ctrl_handler, 4); dev->tx_bandwidth_auto = v4l2_ctrl_new_std(&dev->tx_ctrl_handler, &hackrf_ctrl_ops_tx, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 0, 1); dev->tx_bandwidth = v4l2_ctrl_new_std(&dev->tx_ctrl_handler, &hackrf_ctrl_ops_tx, V4L2_CID_RF_TUNER_BANDWIDTH, 1750000, 28000000, 50000, 1750000); v4l2_ctrl_auto_cluster(2, &dev->tx_bandwidth_auto, 0, false); dev->tx_lna_gain = v4l2_ctrl_new_std(&dev->tx_ctrl_handler, &hackrf_ctrl_ops_tx, V4L2_CID_RF_TUNER_LNA_GAIN, 0, 47, 1, 0); dev->tx_rf_gain = v4l2_ctrl_new_std(&dev->tx_ctrl_handler, &hackrf_ctrl_ops_tx, V4L2_CID_RF_TUNER_RF_GAIN, 0, 15, 15, 0); if (dev->tx_ctrl_handler.error) { ret = dev->tx_ctrl_handler.error; dev_err(dev->dev, "Could not initialize controls\n"); goto err_v4l2_ctrl_handler_free_tx; } v4l2_ctrl_grab(dev->tx_rf_gain, !hackrf_enable_rf_gain_ctrl); v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler); /* Register the v4l2_device structure */ dev->v4l2_dev.release = hackrf_video_release; ret = v4l2_device_register(&intf->dev, &dev->v4l2_dev); if (ret) { dev_err(dev->dev, "Failed to register v4l2-device (%d)\n", ret); goto err_v4l2_ctrl_handler_free_tx; } /* Init video_device structure for receiver */ dev->rx_vdev = hackrf_template; dev->rx_vdev.queue = &dev->rx_vb2_queue; dev->rx_vdev.v4l2_dev = &dev->v4l2_dev; dev->rx_vdev.ctrl_handler = &dev->rx_ctrl_handler; dev->rx_vdev.lock = &dev->v4l2_lock; dev->rx_vdev.vfl_dir = VFL_DIR_RX; dev->rx_vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER; video_set_drvdata(&dev->rx_vdev, dev); ret = video_register_device(&dev->rx_vdev, VFL_TYPE_SDR, -1); if (ret) { dev_err(dev->dev, "Failed to register as video device (%d)\n", ret); goto err_v4l2_device_unregister; } dev_info(dev->dev, "Registered as %s\n", video_device_node_name(&dev->rx_vdev)); /* Init video_device structure for transmitter */ dev->tx_vdev = hackrf_template; dev->tx_vdev.queue = &dev->tx_vb2_queue; dev->tx_vdev.v4l2_dev = &dev->v4l2_dev; dev->tx_vdev.ctrl_handler = &dev->tx_ctrl_handler; dev->tx_vdev.lock = &dev->v4l2_lock; dev->tx_vdev.vfl_dir = VFL_DIR_TX; dev->tx_vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | V4L2_CAP_SDR_OUTPUT | V4L2_CAP_MODULATOR; video_set_drvdata(&dev->tx_vdev, dev); ret = video_register_device(&dev->tx_vdev, VFL_TYPE_SDR, -1); if (ret) { dev_err(dev->dev, "Failed to register as video device (%d)\n", ret); goto err_video_unregister_device_rx; } dev_info(dev->dev, "Registered as %s\n", video_device_node_name(&dev->tx_vdev)); dev_notice(dev->dev, "SDR API is still slightly experimental and functionality changes may follow\n"); return 0; err_video_unregister_device_rx: video_unregister_device(&dev->rx_vdev); err_v4l2_device_unregister: v4l2_device_unregister(&dev->v4l2_dev); 
err_v4l2_ctrl_handler_free_tx:
	v4l2_ctrl_handler_free(&dev->tx_ctrl_handler);
err_v4l2_ctrl_handler_free_rx:
	v4l2_ctrl_handler_free(&dev->rx_ctrl_handler);
err_kfree:
	kfree(dev);
err:
	dev_dbg(&intf->dev, "failed=%d\n", ret);
	return ret;
}

/* USB device ID list */
static const struct usb_device_id hackrf_id_table[] = {
	{ USB_DEVICE(0x1d50, 0x6089) }, /* HackRF One */
	{ }
};
MODULE_DEVICE_TABLE(usb, hackrf_id_table);

/* USB subsystem interface */
static struct usb_driver hackrf_driver = {
	.name       = KBUILD_MODNAME,
	.probe      = hackrf_probe,
	.disconnect = hackrf_disconnect,
	.id_table   = hackrf_id_table,
};

module_usb_driver(hackrf_driver);

MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("HackRF");
MODULE_LICENSE("GPL");
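/*
 * Illustrative note (not part of the driver): the pending-bit dance in
 * hackrf_set_params() is easy to misread. Sample rate, filter bandwidth,
 * RF frequency and RF gain each exist only once in the hardware but have one
 * pending bit per direction; whenever one direction flushes its bit to the
 * device it immediately re-arms the other direction's bit, so the other path
 * re-applies its own value the next time it streams. A minimal sketch of the
 * shape, with hypothetical RX_PARAM/TX_PARAM bits and a hypothetical
 * write_param_to_hw() helper:
 */
#if 0
	if (rx && test_and_clear_bit(RX_PARAM, &dev->flags)) {
		write_param_to_hw(dev, dev->rx_value);
		set_bit(TX_PARAM, &dev->flags);	/* replay when TX starts */
	} else if (tx && test_and_clear_bit(TX_PARAM, &dev->flags)) {
		write_param_to_hw(dev, dev->tx_value);
		set_bit(RX_PARAM, &dev->flags);	/* replay when RX starts */
	}
#endif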
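/*
 * Illustrative sketch (not part of the driver): hackrf_set_params() packs
 * 32-bit values into little-endian byte order by hand before sending
 * CMD_SAMPLE_RATE_SET and CMD_SET_FREQ. The hypothetical helper below shows
 * that packing in one place (the kernel's put_unaligned_le32() would express
 * the same thing). CMD_SET_FREQ carries the frequency as two LE u32s, the
 * whole-MHz part followed by the sub-MHz remainder, exactly the f / 1000000
 * and f % 1000000 values computed in hackrf_set_params().
 */
#if 0
static void hackrf_le32_pack(u8 *buf, u32 val)
{
	buf[0] = (val >>  0) & 0xff;
	buf[1] = (val >>  8) & 0xff;
	buf[2] = (val >> 16) & 0xff;
	buf[3] = (val >> 24) & 0xff;
}

/* Example: tuning to 100 MHz produces 64 00 00 00 00 00 00 00 on the wire */
static void hackrf_pack_freq_example(u8 buf[8])
{
	u32 f_rx = 100000000;	/* 100 MHz */

	hackrf_le32_pack(&buf[0], f_rx / 1000000);	/* MHz part: 100 */
	hackrf_le32_pack(&buf[4], f_rx % 1000000);	/* Hz remainder: 0 */
}
#endif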
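/*
 * Illustrative sketch (not part of the driver): the bandwidth_lut walk in
 * hackrf_set_params() rounds a requested baseband filter bandwidth *up* to
 * the nearest value the hardware supports. The standalone function below
 * mirrors that logic; its name is hypothetical.
 */
#if 0
static u32 hackrf_round_bandwidth(u32 req)
{
	static const u32 lut[] = {
		 1750000,  2500000,  3500000,  5000000,  5500000,
		 6000000,  7000000,  8000000,  9000000, 10000000,
		12000000, 14000000, 15000000, 20000000, 24000000,
		28000000,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(lut); i++) {
		if (req <= lut[i])
			return lut[i];
	}
	/*
	 * Like the driver loop, pass an over-range request through
	 * unchanged; the V4L2 control range (1.75..28 MHz) already
	 * prevents this case in practice.
	 */
	return req;
}

/* Example: hackrf_round_bandwidth(4000000) returns 5000000 (5 MHz). */
#endif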
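/*
 * Illustrative sketch (not part of the driver): hackrf_copy_stream() counts
 * samples, not bytes. With V4L2_SDR_FMT_CS8 each complex sample is one signed
 * 8-bit I byte plus one signed 8-bit Q byte, hence dev->sample += src_len / 2,
 * and the periodic debug print derives the rate from the samples seen in a
 * roughly 10-second window. The same arithmetic with hypothetical names,
 * widened to 64 bits to stay safe on 32-bit builds:
 */
#if 0
#include <linux/math64.h>

static u64 cs8_sample_rate(u64 bytes_seen, u32 window_msecs)
{
	u64 samples = bytes_seen / 2;	/* I byte + Q byte per sample */

	/* samples per second = samples * 1000 / elapsed milliseconds */
	return div_u64(samples * 1000, window_msecs);
}

/* Example: 200000000 bytes in 10000 ms -> 100000000 samples -> 10 Msps */
#endif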
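/*
 * Illustrative sketch (not part of the driver, untested against hardware):
 * the same CMD_BOARD_ID_READ vendor request that hackrf_probe() issues can be
 * sent from userspace with libusb, which is handy for sanity-checking a
 * device the V4L2 driver refuses to bind. Note the kernel driver may need to
 * be unbound first, since both cannot own the interface at once.
 */
#if 0
#include <stdio.h>
#include <libusb-1.0/libusb.h>

int main(void)
{
	libusb_context *ctx;
	libusb_device_handle *h;
	unsigned char board_id;
	int ret;

	if (libusb_init(&ctx) < 0)
		return 1;

	/* 1d50:6089 is the HackRF One ID from hackrf_id_table[] */
	h = libusb_open_device_with_vid_pid(ctx, 0x1d50, 0x6089);
	if (!h) {
		libusb_exit(ctx);
		return 1;
	}

	/* Vendor IN request 0x0e = CMD_BOARD_ID_READ, 1 byte of data */
	ret = libusb_control_transfer(h,
			LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_VENDOR |
			LIBUSB_RECIPIENT_DEVICE,
			0x0e, 0, 0, &board_id, 1, 1000);
	if (ret == 1)
		printf("Board ID: %02x\n", board_id);

	libusb_close(h);
	libusb_exit(ctx);
	return 0;
}
#endif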
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 */

/*
 * fsnotify inode mark locking/lifetime/refcnting
 *
 * REFCNT:
 * The group->refcnt and mark->refcnt tell how many "things" in the kernel
 * currently are referencing the objects. Both kinds of objects typically will
 * live inside the kernel with a refcnt of 2, one for its creation and one for
 * the reference a group and a mark hold to each other.
 * If you are holding the appropriate locks, you can take a reference and the
 * object itself is guaranteed to survive until the reference is dropped.
 *
 * LOCKING:
 * There are 3 locks involved with fsnotify inode marks and they MUST be taken
 * in order as follows:
 *
 * group->mark_mutex
 * mark->lock
 * mark->connector->lock
 *
 * group->mark_mutex protects the marks_list anchored inside a given group and
 * each mark is hooked via the g_list. It also protects the group's private
 * data (i.e. group limits).
 * mark->lock protects the mark's attributes like its masks and flags.
 * Furthermore it protects the access to a reference of the group that the mark
 * is assigned to as well as the access to a reference of the inode/vfsmount
 * that is being watched by the mark.
 *
 * mark->connector->lock protects the list of marks anchored inside an
 * inode / vfsmount and each mark is hooked via the i_list.
 *
 * A list of notification marks relating to inode / mnt is contained in
 * fsnotify_mark_connector. That structure is alive as long as there are any
 * marks in the list and is also protected by fsnotify_mark_srcu. A mark gets
 * detached from fsnotify_mark_connector when last reference to the mark is
 * dropped. Thus having mark reference is enough to protect mark->connector
 * pointer and to make sure fsnotify_mark_connector cannot disappear. Also
 * because we remove mark from g_list before dropping mark reference associated
 * with that, any mark found through g_list is guaranteed to have
 * mark->connector set until we drop group->mark_mutex.
 *
 * LIFETIME:
 * Inode marks survive between when they are added to an inode and when their
 * refcnt==0. Marks are also protected by fsnotify_mark_srcu.
 *
 * The inode mark can be cleared for a number of different reasons including:
 * - The inode is unlinked for the last time. (fsnotify_inode_remove)
 * - The inode is being evicted from cache. (fsnotify_inode_delete)
 * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
 * - Something explicitly requests that it be removed. (fsnotify_destroy_mark)
 * - The fsnotify_group associated with the mark is going away and all such
 *   marks need to be cleaned up. (fsnotify_clear_marks_by_group)
 *
 * This has the very interesting property of being able to run concurrently
 * with any (or all) other directions.
*/ #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/srcu.h> #include <linux/ratelimit.h> #include <linux/atomic.h> #include <linux/fsnotify_backend.h> #include "fsnotify.h" #define FSNOTIFY_REAPER_DELAY (1) /* 1 jiffy */ struct srcu_struct fsnotify_mark_srcu; struct kmem_cache *fsnotify_mark_connector_cachep; static DEFINE_SPINLOCK(destroy_lock); static LIST_HEAD(destroy_list); static struct fsnotify_mark_connector *connector_destroy_list; static void fsnotify_mark_destroy_workfn(struct work_struct *work); static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy_workfn); static void fsnotify_connector_destroy_workfn(struct work_struct *work); static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn); void fsnotify_get_mark(struct fsnotify_mark *mark) { WARN_ON_ONCE(!refcount_read(&mark->refcnt)); refcount_inc(&mark->refcnt); } static fsnotify_connp_t *fsnotify_object_connp(void *obj, enum fsnotify_obj_type obj_type) { switch (obj_type) { case FSNOTIFY_OBJ_TYPE_INODE: return &((struct inode *)obj)->i_fsnotify_marks; case FSNOTIFY_OBJ_TYPE_VFSMOUNT: return &real_mount(obj)->mnt_fsnotify_marks; case FSNOTIFY_OBJ_TYPE_SB: return fsnotify_sb_marks(obj); case FSNOTIFY_OBJ_TYPE_MNTNS: return &((struct mnt_namespace *)obj)->n_fsnotify_marks; default: return NULL; } } static __u32 *fsnotify_conn_mask_p(struct fsnotify_mark_connector *conn) { if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) return &fsnotify_conn_inode(conn)->i_fsnotify_mask; else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) return &fsnotify_conn_mount(conn)->mnt_fsnotify_mask; else if (conn->type == FSNOTIFY_OBJ_TYPE_SB) return &fsnotify_conn_sb(conn)->s_fsnotify_mask; else if (conn->type == FSNOTIFY_OBJ_TYPE_MNTNS) return &fsnotify_conn_mntns(conn)->n_fsnotify_mask; return NULL; } __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn) { if (WARN_ON(!fsnotify_valid_obj_type(conn->type))) return 0; return READ_ONCE(*fsnotify_conn_mask_p(conn)); } static void fsnotify_get_sb_watched_objects(struct super_block *sb) { atomic_long_inc(fsnotify_sb_watched_objects(sb)); } static void fsnotify_put_sb_watched_objects(struct super_block *sb) { atomic_long_t *watched_objects = fsnotify_sb_watched_objects(sb); /* the superblock can go away after this decrement */ if (atomic_long_dec_and_test(watched_objects)) wake_up_var(watched_objects); } static void fsnotify_get_inode_ref(struct inode *inode) { ihold(inode); fsnotify_get_sb_watched_objects(inode->i_sb); } static void fsnotify_put_inode_ref(struct inode *inode) { /* read ->i_sb before the inode can go away */ struct super_block *sb = inode->i_sb; iput(inode); fsnotify_put_sb_watched_objects(sb); } /* * Grab or drop watched objects reference depending on whether the connector * is attached and has any marks attached. 
 */
static void fsnotify_update_sb_watchers(struct super_block *sb,
					struct fsnotify_mark_connector *conn)
{
	struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb);
	bool is_watched = conn->flags & FSNOTIFY_CONN_FLAG_IS_WATCHED;
	struct fsnotify_mark *first_mark = NULL;
	unsigned int highest_prio = 0;

	if (conn->obj)
		first_mark = hlist_entry_safe(conn->list.first,
					      struct fsnotify_mark, obj_list);
	if (first_mark)
		highest_prio = first_mark->group->priority;
	if (WARN_ON(highest_prio >= __FSNOTIFY_PRIO_NUM))
		highest_prio = 0;

	/*
	 * If the highest priority of group watching this object is prio,
	 * then watched object has a reference on counters [0..prio].
	 * Update priority >= 1 watched objects counters.
	 */
	for (unsigned int p = conn->prio + 1; p <= highest_prio; p++)
		atomic_long_inc(&sbinfo->watched_objects[p]);
	for (unsigned int p = conn->prio; p > highest_prio; p--)
		atomic_long_dec(&sbinfo->watched_objects[p]);
	conn->prio = highest_prio;

	/* Update priority >= 0 (a.k.a total) watched objects counter */
	BUILD_BUG_ON(FSNOTIFY_PRIO_NORMAL != 0);
	if (first_mark && !is_watched) {
		conn->flags |= FSNOTIFY_CONN_FLAG_IS_WATCHED;
		fsnotify_get_sb_watched_objects(sb);
	} else if (!first_mark && is_watched) {
		conn->flags &= ~FSNOTIFY_CONN_FLAG_IS_WATCHED;
		fsnotify_put_sb_watched_objects(sb);
	}
}

/*
 * Grab or drop inode reference for the connector if needed.
 *
 * When it's time to drop the reference, we only clear the HAS_IREF flag and
 * return the inode object. fsnotify_drop_object() will be responsible for
 * doing iput() outside of spinlocks. This happens when the last mark that
 * wanted iref is detached.
 */
static struct inode *fsnotify_update_iref(struct fsnotify_mark_connector *conn,
					  bool want_iref)
{
	bool has_iref = conn->flags & FSNOTIFY_CONN_FLAG_HAS_IREF;
	struct inode *inode = NULL;

	if (conn->type != FSNOTIFY_OBJ_TYPE_INODE ||
	    want_iref == has_iref)
		return NULL;

	if (want_iref) {
		/* Pin inode if any mark wants inode refcount held */
		fsnotify_get_inode_ref(fsnotify_conn_inode(conn));
		conn->flags |= FSNOTIFY_CONN_FLAG_HAS_IREF;
	} else {
		/* Unpin inode after detach of last mark that wanted iref */
		inode = fsnotify_conn_inode(conn);
		conn->flags &= ~FSNOTIFY_CONN_FLAG_HAS_IREF;
	}

	return inode;
}

static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
{
	u32 new_mask = 0;
	bool want_iref = false;
	struct fsnotify_mark *mark;

	assert_spin_locked(&conn->lock);
	/* We can get detached connector here when inode is getting unlinked. */
	if (!fsnotify_valid_obj_type(conn->type))
		return NULL;
	hlist_for_each_entry(mark, &conn->list, obj_list) {
		if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED))
			continue;
		new_mask |= fsnotify_calc_mask(mark);

		if (conn->type == FSNOTIFY_OBJ_TYPE_INODE &&
		    !(mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF))
			want_iref = true;
	}
	/*
	 * We use WRITE_ONCE() to prevent silly compiler optimizations from
	 * confusing readers not holding conn->lock with partial updates.
	 */
	WRITE_ONCE(*fsnotify_conn_mask_p(conn), new_mask);

	return fsnotify_update_iref(conn, want_iref);
}

static bool fsnotify_conn_watches_children(
					struct fsnotify_mark_connector *conn)
{
	if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
		return false;

	return fsnotify_inode_watches_children(fsnotify_conn_inode(conn));
}

static void fsnotify_conn_set_children_dentry_flags(
					struct fsnotify_mark_connector *conn)
{
	if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
		return;

	fsnotify_set_children_dentry_flags(fsnotify_conn_inode(conn));
}

/*
 * Calculate mask of events for a list of marks.
The caller must make sure * connector and connector->obj cannot disappear under us. Callers achieve * this by holding a mark->lock or mark->group->mark_mutex for a mark on this * list. */ void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) { bool update_children; if (!conn) return; spin_lock(&conn->lock); update_children = !fsnotify_conn_watches_children(conn); __fsnotify_recalc_mask(conn); update_children &= fsnotify_conn_watches_children(conn); spin_unlock(&conn->lock); /* * Set children's PARENT_WATCHED flags only if parent started watching. * When parent stops watching, we clear false positive PARENT_WATCHED * flags lazily in __fsnotify_parent(). */ if (update_children) fsnotify_conn_set_children_dentry_flags(conn); } /* Free all connectors queued for freeing once SRCU period ends */ static void fsnotify_connector_destroy_workfn(struct work_struct *work) { struct fsnotify_mark_connector *conn, *free; spin_lock(&destroy_lock); conn = connector_destroy_list; connector_destroy_list = NULL; spin_unlock(&destroy_lock); synchronize_srcu(&fsnotify_mark_srcu); while (conn) { free = conn; conn = conn->destroy_next; kmem_cache_free(fsnotify_mark_connector_cachep, free); } } static void *fsnotify_detach_connector_from_object( struct fsnotify_mark_connector *conn, unsigned int *type) { fsnotify_connp_t *connp = fsnotify_object_connp(conn->obj, conn->type); struct super_block *sb = fsnotify_connector_sb(conn); struct inode *inode = NULL; *type = conn->type; if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED) return NULL; if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) { inode = fsnotify_conn_inode(conn); inode->i_fsnotify_mask = 0; /* Unpin inode when detaching from connector */ if (!(conn->flags & FSNOTIFY_CONN_FLAG_HAS_IREF)) inode = NULL; } else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) { fsnotify_conn_mount(conn)->mnt_fsnotify_mask = 0; } else if (conn->type == FSNOTIFY_OBJ_TYPE_SB) { fsnotify_conn_sb(conn)->s_fsnotify_mask = 0; } else if (conn->type == FSNOTIFY_OBJ_TYPE_MNTNS) { fsnotify_conn_mntns(conn)->n_fsnotify_mask = 0; } rcu_assign_pointer(*connp, NULL); conn->obj = NULL; conn->type = FSNOTIFY_OBJ_TYPE_DETACHED; if (sb) fsnotify_update_sb_watchers(sb, conn); return inode; } static void fsnotify_final_mark_destroy(struct fsnotify_mark *mark) { struct fsnotify_group *group = mark->group; if (WARN_ON_ONCE(!group)) return; group->ops->free_mark(mark); fsnotify_put_group(group); } /* Drop object reference originally held by a connector */ static void fsnotify_drop_object(unsigned int type, void *objp) { if (!objp) return; /* Currently only inode references are passed to be dropped */ if (WARN_ON_ONCE(type != FSNOTIFY_OBJ_TYPE_INODE)) return; fsnotify_put_inode_ref(objp); } void fsnotify_put_mark(struct fsnotify_mark *mark) { struct fsnotify_mark_connector *conn = READ_ONCE(mark->connector); void *objp = NULL; unsigned int type = FSNOTIFY_OBJ_TYPE_DETACHED; bool free_conn = false; /* Catch marks that were actually never attached to object */ if (!conn) { if (refcount_dec_and_test(&mark->refcnt)) fsnotify_final_mark_destroy(mark); return; } /* * We have to be careful so that traversals of obj_list under lock can * safely grab mark reference. 
*/ if (!refcount_dec_and_lock(&mark->refcnt, &conn->lock)) return; hlist_del_init_rcu(&mark->obj_list); if (hlist_empty(&conn->list)) { objp = fsnotify_detach_connector_from_object(conn, &type); free_conn = true; } else { struct super_block *sb = fsnotify_connector_sb(conn); /* Update watched objects after detaching mark */ if (sb) fsnotify_update_sb_watchers(sb, conn); objp = __fsnotify_recalc_mask(conn); type = conn->type; } WRITE_ONCE(mark->connector, NULL); spin_unlock(&conn->lock); fsnotify_drop_object(type, objp); if (free_conn) { spin_lock(&destroy_lock); conn->destroy_next = connector_destroy_list; connector_destroy_list = conn; spin_unlock(&destroy_lock); queue_work(system_unbound_wq, &connector_reaper_work); } /* * Note that we didn't update flags telling whether inode cares about * what's happening with children. We update these flags from * __fsnotify_parent() lazily when next event happens on one of our * children. */ spin_lock(&destroy_lock); list_add(&mark->g_list, &destroy_list); spin_unlock(&destroy_lock); queue_delayed_work(system_unbound_wq, &reaper_work, FSNOTIFY_REAPER_DELAY); } EXPORT_SYMBOL_GPL(fsnotify_put_mark); /* * Get mark reference when we found the mark via lockless traversal of object * list. Mark can be already removed from the list by now and on its way to be * destroyed once SRCU period ends. * * Also pin the group so it doesn't disappear under us. */ static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark) { if (!mark) return true; if (refcount_inc_not_zero(&mark->refcnt)) { spin_lock(&mark->lock); if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) { /* mark is attached, group is still alive then */ atomic_inc(&mark->group->user_waits); spin_unlock(&mark->lock); return true; } spin_unlock(&mark->lock); fsnotify_put_mark(mark); } return false; } /* * Puts marks and wakes up group destruction if necessary. * * Pairs with fsnotify_get_mark_safe() */ static void fsnotify_put_mark_wake(struct fsnotify_mark *mark) { if (mark) { struct fsnotify_group *group = mark->group; fsnotify_put_mark(mark); /* * We abuse notification_waitq on group shutdown for waiting for * all marks pinned when waiting for userspace. */ if (atomic_dec_and_test(&group->user_waits) && group->shutdown) wake_up(&group->notification_waitq); } } bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info) __releases(&fsnotify_mark_srcu) { int type; fsnotify_foreach_iter_type(type) { /* This can fail if mark is being removed */ if (!fsnotify_get_mark_safe(iter_info->marks[type])) { __release(&fsnotify_mark_srcu); goto fail; } } /* * Now that both marks are pinned by refcount in the inode / vfsmount * lists, we can drop SRCU lock, and safely resume the list iteration * once userspace returns. */ srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx); return true; fail: for (type--; type >= 0; type--) fsnotify_put_mark_wake(iter_info->marks[type]); return false; } void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info) __acquires(&fsnotify_mark_srcu) { int type; iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); fsnotify_foreach_iter_type(type) fsnotify_put_mark_wake(iter_info->marks[type]); } /* * Mark mark as detached, remove it from group list. Mark still stays in object * list until its last reference is dropped. Note that we rely on mark being * removed from group list before corresponding reference to it is dropped. In * particular we rely on mark->connector being valid while we hold * group->mark_mutex if we found the mark through g_list. 
* * Must be called with group->mark_mutex held. The caller must either hold * reference to the mark or be protected by fsnotify_mark_srcu. */ void fsnotify_detach_mark(struct fsnotify_mark *mark) { fsnotify_group_assert_locked(mark->group); WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) && refcount_read(&mark->refcnt) < 1 + !!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)); spin_lock(&mark->lock); /* something else already called this function on this mark */ if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) { spin_unlock(&mark->lock); return; } mark->flags &= ~FSNOTIFY_MARK_FLAG_ATTACHED; list_del_init(&mark->g_list); spin_unlock(&mark->lock); /* Drop mark reference acquired in fsnotify_add_mark_locked() */ fsnotify_put_mark(mark); } /* * Free fsnotify mark. The mark is actually only marked as being freed. The * freeing is actually happening only once last reference to the mark is * dropped from a workqueue which first waits for srcu period end. * * Caller must have a reference to the mark or be protected by * fsnotify_mark_srcu. */ void fsnotify_free_mark(struct fsnotify_mark *mark) { struct fsnotify_group *group = mark->group; spin_lock(&mark->lock); /* something else already called this function on this mark */ if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) { spin_unlock(&mark->lock); return; } mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE; spin_unlock(&mark->lock); /* * Some groups like to know that marks are being freed. This is a * callback to the group function to let it know that this mark * is being freed. */ if (group->ops->freeing_mark) group->ops->freeing_mark(mark, group); } void fsnotify_destroy_mark(struct fsnotify_mark *mark, struct fsnotify_group *group) { fsnotify_group_lock(group); fsnotify_detach_mark(mark); fsnotify_group_unlock(group); fsnotify_free_mark(mark); } EXPORT_SYMBOL_GPL(fsnotify_destroy_mark); /* * Sorting function for lists of fsnotify marks. * * Fanotify supports different notification classes (reflected as priority of * notification group). Events shall be passed to notification groups in * decreasing priority order. To achieve this marks in notification lists for * inodes and vfsmounts are sorted so that priorities of corresponding groups * are descending. * * Furthermore correct handling of the ignore mask requires processing inode * and vfsmount marks of each group together. Using the group address as * further sort criterion provides a unique sorting order and thus we can * merge inode and vfsmount lists of marks in linear time and find groups * present in both lists. * * A return value of 1 signifies that b has priority over a. * A return value of 0 signifies that the two marks have to be handled together. * A return value of -1 signifies that a has priority over b. 
*/ int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b) { if (a == b) return 0; if (!a) return 1; if (!b) return -1; if (a->priority < b->priority) return 1; if (a->priority > b->priority) return -1; if (a < b) return 1; return -1; } static int fsnotify_attach_info_to_sb(struct super_block *sb) { struct fsnotify_sb_info *sbinfo; /* sb info is freed on fsnotify_sb_delete() */ sbinfo = kzalloc(sizeof(*sbinfo), GFP_KERNEL); if (!sbinfo) return -ENOMEM; /* * cmpxchg() provides the barrier so that callers of fsnotify_sb_info() * will observe an initialized structure */ if (cmpxchg(&sb->s_fsnotify_info, NULL, sbinfo)) { /* Someone else created sbinfo for us */ kfree(sbinfo); } return 0; } static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp, void *obj, unsigned int obj_type) { struct fsnotify_mark_connector *conn; conn = kmem_cache_alloc(fsnotify_mark_connector_cachep, GFP_KERNEL); if (!conn) return -ENOMEM; spin_lock_init(&conn->lock); INIT_HLIST_HEAD(&conn->list); conn->flags = 0; conn->prio = 0; conn->type = obj_type; conn->obj = obj; /* * cmpxchg() provides the barrier so that readers of *connp can see * only initialized structure */ if (cmpxchg(connp, NULL, conn)) { /* Someone else created list structure for us */ kmem_cache_free(fsnotify_mark_connector_cachep, conn); } return 0; } /* * Get mark connector, make sure it is alive and return with its lock held. * This is for users that get connector pointer from inode or mount. Users that * hold reference to a mark on the list may directly lock connector->lock as * they are sure list cannot go away under them. */ static struct fsnotify_mark_connector *fsnotify_grab_connector( fsnotify_connp_t *connp) { struct fsnotify_mark_connector *conn; int idx; idx = srcu_read_lock(&fsnotify_mark_srcu); conn = srcu_dereference(*connp, &fsnotify_mark_srcu); if (!conn) goto out; spin_lock(&conn->lock); if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED) { spin_unlock(&conn->lock); srcu_read_unlock(&fsnotify_mark_srcu, idx); return NULL; } out: srcu_read_unlock(&fsnotify_mark_srcu, idx); return conn; } /* * Add mark into proper place in given list of marks. These marks may be used * for the fsnotify backend to determine which event types should be delivered * to which group and for which inodes. These marks are ordered according to * priority, highest number first, and then by the group's location in memory. */ static int fsnotify_add_mark_list(struct fsnotify_mark *mark, void *obj, unsigned int obj_type, int add_flags) { struct super_block *sb = fsnotify_object_sb(obj, obj_type); struct fsnotify_mark *lmark, *last = NULL; struct fsnotify_mark_connector *conn; fsnotify_connp_t *connp; int cmp; int err = 0; if (WARN_ON(!fsnotify_valid_obj_type(obj_type))) return -EINVAL; /* * Attach the sb info before attaching a connector to any object on sb. * The sb info will remain attached as long as sb lives. */ if (sb && !fsnotify_sb_info(sb)) { err = fsnotify_attach_info_to_sb(sb); if (err) return err; } connp = fsnotify_object_connp(obj, obj_type); restart: spin_lock(&mark->lock); conn = fsnotify_grab_connector(connp); if (!conn) { spin_unlock(&mark->lock); err = fsnotify_attach_connector_to_object(connp, obj, obj_type); if (err) return err; goto restart; } /* is mark the first mark? */ if (hlist_empty(&conn->list)) { hlist_add_head_rcu(&mark->obj_list, &conn->list); goto added; } /* should mark be in the middle of the current list? 
*/ hlist_for_each_entry(lmark, &conn->list, obj_list) { last = lmark; if ((lmark->group == mark->group) && (lmark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) && !(mark->group->flags & FSNOTIFY_GROUP_DUPS)) { err = -EEXIST; goto out_err; } cmp = fsnotify_compare_groups(lmark->group, mark->group); if (cmp >= 0) { hlist_add_before_rcu(&mark->obj_list, &lmark->obj_list); goto added; } } BUG_ON(last == NULL); /* mark should be the last entry. last is the current last entry */ hlist_add_behind_rcu(&mark->obj_list, &last->obj_list); added: if (sb) fsnotify_update_sb_watchers(sb, conn); /* * Since connector is attached to object using cmpxchg() we are * guaranteed that connector initialization is fully visible by anyone * seeing mark->connector set. */ WRITE_ONCE(mark->connector, conn); out_err: spin_unlock(&conn->lock); spin_unlock(&mark->lock); return err; } /* * Attach an initialized mark to a given group and fs object. * These marks may be used for the fsnotify backend to determine which * event types should be delivered to which group. */ int fsnotify_add_mark_locked(struct fsnotify_mark *mark, void *obj, unsigned int obj_type, int add_flags) { struct fsnotify_group *group = mark->group; int ret = 0; fsnotify_group_assert_locked(group); /* * LOCKING ORDER!!!! * group->mark_mutex * mark->lock * mark->connector->lock */ spin_lock(&mark->lock); mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_ATTACHED; list_add(&mark->g_list, &group->marks_list); fsnotify_get_mark(mark); /* for g_list */ spin_unlock(&mark->lock); ret = fsnotify_add_mark_list(mark, obj, obj_type, add_flags); if (ret) goto err; fsnotify_recalc_mask(mark->connector); return ret; err: spin_lock(&mark->lock); mark->flags &= ~(FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_ATTACHED); list_del_init(&mark->g_list); spin_unlock(&mark->lock); fsnotify_put_mark(mark); return ret; } int fsnotify_add_mark(struct fsnotify_mark *mark, void *obj, unsigned int obj_type, int add_flags) { int ret; struct fsnotify_group *group = mark->group; fsnotify_group_lock(group); ret = fsnotify_add_mark_locked(mark, obj, obj_type, add_flags); fsnotify_group_unlock(group); return ret; } EXPORT_SYMBOL_GPL(fsnotify_add_mark); /* * Given a list of marks, find the mark associated with given group. If found * take a reference to that mark and return it, else return NULL. */ struct fsnotify_mark *fsnotify_find_mark(void *obj, unsigned int obj_type, struct fsnotify_group *group) { fsnotify_connp_t *connp = fsnotify_object_connp(obj, obj_type); struct fsnotify_mark_connector *conn; struct fsnotify_mark *mark; if (!connp) return NULL; conn = fsnotify_grab_connector(connp); if (!conn) return NULL; hlist_for_each_entry(mark, &conn->list, obj_list) { if (mark->group == group && (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) { fsnotify_get_mark(mark); spin_unlock(&conn->lock); return mark; } } spin_unlock(&conn->lock); return NULL; } EXPORT_SYMBOL_GPL(fsnotify_find_mark); /* Clear any marks in a group with given type mask */ void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int obj_type) { struct fsnotify_mark *lmark, *mark; LIST_HEAD(to_free); struct list_head *head = &to_free; /* Skip selection step if we want to clear all marks. */ if (obj_type == FSNOTIFY_OBJ_TYPE_ANY) { head = &group->marks_list; goto clear; } /* * We have to be really careful here. Anytime we drop mark_mutex, e.g. * fsnotify_clear_marks_by_inode() can come and free marks. Even in our * to_free list so we have to use mark_mutex even when accessing that * list. 
And freeing mark requires us to drop mark_mutex. So we can
 * reliably free only the first mark in the list. That's why we first
 * move marks to free to to_free list in one go and then free marks in
 * to_free list one by one.
 */
	fsnotify_group_lock(group);
	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
		if (mark->connector->type == obj_type)
			list_move(&mark->g_list, &to_free);
	}
	fsnotify_group_unlock(group);

clear:
	while (1) {
		fsnotify_group_lock(group);
		if (list_empty(head)) {
			fsnotify_group_unlock(group);
			break;
		}
		mark = list_first_entry(head, struct fsnotify_mark, g_list);
		fsnotify_get_mark(mark);
		fsnotify_detach_mark(mark);
		fsnotify_group_unlock(group);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
	}
}

/* Destroy all marks attached to an object via connector */
void fsnotify_destroy_marks(fsnotify_connp_t *connp)
{
	struct fsnotify_mark_connector *conn;
	struct fsnotify_mark *mark, *old_mark = NULL;
	void *objp;
	unsigned int type;

	conn = fsnotify_grab_connector(connp);
	if (!conn)
		return;
	/*
	 * We have to be careful since we can race with e.g.
	 * fsnotify_clear_marks_by_group() and once we drop the conn->lock, the
	 * list can get modified. However we are holding mark reference and
	 * thus our mark cannot be removed from obj_list so we can continue
	 * iteration after regaining conn->lock.
	 */
	hlist_for_each_entry(mark, &conn->list, obj_list) {
		fsnotify_get_mark(mark);
		spin_unlock(&conn->lock);
		if (old_mark)
			fsnotify_put_mark(old_mark);
		old_mark = mark;
		fsnotify_destroy_mark(mark, mark->group);
		spin_lock(&conn->lock);
	}
	/*
	 * Detach list from object now so that we don't pin inode until all
	 * mark references get dropped. It would lead to strange results such
	 * as delaying inode deletion or blocking unmount.
	 */
	objp = fsnotify_detach_connector_from_object(conn, &type);
	spin_unlock(&conn->lock);
	if (old_mark)
		fsnotify_put_mark(old_mark);
	fsnotify_drop_object(type, objp);
}

/*
 * Nothing fancy, just initialize lists and locks and counters.
 */
void fsnotify_init_mark(struct fsnotify_mark *mark,
			struct fsnotify_group *group)
{
	memset(mark, 0, sizeof(*mark));
	spin_lock_init(&mark->lock);
	refcount_set(&mark->refcnt, 1);
	fsnotify_get_group(group);
	mark->group = group;
	WRITE_ONCE(mark->connector, NULL);
}
EXPORT_SYMBOL_GPL(fsnotify_init_mark);

/*
 * Destroy all marks in destroy_list, waits for SRCU period to finish before
 * actually freeing marks.
 */
static void fsnotify_mark_destroy_workfn(struct work_struct *work)
{
	struct fsnotify_mark *mark, *next;
	struct list_head private_destroy_list;

	spin_lock(&destroy_lock);
	/* exchange the list head */
	list_replace_init(&destroy_list, &private_destroy_list);
	spin_unlock(&destroy_lock);

	synchronize_srcu(&fsnotify_mark_srcu);

	list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
		list_del_init(&mark->g_list);
		fsnotify_final_mark_destroy(mark);
	}
}

/* Wait for all marks queued for destruction to be actually destroyed */
void fsnotify_wait_marks_destroyed(void)
{
	flush_delayed_work(&reaper_work);
}
EXPORT_SYMBOL_GPL(fsnotify_wait_marks_destroyed);
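/*
 * Illustrative sketch, not part of fs/notify/mark.c: a standalone userspace
 * mirror of the ordering contract that fsnotify_compare_groups() documents
 * above -- higher-priority groups sort first, ties are broken by group
 * address so inode and vfsmount mark lists can be merged in linear time.
 * "demo_group" and "demo_compare" are invented names for illustration only.
 */
#include <stdio.h>

struct demo_group {
	int priority;
};

/* Same contract: 1 = b goes first, -1 = a goes first, 0 = handle together. */
static int demo_compare(const struct demo_group *a, const struct demo_group *b)
{
	if (a == b)
		return 0;
	if (!a)
		return 1;
	if (!b)
		return -1;
	if (a->priority < b->priority)
		return 1;
	if (a->priority > b->priority)
		return -1;
	return (a < b) ? 1 : -1;
}

int main(void)
{
	struct demo_group hi = { .priority = 2 }, lo = { .priority = 0 };

	printf("%d\n", demo_compare(&hi, &lo));	/* -1: hi sorts first */
	printf("%d\n", demo_compare(&lo, &hi));	/*  1: hi still sorts first */
	printf("%d\n", demo_compare(&hi, &hi));	/*  0: same group */
	return 0;
}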
// SPDX-License-Identifier: GPL-2.0
/* dvb-usb-urb.c is part of the DVB USB library.
 *
 * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
 * see dvb-usb-init.c for copyright information.
 *
 * This file keeps functions for initializing and handling the
 * USB and URB stuff.
 */
#include "dvb-usb-common.h"

int dvb_usb_generic_rw(struct dvb_usb_device *d, u8 *wbuf, u16 wlen, u8 *rbuf,
		       u16 rlen, int delay_ms)
{
	int actlen = 0, ret = -ENOMEM;

	if (!d || wbuf == NULL || wlen == 0)
		return -EINVAL;

	if (d->props.generic_bulk_ctrl_endpoint == 0) {
		err("endpoint for generic control not specified.");
		return -EINVAL;
	}

	if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
		return ret;

	deb_xfer(">>> ");
	debug_dump(wbuf, wlen, deb_xfer);

	ret = usb_bulk_msg(d->udev, usb_sndbulkpipe(d->udev,
			   d->props.generic_bulk_ctrl_endpoint), wbuf, wlen,
			   &actlen, 2000);

	if (ret)
		err("bulk message failed: %d (%d/%d)", ret, wlen, actlen);
	else
		ret = actlen != wlen ? -1 : 0;

	/* an answer is expected, and no error before */
	if (!ret && rbuf && rlen) {
		if (delay_ms)
			msleep(delay_ms);

		ret = usb_bulk_msg(d->udev, usb_rcvbulkpipe(d->udev,
				   d->props.generic_bulk_ctrl_endpoint_response ?
				   d->props.generic_bulk_ctrl_endpoint_response :
				   d->props.generic_bulk_ctrl_endpoint),
				   rbuf, rlen, &actlen, 2000);

		if (ret)
			err("recv bulk message failed: %d", ret);
		else {
			deb_xfer("<<< ");
			debug_dump(rbuf, actlen, deb_xfer);
		}
	}

	mutex_unlock(&d->usb_mutex);
	return ret;
}
EXPORT_SYMBOL(dvb_usb_generic_rw);

int dvb_usb_generic_write(struct dvb_usb_device *d, u8 *buf, u16 len)
{
	return dvb_usb_generic_rw(d, buf, len, NULL, 0, 0);
}
EXPORT_SYMBOL(dvb_usb_generic_write);

static void dvb_usb_data_complete(struct usb_data_stream *stream, u8 *buffer,
				  size_t length)
{
	struct dvb_usb_adapter *adap = stream->user_priv;

	if (adap->feedcount > 0 && adap->state & DVB_USB_ADAP_STATE_DVB)
		dvb_dmx_swfilter(&adap->demux, buffer, length);
}

static void dvb_usb_data_complete_204(struct usb_data_stream *stream,
				      u8 *buffer, size_t length)
{
	struct dvb_usb_adapter *adap = stream->user_priv;

	if (adap->feedcount > 0 && adap->state & DVB_USB_ADAP_STATE_DVB)
		dvb_dmx_swfilter_204(&adap->demux, buffer, length);
}

static void dvb_usb_data_complete_raw(struct usb_data_stream *stream,
				      u8 *buffer, size_t length)
{
	struct dvb_usb_adapter *adap = stream->user_priv;

	if (adap->feedcount > 0 && adap->state & DVB_USB_ADAP_STATE_DVB)
		dvb_dmx_swfilter_raw(&adap->demux, buffer, length);
}

int dvb_usb_adapter_stream_init(struct dvb_usb_adapter *adap)
{
	int i, ret = 0;

	for (i = 0; i < adap->props.num_frontends; i++) {
		adap->fe_adap[i].stream.udev = adap->dev->udev;
		if (adap->props.fe[i].caps & DVB_USB_ADAP_RECEIVES_204_BYTE_TS)
			adap->fe_adap[i].stream.complete =
				dvb_usb_data_complete_204;
		else if (adap->props.fe[i].caps & DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD)
			adap->fe_adap[i].stream.complete =
				dvb_usb_data_complete_raw;
		else
			adap->fe_adap[i].stream.complete = dvb_usb_data_complete;
		adap->fe_adap[i].stream.user_priv = adap;
		ret = usb_urb_init(&adap->fe_adap[i].stream,
				   &adap->props.fe[i].stream);
		if (ret < 0)
			break;
	}
	return ret;
}

int
dvb_usb_adapter_stream_exit(struct dvb_usb_adapter *adap)
{
	int i;

	for (i = 0; i < adap->props.num_frontends; i++)
		usb_urb_exit(&adap->fe_adap[i].stream);
	return 0;
}
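/*
 * Illustrative sketch, not part of dvb-usb-urb.c: how a dvb-usb sub-driver
 * typically calls dvb_usb_generic_rw() above -- one bulk write on the
 * generic control endpoint, an optional settle delay, then one bulk read
 * for the reply. The opcode 0x08 and the name "demo_read_reg" are
 * hypothetical; a real sub-driver uses its device's own command set.
 */
static int demo_read_reg(struct dvb_usb_device *d, u8 reg, u8 *val)
{
	u8 wbuf[2] = { 0x08, reg };	/* hypothetical "read register" cmd */
	u8 rbuf[1];
	int ret;

	/* give the firmware 10 ms between the write and the read */
	ret = dvb_usb_generic_rw(d, wbuf, sizeof(wbuf), rbuf, sizeof(rbuf), 10);
	if (ret < 0)
		return ret;

	*val = rbuf[0];
	return 0;
}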
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
 *		      Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
 * Copyright (c) 2002, 2003 Tuukka Toivonen
 * Copyright (c) 2008 Erik Andrén
 *
 * P/N 861037:      Sensor HDCS1000        ASIC STV0600
 * P/N 861050-0010: Sensor HDCS1000        ASIC STV0600
 * P/N 861050-0020: Sensor Photobit PB100  ASIC STV0600-1 - QuickCam Express
 * P/N 861055:      Sensor ST VV6410       ASIC STV0610   - LEGO cam
 * P/N 861075-0040: Sensor HDCS1000        ASIC
 * P/N 961179-0700: Sensor ST VV6410       ASIC STV0602   - Dexxa WebCam USB
 * P/N 861040-0000: Sensor ST VV6410       ASIC STV0610   - QuickCam Web
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/input.h>
#include "stv06xx_sensor.h"

MODULE_AUTHOR("Erik Andrén");
MODULE_DESCRIPTION("STV06XX USB Camera Driver");
MODULE_LICENSE("GPL");

static bool dump_bridge;
static bool dump_sensor;

int stv06xx_write_bridge(struct sd *sd, u16 address, u16
i2c_data) { int err; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; u8 len = (i2c_data > 0xff) ? 2 : 1; buf[0] = i2c_data & 0xff; buf[1] = (i2c_data >> 8) & 0xff; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, address, 0, buf, len, STV06XX_URB_MSG_TIMEOUT); gspca_dbg(gspca_dev, D_CONF, "Written 0x%x to address 0x%x, status: %d\n", i2c_data, address, err); return (err < 0) ? err : 0; } int stv06xx_read_bridge(struct sd *sd, u16 address, u8 *i2c_data) { int err; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x04, 0xc0, address, 0, buf, 1, STV06XX_URB_MSG_TIMEOUT); *i2c_data = buf[0]; gspca_dbg(gspca_dev, D_CONF, "Reading 0x%x from address 0x%x, status %d\n", *i2c_data, address, err); return (err < 0) ? err : 0; } /* Wraps the normal write sensor bytes / words functions for writing a single value */ int stv06xx_write_sensor(struct sd *sd, u8 address, u16 value) { if (sd->sensor->i2c_len == 2) { u16 data[2] = { address, value }; return stv06xx_write_sensor_words(sd, data, 1); } else { u8 data[2] = { address, value }; return stv06xx_write_sensor_bytes(sd, data, 1); } } static int stv06xx_write_sensor_finish(struct sd *sd) { int err = 0; if (sd->bridge == BRIDGE_STV610) { struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; buf[0] = 0; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x1704, 0, buf, 1, STV06XX_URB_MSG_TIMEOUT); } return (err < 0) ? err : 0; } int stv06xx_write_sensor_bytes(struct sd *sd, const u8 *data, u8 len) { int err, i, j; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; gspca_dbg(gspca_dev, D_CONF, "I2C: Command buffer contains %d entries\n", len); for (i = 0; i < len;) { /* Build the command buffer */ memset(buf, 0, I2C_BUFFER_LENGTH); for (j = 0; j < I2C_MAX_BYTES && i < len; j++, i++) { buf[j] = data[2*i]; buf[0x10 + j] = data[2*i+1]; gspca_dbg(gspca_dev, D_CONF, "I2C: Writing 0x%02x to reg 0x%02x\n", data[2*i+1], data[2*i]); } buf[0x20] = sd->sensor->i2c_addr; buf[0x21] = j - 1; /* Number of commands to send - 1 */ buf[0x22] = I2C_WRITE_CMD; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x0400, 0, buf, I2C_BUFFER_LENGTH, STV06XX_URB_MSG_TIMEOUT); if (err < 0) return err; } return stv06xx_write_sensor_finish(sd); } int stv06xx_write_sensor_words(struct sd *sd, const u16 *data, u8 len) { int err, i, j; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; gspca_dbg(gspca_dev, D_CONF, "I2C: Command buffer contains %d entries\n", len); for (i = 0; i < len;) { /* Build the command buffer */ memset(buf, 0, I2C_BUFFER_LENGTH); for (j = 0; j < I2C_MAX_WORDS && i < len; j++, i++) { buf[j] = data[2*i]; buf[0x10 + j * 2] = data[2*i+1]; buf[0x10 + j * 2 + 1] = data[2*i+1] >> 8; gspca_dbg(gspca_dev, D_CONF, "I2C: Writing 0x%04x to reg 0x%02x\n", data[2*i+1], data[2*i]); } buf[0x20] = sd->sensor->i2c_addr; buf[0x21] = j - 1; /* Number of commands to send - 1 */ buf[0x22] = I2C_WRITE_CMD; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x0400, 0, buf, I2C_BUFFER_LENGTH, STV06XX_URB_MSG_TIMEOUT); if (err < 0) return err; } return stv06xx_write_sensor_finish(sd); } int stv06xx_read_sensor(struct sd 
*sd, const u8 address, u16 *value) { int err; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; err = stv06xx_write_bridge(sd, STV_I2C_FLUSH, sd->sensor->i2c_flush); if (err < 0) return err; /* Clear mem */ memset(buf, 0, I2C_BUFFER_LENGTH); buf[0] = address; buf[0x20] = sd->sensor->i2c_addr; buf[0x21] = 0; /* Read I2C register */ buf[0x22] = I2C_READ_CMD; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x1400, 0, buf, I2C_BUFFER_LENGTH, STV06XX_URB_MSG_TIMEOUT); if (err < 0) { pr_err("I2C: Read error writing address: %d\n", err); return err; } err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x04, 0xc0, 0x1410, 0, buf, sd->sensor->i2c_len, STV06XX_URB_MSG_TIMEOUT); if (sd->sensor->i2c_len == 2) *value = buf[0] | (buf[1] << 8); else *value = buf[0]; gspca_dbg(gspca_dev, D_CONF, "I2C: Read 0x%x from address 0x%x, status: %d\n", *value, address, err); return (err < 0) ? err : 0; } /* Dumps all bridge registers */ static void stv06xx_dump_bridge(struct sd *sd) { int i; u8 data, buf; pr_info("Dumping all stv06xx bridge registers\n"); for (i = 0x1400; i < 0x160f; i++) { stv06xx_read_bridge(sd, i, &data); pr_info("Read 0x%x from address 0x%x\n", data, i); } pr_info("Testing stv06xx bridge registers for writability\n"); for (i = 0x1400; i < 0x160f; i++) { stv06xx_read_bridge(sd, i, &data); buf = data; stv06xx_write_bridge(sd, i, 0xff); stv06xx_read_bridge(sd, i, &data); if (data == 0xff) pr_info("Register 0x%x is read/write\n", i); else if (data != buf) pr_info("Register 0x%x is read/write, but only partially\n", i); else pr_info("Register 0x%x is read-only\n", i); stv06xx_write_bridge(sd, i, buf); } } /* this function is called at probe and resume time */ static int stv06xx_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int err; gspca_dbg(gspca_dev, D_PROBE, "Initializing camera\n"); /* Let the usb init settle for a bit before performing the initialization */ msleep(250); err = sd->sensor->init(sd); if (dump_sensor && sd->sensor->dump) sd->sensor->dump(sd); return (err < 0) ? err : 0; } /* this function is called at probe time */ static int stv06xx_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; gspca_dbg(gspca_dev, D_PROBE, "Initializing controls\n"); gspca_dev->vdev.ctrl_handler = &gspca_dev->ctrl_handler; return sd->sensor->init_controls(sd); } /* Start the camera */ static int stv06xx_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct usb_host_interface *alt; struct usb_interface *intf; int err, packet_size; intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface); alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt); if (!alt) { gspca_err(gspca_dev, "Couldn't get altsetting\n"); return -EIO; } if (alt->desc.bNumEndpoints < 1) return -ENODEV; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); err = stv06xx_write_bridge(sd, STV_ISO_SIZE_L, packet_size); if (err < 0) return err; /* Prepare the sensor for start */ err = sd->sensor->start(sd); if (err < 0) goto out; /* Start isochronous streaming */ err = stv06xx_write_bridge(sd, STV_ISO_ENABLE, 1); out: if (err < 0) gspca_dbg(gspca_dev, D_STREAM, "Starting stream failed\n"); else gspca_dbg(gspca_dev, D_STREAM, "Started streaming\n"); return (err < 0) ? 
err : 0; } static int stv06xx_isoc_init(struct gspca_dev *gspca_dev) { struct usb_interface_cache *intfc; struct usb_host_interface *alt; struct sd *sd = (struct sd *) gspca_dev; intfc = gspca_dev->dev->actconfig->intf_cache[0]; if (intfc->num_altsetting < 2) return -ENODEV; alt = &intfc->altsetting[1]; if (alt->desc.bNumEndpoints < 1) return -ENODEV; /* Start isoc bandwidth "negotiation" at max isoc bandwidth */ alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(sd->sensor->max_packet_size[gspca_dev->curr_mode]); return 0; } static int stv06xx_isoc_nego(struct gspca_dev *gspca_dev) { int ret, packet_size, min_packet_size; struct usb_host_interface *alt; struct sd *sd = (struct sd *) gspca_dev; /* * Existence of altsetting and endpoint was verified in * stv06xx_isoc_init() */ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); min_packet_size = sd->sensor->min_packet_size[gspca_dev->curr_mode]; if (packet_size <= min_packet_size) return -EIO; packet_size -= 100; if (packet_size < min_packet_size) packet_size = min_packet_size; alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(packet_size); ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1); if (ret < 0) gspca_err(gspca_dev, "set alt 1 err %d\n", ret); return ret; } static void stv06xx_stopN(struct gspca_dev *gspca_dev) { int err; struct sd *sd = (struct sd *) gspca_dev; /* stop ISO-streaming */ err = stv06xx_write_bridge(sd, STV_ISO_ENABLE, 0); if (err < 0) goto out; err = sd->sensor->stop(sd); out: if (err < 0) gspca_dbg(gspca_dev, D_STREAM, "Failed to stop stream\n"); else gspca_dbg(gspca_dev, D_STREAM, "Stopped streaming\n"); } /* * Analyse an USB packet of the data stream and store it appropriately. * Each packet contains an integral number of chunks. Each chunk has * 2-bytes identification, followed by 2-bytes that describe the chunk * length. Known/guessed chunk identifications are: * 8001/8005/C001/C005 - Begin new frame * 8002/8006/C002/C006 - End frame * 0200/4200 - Contains actual image data, bayer or compressed * 0005 - 11 bytes of unknown data * 0100 - 2 bytes of unknown data * The 0005 and 0100 chunks seem to appear only in compressed stream. */ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; gspca_dbg(gspca_dev, D_PACK, "Packet of length %d arrived\n", len); /* A packet may contain several frames loop until the whole packet is reached */ while (len) { int id, chunk_len; if (len < 4) { gspca_dbg(gspca_dev, D_PACK, "Packet is smaller than 4 bytes\n"); return; } /* Capture the id */ id = (data[0] << 8) | data[1]; /* Capture the chunk length */ chunk_len = (data[2] << 8) | data[3]; gspca_dbg(gspca_dev, D_PACK, "Chunk id: %x, length: %d\n", id, chunk_len); data += 4; len -= 4; if (len < chunk_len) { gspca_err(gspca_dev, "URB packet length is smaller than the specified chunk length\n"); gspca_dev->last_packet_type = DISCARD_PACKET; return; } /* First byte seem to be 02=data 2nd byte is unknown??? */ if (sd->bridge == BRIDGE_ST6422 && (id & 0xff00) == 0x0200) goto frame_data; switch (id) { case 0x0200: case 0x4200: frame_data: gspca_dbg(gspca_dev, D_PACK, "Frame data packet detected\n"); if (sd->to_skip) { int skip = (sd->to_skip < chunk_len) ? 
sd->to_skip : chunk_len; data += skip; len -= skip; chunk_len -= skip; sd->to_skip -= skip; } gspca_frame_add(gspca_dev, INTER_PACKET, data, chunk_len); break; case 0x8001: case 0x8005: case 0xc001: case 0xc005: gspca_dbg(gspca_dev, D_PACK, "Starting new frame\n"); /* Create a new frame, chunk length should be zero */ gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); if (sd->bridge == BRIDGE_ST6422) sd->to_skip = gspca_dev->pixfmt.width * 4; if (chunk_len) gspca_err(gspca_dev, "Chunk length is non-zero on a SOF\n"); break; case 0x8002: case 0x8006: case 0xc002: gspca_dbg(gspca_dev, D_PACK, "End of frame detected\n"); /* Complete the last frame (if any) */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); if (chunk_len) gspca_err(gspca_dev, "Chunk length is non-zero on a EOF\n"); break; case 0x0005: gspca_dbg(gspca_dev, D_PACK, "Chunk 0x005 detected\n"); /* Unknown chunk with 11 bytes of data, occurs just before end of each frame in compressed mode */ break; case 0x0100: gspca_dbg(gspca_dev, D_PACK, "Chunk 0x0100 detected\n"); /* Unknown chunk with 2 bytes of data, occurs 2-3 times per USB interrupt */ break; case 0x42ff: gspca_dbg(gspca_dev, D_PACK, "Chunk 0x42ff detected\n"); /* Special chunk seen sometimes on the ST6422 */ break; default: gspca_dbg(gspca_dev, D_PACK, "Unknown chunk 0x%04x detected\n", id); /* Unknown chunk */ } data += chunk_len; len -= chunk_len; } } #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ int len) /* interrupt packet length */ { int ret = -EINVAL; if (len == 1 && (data[0] == 0x80 || data[0] == 0x10)) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); input_sync(gspca_dev->input_dev); ret = 0; } if (len == 1 && (data[0] == 0x88 || data[0] == 0x11)) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); ret = 0; } return ret; } #endif static int stv06xx_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id); static void stv06xx_probe_error(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; kfree(sd->sensor_priv); sd->sensor_priv = NULL; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = stv06xx_config, .init = stv06xx_init, .init_controls = stv06xx_init_controls, .probe_error = stv06xx_probe_error, .start = stv06xx_start, .stopN = stv06xx_stopN, .pkt_scan = stv06xx_pkt_scan, .isoc_init = stv06xx_isoc_init, .isoc_nego = stv06xx_isoc_nego, #if IS_ENABLED(CONFIG_INPUT) .int_pkt_scan = sd_int_pkt_scan, #endif }; /* This function is called at probe time */ static int stv06xx_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; gspca_dbg(gspca_dev, D_PROBE, "Configuring camera\n"); sd->bridge = id->driver_info; gspca_dev->sd_desc = &sd_desc; if (dump_bridge) stv06xx_dump_bridge(sd); sd->sensor = &stv06xx_sensor_st6422; if (!sd->sensor->probe(sd)) return 0; sd->sensor = &stv06xx_sensor_vv6410; if (!sd->sensor->probe(sd)) return 0; sd->sensor = &stv06xx_sensor_hdcs1x00; if (!sd->sensor->probe(sd)) return 0; sd->sensor = &stv06xx_sensor_hdcs1020; if (!sd->sensor->probe(sd)) return 0; sd->sensor = &stv06xx_sensor_pb0100; if (!sd->sensor->probe(sd)) return 0; sd->sensor = NULL; return -ENODEV; } /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 }, /* QuickCam Express */ {USB_DEVICE(0x046d, 0x0850), .driver_info = 
BRIDGE_STV610 },	/* LEGO cam / QuickCam Web */
	{USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 },	/* Dexxa WebCam USB */
	{USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 },	/* QuickCam Messenger */
	{USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 },	/* QuickCam Communicate */
	{USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 },	/* QuickCam Messenger (new) */
	{}
};
MODULE_DEVICE_TABLE(usb, device_table);

/* -- device connect -- */
static int sd_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
			       THIS_MODULE);
}

static void sd_disconnect(struct usb_interface *intf)
{
	struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
	struct sd *sd = (struct sd *) gspca_dev;
	void *priv = sd->sensor_priv;

	gspca_dbg(gspca_dev, D_PROBE, "Disconnecting the stv06xx device\n");

	sd->sensor = NULL;
	gspca_disconnect(intf);
	kfree(priv);
}

static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = sd_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
	.reset_resume = gspca_resume,
#endif
};

module_usb_driver(sd_driver);

module_param(dump_bridge, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup");
module_param(dump_sensor, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_sensor, "Dumps all sensor registers at startup");
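/*
 * Illustrative sketch, not part of stv06xx.c: a standalone userspace walk
 * over the chunk format that stv06xx_pkt_scan() above documents -- each
 * chunk is a 2-byte big-endian id followed by a 2-byte big-endian payload
 * length. The sample buffer is made up for illustration.
 */
#include <stdio.h>
#include <stddef.h>

static void demo_walk_chunks(const unsigned char *data, size_t len)
{
	while (len >= 4) {
		unsigned int id = (data[0] << 8) | data[1];
		unsigned int chunk_len = (data[2] << 8) | data[3];

		data += 4;
		len -= 4;
		if (chunk_len > len)	/* truncated chunk: discard */
			return;
		printf("chunk id 0x%04x, %u payload bytes\n", id, chunk_len);
		data += chunk_len;
		len -= chunk_len;
	}
}

int main(void)
{
	/* SOF (0x8001, len 0), 4 data bytes (0x0200), EOF (0x8002) */
	const unsigned char pkt[] = {
		0x80, 0x01, 0x00, 0x00,
		0x02, 0x00, 0x00, 0x04, 0xde, 0xad, 0xbe, 0xef,
		0x80, 0x02, 0x00, 0x00,
	};

	demo_walk_chunks(pkt, sizeof(pkt));
	return 0;
}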
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/module.h>
#include <linux/usb.h>
#include "main.h"
#include "rtw8821c.h"
#include "usb.h"

static const struct usb_device_id rtw_8821cu_id_table[] = {
	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8731, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb820, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc80c, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc820, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82a, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
	{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xd811, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
	{},
};
MODULE_DEVICE_TABLE(usb, rtw_8821cu_id_table);

static int rtw_8821cu_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
{
	return rtw_usb_probe(intf, id);
}

static struct usb_driver rtw_8821cu_driver = {
	.name = KBUILD_MODNAME,
	.id_table = rtw_8821cu_id_table,
	.probe = rtw_8821cu_probe,
	.disconnect = rtw_usb_disconnect,
};
module_usb_driver(rtw_8821cu_driver);

MODULE_AUTHOR("Hans Ulli Kroll <linux@ulli-kroll.de>");
MODULE_DESCRIPTION("Realtek 802.11ac wireless 8821cu driver");
MODULE_LICENSE("Dual BSD/GPL");
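/*
 * Illustrative sketch, not part of rtw8821cu.c: the shape of an additional
 * rtw_8821cu_id_table[] row for another 8821CU-based dongle. The
 * 0x1234/0x5678 vendor/product pair is a placeholder, not a real device; a
 * real row must match the vendor-specific interface class (0xff/0xff/0xff)
 * like the existing entries and must be added before the terminating {}:
 *
 *	{ USB_DEVICE_AND_INTERFACE_INFO(0x1234, 0x5678, 0xff, 0xff, 0xff),
 *	  .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
 */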
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
#define __LINUX_INSIDE_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types_raw:
 *                        The raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
* (which is an empty structure on non-debug builds) * * linux/spinlock_types_raw: * The raw RT types and initializers * linux/spinlock_types.h: * defines the generic type and initializers * * linux/spinlock_up.h: * contains the arch_spin_*()/etc. version of UP * builds. (which are NOPs on non-debug, non-preempt * builds) * * (included on UP-non-debug builds:) * * linux/spinlock_api_up.h: * builds the _spin_*() APIs. * * linux/spinlock.h: builds the final spin_*() APIs. */ #include <linux/typecheck.h> #include <linux/preempt.h> #include <linux/linkage.h> #include <linux/compiler.h> #include <linux/irqflags.h> #include <linux/thread_info.h> #include <linux/stringify.h> #include <linux/bottom_half.h> #include <linux/lockdep.h> #include <linux/cleanup.h> #include <asm/barrier.h> #include <asm/mmiowb.h> /* * Must define these before including other files, inline functions need them */ #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME #define LOCK_SECTION_START(extra) \ ".subsection 1\n\t" \ extra \ ".ifndef " LOCK_SECTION_NAME "\n\t" \ LOCK_SECTION_NAME ":\n\t" \ ".endif\n" #define LOCK_SECTION_END \ ".previous\n\t" #define __lockfunc __section(".spinlock.text") /* * Pull the arch_spinlock_t and arch_rwlock_t definitions: */ #include <linux/spinlock_types.h> /* * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them): */ #ifdef CONFIG_SMP # include <asm/spinlock.h> #else # include <linux/spinlock_up.h> #endif #ifdef CONFIG_DEBUG_SPINLOCK extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, struct lock_class_key *key, short inner); # define raw_spin_lock_init(lock) \ do { \ static struct lock_class_key __key; \ \ __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \ } while (0) #else # define raw_spin_lock_init(lock) \ do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) #endif #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) #ifdef arch_spin_is_contended #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) #else #define raw_spin_is_contended(lock) (((void)(lock), 0)) #endif /*arch_spin_is_contended*/ /* * smp_mb__after_spinlock() provides the equivalent of a full memory barrier * between program-order earlier lock acquisitions and program-order later * memory accesses. * * This guarantees that the following two properties hold: * * 1) Given the snippet: * * { X = 0; Y = 0; } * * CPU0 CPU1 * * WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1); * spin_lock(S); smp_mb(); * smp_mb__after_spinlock(); r1 = READ_ONCE(X); * r0 = READ_ONCE(Y); * spin_unlock(S); * * it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0) * and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments * preceding the call to smp_mb__after_spinlock() in __schedule() and in * try_to_wake_up(). * * 2) Given the snippet: * * { X = 0; Y = 0; } * * CPU0 CPU1 CPU2 * * spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y); * WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb(); * spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X); * WRITE_ONCE(Y, 1); * spin_unlock(S); * * it is forbidden that CPU0's critical section executes before CPU1's * critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1) * and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments * preceding the calls to smp_rmb() in try_to_wake_up() for similar * snippets but "projected" onto two CPUs. * * Property (2) upgrades the lock to an RCsc lock. 
* * Since most load-store architectures implement ACQUIRE with an smp_mb() after * the LL/SC loop, they need no further barriers. Similarly all our TSO * architectures imply an smp_mb() for each atomic instruction and equally don't * need more. * * Architectures that can implement ACQUIRE better need to take care. */ #ifndef smp_mb__after_spinlock #define smp_mb__after_spinlock() kcsan_mb() #endif #ifdef CONFIG_DEBUG_SPINLOCK extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); extern int do_raw_spin_trylock(raw_spinlock_t *lock); extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); #else static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock) { __acquire(lock); arch_spin_lock(&lock->raw_lock); mmiowb_spin_lock(); } static inline int do_raw_spin_trylock(raw_spinlock_t *lock) { int ret = arch_spin_trylock(&(lock)->raw_lock); if (ret) mmiowb_spin_lock(); return ret; } static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) { mmiowb_spin_unlock(); arch_spin_unlock(&lock->raw_lock); __release(lock); } #endif /* * Define the various spin_lock methods. Note we define these * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The * various methods are defined as nops in the case they are not * required. */ #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) #define raw_spin_lock(lock) _raw_spin_lock(lock) #ifdef CONFIG_DEBUG_LOCK_ALLOC # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock_nested(lock, subclass) # define raw_spin_lock_nest_lock(lock, nest_lock) \ do { \ typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ } while (0) #else /* * Always evaluate the 'subclass' argument to avoid that the compiler * warns about set-but-not-used variables when building with * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. */ # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock(((void)(subclass), (lock))) # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) #endif #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) #define raw_spin_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ flags = _raw_spin_lock_irqsave(lock); \ } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ typecheck(unsigned long, flags); \ flags = _raw_spin_lock_irqsave_nested(lock, subclass); \ } while (0) #else #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ typecheck(unsigned long, flags); \ flags = _raw_spin_lock_irqsave(lock); \ } while (0) #endif #else #define raw_spin_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ _raw_spin_lock_irqsave(lock, flags); \ } while (0) #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ raw_spin_lock_irqsave(lock, flags) #endif #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) #define raw_spin_unlock(lock) _raw_spin_unlock(lock) #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) #define raw_spin_unlock_irqrestore(lock, flags) \ do { \ typecheck(unsigned long, flags); \ _raw_spin_unlock_irqrestore(lock, flags); \ } while (0) #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) #define raw_spin_trylock_bh(lock) \ __cond_lock(lock, _raw_spin_trylock_bh(lock)) #define raw_spin_trylock_irq(lock) \ ({ \ local_irq_disable(); \ raw_spin_trylock(lock) ? 
\ 1 : ({ local_irq_enable(); 0; }); \ }) #define raw_spin_trylock_irqsave(lock, flags) \ ({ \ local_irq_save(flags); \ raw_spin_trylock(lock) ? \ 1 : ({ local_irq_restore(flags); 0; }); \ }) #ifndef CONFIG_PREEMPT_RT /* Include rwlock functions for !RT */ #include <linux/rwlock.h> #endif /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: */ #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) # include <linux/spinlock_api_smp.h> #else # include <linux/spinlock_api_up.h> #endif /* Non PREEMPT_RT kernel, map to raw spinlocks: */ #ifndef CONFIG_PREEMPT_RT /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock) { return &lock->rlock; } #ifdef CONFIG_DEBUG_SPINLOCK # define spin_lock_init(lock) \ do { \ static struct lock_class_key __key; \ \ __raw_spin_lock_init(spinlock_check(lock), \ #lock, &__key, LD_WAIT_CONFIG); \ } while (0) #else # define spin_lock_init(_lock) \ do { \ spinlock_check(_lock); \ *(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \ } while (0) #endif static __always_inline void spin_lock(spinlock_t *lock) { raw_spin_lock(&lock->rlock); } static __always_inline void spin_lock_bh(spinlock_t *lock) { raw_spin_lock_bh(&lock->rlock); } static __always_inline int spin_trylock(spinlock_t *lock) { return raw_spin_trylock(&lock->rlock); } #define spin_lock_nested(lock, subclass) \ do { \ raw_spin_lock_nested(spinlock_check(lock), subclass); \ } while (0) #define spin_lock_nest_lock(lock, nest_lock) \ do { \ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ } while (0) static __always_inline void spin_lock_irq(spinlock_t *lock) { raw_spin_lock_irq(&lock->rlock); } #define spin_lock_irqsave(lock, flags) \ do { \ raw_spin_lock_irqsave(spinlock_check(lock), flags); \ } while (0) #define spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ } while (0) static __always_inline void spin_unlock(spinlock_t *lock) { raw_spin_unlock(&lock->rlock); } static __always_inline void spin_unlock_bh(spinlock_t *lock) { raw_spin_unlock_bh(&lock->rlock); } static __always_inline void spin_unlock_irq(spinlock_t *lock) { raw_spin_unlock_irq(&lock->rlock); } static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) { raw_spin_unlock_irqrestore(&lock->rlock, flags); } static __always_inline int spin_trylock_bh(spinlock_t *lock) { return raw_spin_trylock_bh(&lock->rlock); } static __always_inline int spin_trylock_irq(spinlock_t *lock) { return raw_spin_trylock_irq(&lock->rlock); } #define spin_trylock_irqsave(lock, flags) \ ({ \ raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ }) /** * spin_is_locked() - Check whether a spinlock is locked. * @lock: Pointer to the spinlock. * * This function is NOT required to provide any memory ordering * guarantees; it could be used for debugging purposes or, when * additional synchronization is needed, accompanied with other * constructs (memory barriers) enforcing the synchronization. * * Returns: 1 if @lock is locked, 0 otherwise. * * Note that the function only tells you that the spinlock is * seen to be locked, not that it is locked on your CPU. * * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n, * the return value is always 0 (see include/linux/spinlock_up.h). * Therefore you should not rely heavily on the return value. 
*/ static __always_inline int spin_is_locked(spinlock_t *lock) { return raw_spin_is_locked(&lock->rlock); } static __always_inline int spin_is_contended(spinlock_t *lock) { return raw_spin_is_contended(&lock->rlock); } #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) #else /* !CONFIG_PREEMPT_RT */ # include <linux/spinlock_rt.h> #endif /* CONFIG_PREEMPT_RT */ /* * Does a critical section need to be broken due to another * task waiting?: (technically does not depend on CONFIG_PREEMPTION, * but a general need for low latency) */ static inline int spin_needbreak(spinlock_t *lock) { if (!preempt_model_preemptible()) return 0; return spin_is_contended(lock); } /* * Check if a rwlock is contended. * Returns non-zero if there is another task waiting on the rwlock. * Returns zero if the lock is not contended or the system / underlying * rwlock implementation does not support contention detection. * Technically does not depend on CONFIG_PREEMPTION, but a general need * for low latency. */ static inline int rwlock_needbreak(rwlock_t *lock) { if (!preempt_model_preemptible()) return 0; return rwlock_is_contended(lock); } /* * Pull the atomic_t declaration: * (asm-mips/atomic.h needs above definitions) */ #include <linux/atomic.h> /** * atomic_dec_and_lock - lock on reaching reference count zero * @atomic: the atomic counter * @lock: the spinlock in question * * Decrements @atomic by 1. If the result is 0, returns true and locks * @lock. Returns false for all other cases. */ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); #define atomic_dec_and_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock, unsigned long *flags); #define atomic_dec_and_lock_irqsave(atomic, lock, flags) \ __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags))) extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock); #define atomic_dec_and_raw_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock)) extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock, unsigned long *flags); #define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \ __cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags))) int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, size_t max_size, unsigned int cpu_mult, gfp_t gfp, const char *name, struct lock_class_key *key); #define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp) \ ({ \ static struct lock_class_key key; \ int ret; \ \ ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size, \ cpu_mult, gfp, #locks, &key); \ ret; \ }) void free_bucket_spinlocks(spinlock_t *locks); DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t, raw_spin_lock(_T->lock), raw_spin_unlock(_T->lock)) DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock)) DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t, raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING), raw_spin_unlock(_T->lock)) DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t, raw_spin_lock_irq(_T->lock), raw_spin_unlock_irq(_T->lock)) DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock)) DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t, raw_spin_lock_bh(_T->lock), raw_spin_unlock_bh(_T->lock)) DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock)) DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t, 
raw_spin_lock_irqsave(_T->lock, _T->flags),
		    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
			 raw_spin_trylock_irqsave(_T->lock, _T->flags))

DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
		    spin_lock(_T->lock),
		    spin_unlock(_T->lock))

DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))

DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
		    spin_lock_irq(_T->lock),
		    spin_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
			 spin_trylock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
		    spin_lock_bh(_T->lock),
		    spin_unlock_bh(_T->lock))

DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try,
			 spin_trylock_bh(_T->lock))

DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
		    spin_lock_irqsave(_T->lock, _T->flags),
		    spin_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)

DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
			 spin_trylock_irqsave(_T->lock, _T->flags))

DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
		    read_lock(_T->lock),
		    read_unlock(_T->lock))

DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
		    read_lock_irq(_T->lock),
		    read_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
		    read_lock_irqsave(_T->lock, _T->flags),
		    read_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)

DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
		    write_lock(_T->lock),
		    write_unlock(_T->lock))

DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
		    write_lock_irq(_T->lock),
		    write_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
		    write_lock_irqsave(_T->lock, _T->flags),
		    write_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)

#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
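/*
 * Illustrative sketch, not part of spinlock.h: the DEFINE_LOCK_GUARD_1()
 * definitions above plug into guard()/scoped_guard() from <linux/cleanup.h>,
 * so the unlock runs automatically when the scope ends. "demo_lock",
 * "demo_counter" and "demo_bump" are invented names; this assumes a normal
 * kernel build where <linux/cleanup.h> is already pulled in here.
 */
static DEFINE_SPINLOCK(demo_lock);
static int demo_counter;

static int demo_bump(void)
{
	/* takes spin_lock(&demo_lock); spin_unlock() on every return path */
	guard(spinlock)(&demo_lock);

	if (demo_counter == INT_MAX)
		return -EOVERFLOW;	/* unlocks automatically */
	return ++demo_counter;		/* and so does this */
}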
// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/acpi/device_sysfs.c - ACPI device sysfs attributes and modalias.
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 * Author: Rafael J.
Wysocki <rafael.j.wysocki@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/acpi.h> #include <linux/device.h> #include <linux/export.h> #include <linux/nls.h> #include "internal.h" static ssize_t acpi_object_path(acpi_handle handle, char *buf) { struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL}; int result; result = acpi_get_name(handle, ACPI_FULL_PATHNAME, &path); if (result) return result; result = sprintf(buf, "%s\n", (char *)path.pointer); kfree(path.pointer); return result; } struct acpi_data_node_attr { struct attribute attr; ssize_t (*show)(struct acpi_data_node *, char *); ssize_t (*store)(struct acpi_data_node *, const char *, size_t count); }; #define DATA_NODE_ATTR(_name) \ static struct acpi_data_node_attr data_node_##_name = \ __ATTR(_name, 0444, data_node_show_##_name, NULL) static ssize_t data_node_show_path(struct acpi_data_node *dn, char *buf) { return dn->handle ? acpi_object_path(dn->handle, buf) : 0; } DATA_NODE_ATTR(path); static struct attribute *acpi_data_node_default_attrs[] = { &data_node_path.attr, NULL }; ATTRIBUTE_GROUPS(acpi_data_node_default); #define to_data_node(k) container_of(k, struct acpi_data_node, kobj) #define to_attr(a) container_of(a, struct acpi_data_node_attr, attr) static ssize_t acpi_data_node_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct acpi_data_node *dn = to_data_node(kobj); struct acpi_data_node_attr *dn_attr = to_attr(attr); return dn_attr->show ? dn_attr->show(dn, buf) : -ENXIO; } static const struct sysfs_ops acpi_data_node_sysfs_ops = { .show = acpi_data_node_attr_show, }; static void acpi_data_node_release(struct kobject *kobj) { struct acpi_data_node *dn = to_data_node(kobj); complete(&dn->kobj_done); } static const struct kobj_type acpi_data_node_ktype = { .sysfs_ops = &acpi_data_node_sysfs_ops, .default_groups = acpi_data_node_default_groups, .release = acpi_data_node_release, }; static void acpi_expose_nondev_subnodes(struct kobject *kobj, struct acpi_device_data *data) { struct list_head *list = &data->subnodes; struct acpi_data_node *dn; if (list_empty(list)) return; list_for_each_entry(dn, list, sibling) { int ret; init_completion(&dn->kobj_done); ret = kobject_init_and_add(&dn->kobj, &acpi_data_node_ktype, kobj, "%s", dn->name); if (!ret) acpi_expose_nondev_subnodes(&dn->kobj, &dn->data); else if (dn->handle) acpi_handle_err(dn->handle, "Failed to expose (%d)\n", ret); } } static void acpi_hide_nondev_subnodes(struct acpi_device_data *data) { struct list_head *list = &data->subnodes; struct acpi_data_node *dn; if (list_empty(list)) return; list_for_each_entry_reverse(dn, list, sibling) { acpi_hide_nondev_subnodes(&dn->data); kobject_put(&dn->kobj); } } /** * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent * @acpi_dev: ACPI device object. * @modalias: Buffer to print into. * @size: Size of the buffer. * * Creates hid/cid(s) string needed for modalias and uevent * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get: * char *modalias: "acpi:IBM0001:ACPI0001" * Return: 0: no _HID and no _CID * -EINVAL: output error * -ENOMEM: output is truncated */ static int create_pnp_modalias(const struct acpi_device *acpi_dev, char *modalias, int size) { int len; int count; struct acpi_hardware_id *id; /* Avoid unnecessarily loading modules for non present devices. 
*/ if (!acpi_device_is_present(acpi_dev)) return 0; /* * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the * device's list. */ count = 0; list_for_each_entry(id, &acpi_dev->pnp.ids, list) if (strcmp(id->id, ACPI_DT_NAMESPACE_HID)) count++; if (!count) return 0; len = snprintf(modalias, size, "acpi:"); if (len >= size) return -ENOMEM; size -= len; list_for_each_entry(id, &acpi_dev->pnp.ids, list) { if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID)) continue; count = snprintf(&modalias[len], size, "%s:", id->id); if (count >= size) return -ENOMEM; len += count; size -= count; } return len; } /** * create_of_modalias - Creates DT compatible string for modalias and uevent * @acpi_dev: ACPI device object. * @modalias: Buffer to print into. * @size: Size of the buffer. * * Expose DT compatible modalias as of:NnameTCcompatible. This function should * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of * ACPI/PNP IDs. */ static int create_of_modalias(const struct acpi_device *acpi_dev, char *modalias, int size) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; const union acpi_object *of_compatible, *obj; acpi_status status; int len, count; int i, nval; char *c; status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf); if (ACPI_FAILURE(status)) return -ENODEV; /* DT strings are all in lower case */ for (c = buf.pointer; *c != '\0'; c++) *c = tolower(*c); len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer); ACPI_FREE(buf.pointer); if (len >= size) return -ENOMEM; size -= len; of_compatible = acpi_dev->data.of_compatible; if (of_compatible->type == ACPI_TYPE_PACKAGE) { nval = of_compatible->package.count; obj = of_compatible->package.elements; } else { /* Must be ACPI_TYPE_STRING. */ nval = 1; obj = of_compatible; } for (i = 0; i < nval; i++, obj++) { count = snprintf(&modalias[len], size, "C%s", obj->string.pointer); if (count >= size) return -ENOMEM; len += count; size -= count; } return len; } int __acpi_device_uevent_modalias(const struct acpi_device *adev, struct kobj_uevent_env *env) { int len; if (!adev) return -ENODEV; if (list_empty(&adev->pnp.ids)) return 0; if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; if (adev->data.of_compatible) len = create_of_modalias(adev, &env->buf[env->buflen - 1], sizeof(env->buf) - env->buflen); else len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], sizeof(env->buf) - env->buflen); if (len < 0) return len; env->buflen += len; return 0; } /** * acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices. * @dev: Struct device to get ACPI device node. * @env: Environment variables of the kobject uevent. * * Create the uevent modalias field for ACPI-enumerated devices. * * Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with * hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001". 
*/ int acpi_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env) { return __acpi_device_uevent_modalias(acpi_companion_match(dev), env); } EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias); static int __acpi_device_modalias(const struct acpi_device *adev, char *buf, int size) { int len, count; if (!adev) return -ENODEV; if (list_empty(&adev->pnp.ids)) return 0; len = create_pnp_modalias(adev, buf, size - 1); if (len < 0) { return len; } else if (len > 0) { buf[len++] = '\n'; size -= len; } if (!adev->data.of_compatible) return len; count = create_of_modalias(adev, buf + len, size - 1); if (count < 0) { return count; } else if (count > 0) { len += count; buf[len++] = '\n'; } return len; } /** * acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices. * @dev: Struct device to get ACPI device node. * @buf: The buffer to save pnp_modalias and of_modalias. * @size: Size of buffer. * * Create the modalias sysfs attribute for ACPI-enumerated devices. * * Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with * hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001". */ int acpi_device_modalias(struct device *dev, char *buf, int size) { return __acpi_device_modalias(acpi_companion_match(dev), buf, size); } EXPORT_SYMBOL_GPL(acpi_device_modalias); static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { return __acpi_device_modalias(to_acpi_device(dev), buf, 1024); } static DEVICE_ATTR_RO(modalias); static ssize_t real_power_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *adev = to_acpi_device(dev); int state; int ret; ret = acpi_device_get_power(adev, &state); if (ret) return ret; return sprintf(buf, "%s\n", acpi_power_state_string(state)); } static DEVICE_ATTR_RO(real_power_state); static ssize_t power_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *adev = to_acpi_device(dev); return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state)); } static DEVICE_ATTR_RO(power_state); static ssize_t eject_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct acpi_device *acpi_device = to_acpi_device(d); acpi_object_type not_used; acpi_status status; if (!count || buf[0] != '1') return -EINVAL; if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled) && !d->driver) return -ENODEV; status = acpi_get_type(acpi_device->handle, &not_used); if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable) return -ENODEV; acpi_dev_get(acpi_device); status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT); if (ACPI_SUCCESS(status)) return count; acpi_dev_put(acpi_device); acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); return status == AE_NO_MEMORY ?
-ENOMEM : -EAGAIN; } static DEVICE_ATTR_WO(eject); static ssize_t hid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev)); } static DEVICE_ATTR_RO(hid); static ssize_t uid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return sprintf(buf, "%s\n", acpi_device_uid(acpi_dev)); } static DEVICE_ATTR_RO(uid); static ssize_t adr_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); if (acpi_dev->pnp.bus_address > U32_MAX) return sprintf(buf, "0x%016llx\n", acpi_dev->pnp.bus_address); else return sprintf(buf, "0x%08llx\n", acpi_dev->pnp.bus_address); } static DEVICE_ATTR_RO(adr); static ssize_t path_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return acpi_object_path(acpi_dev->handle, buf); } static DEVICE_ATTR_RO(path); /* sysfs file that shows description text from the ACPI _STR method */ static ssize_t description_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *str_obj; acpi_status status; int result; status = acpi_evaluate_object_typed(acpi_dev->handle, "_STR", NULL, &buffer, ACPI_TYPE_BUFFER); if (ACPI_FAILURE(status)) return -EIO; str_obj = buffer.pointer; /* * The _STR object contains a Unicode identifier for a device. * We need to convert to utf-8 so it can be displayed. */ result = utf16s_to_utf8s( (wchar_t *)str_obj->buffer.pointer, str_obj->buffer.length, UTF16_LITTLE_ENDIAN, buf, PAGE_SIZE - 1); buf[result++] = '\n'; kfree(str_obj); return result; } static DEVICE_ATTR_RO(description); static ssize_t sun_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); acpi_status status; unsigned long long sun; status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun); if (ACPI_FAILURE(status)) return -EIO; return sprintf(buf, "%llu\n", sun); } static DEVICE_ATTR_RO(sun); static ssize_t hrv_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); acpi_status status; unsigned long long hrv; status = acpi_evaluate_integer(acpi_dev->handle, "_HRV", NULL, &hrv); if (ACPI_FAILURE(status)) return -EIO; return sprintf(buf, "%llu\n", hrv); } static DEVICE_ATTR_RO(hrv); static ssize_t status_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); acpi_status status; unsigned long long sta; status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta); if (ACPI_FAILURE(status)) return -EIO; return sprintf(buf, "%llu\n", sta); } static DEVICE_ATTR_RO(status); static struct attribute *acpi_attrs[] = { &dev_attr_path.attr, &dev_attr_hid.attr, &dev_attr_modalias.attr, &dev_attr_description.attr, &dev_attr_adr.attr, &dev_attr_uid.attr, &dev_attr_sun.attr, &dev_attr_hrv.attr, &dev_attr_status.attr, &dev_attr_eject.attr, &dev_attr_power_state.attr, &dev_attr_real_power_state.attr, NULL }; static bool acpi_show_attr(struct acpi_device *dev, const struct device_attribute *attr) { /* * Devices gotten from FADT don't have a "path" attribute */ if (attr == &dev_attr_path) return dev->handle; if (attr == &dev_attr_hid || attr == 
&dev_attr_modalias) return !list_empty(&dev->pnp.ids); if (attr == &dev_attr_description) return acpi_has_method(dev->handle, "_STR"); if (attr == &dev_attr_adr) return dev->pnp.type.bus_address; if (attr == &dev_attr_uid) return acpi_device_uid(dev); if (attr == &dev_attr_sun) return acpi_has_method(dev->handle, "_SUN"); if (attr == &dev_attr_hrv) return acpi_has_method(dev->handle, "_HRV"); if (attr == &dev_attr_status) return acpi_has_method(dev->handle, "_STA"); /* * If device has _EJ0, 'eject' file is created that is used to trigger * hot-removal function from userland. */ if (attr == &dev_attr_eject) return acpi_has_method(dev->handle, "_EJ0"); if (attr == &dev_attr_power_state) return dev->flags.power_manageable; if (attr == &dev_attr_real_power_state) return dev->flags.power_manageable && dev->power.flags.power_resources; dev_warn_once(&dev->dev, "Unexpected attribute: %s\n", attr->attr.name); return false; } static umode_t acpi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int attrno) { struct acpi_device *dev = to_acpi_device(kobj_to_dev(kobj)); if (acpi_show_attr(dev, container_of(attr, struct device_attribute, attr))) return attr->mode; else return 0; } static const struct attribute_group acpi_group = { .attrs = acpi_attrs, .is_visible = acpi_attr_is_visible, }; const struct attribute_group *acpi_groups[] = { &acpi_group, NULL }; /** * acpi_device_setup_files - Create sysfs attributes of an ACPI device. * @dev: ACPI device object. */ void acpi_device_setup_files(struct acpi_device *dev) { acpi_expose_nondev_subnodes(&dev->dev.kobj, &dev->data); } /** * acpi_device_remove_files - Remove sysfs attributes of an ACPI device. * @dev: ACPI device object. */ void acpi_device_remove_files(struct acpi_device *dev) { acpi_hide_nondev_subnodes(&dev->data); } |
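The modalias built by create_pnp_modalias()/create_of_modalias() is exported both as the MODALIAS uevent variable (consumed by udev/modprobe for module autoloading) and as the per-device modalias sysfs attribute. A small userspace sketch that reads the attribute; the device path is illustrative only:

#include <stdio.h>

int main(void)
{
	char buf[1024];
	/* hypothetical device; real nodes live under /sys/bus/acpi/devices/ */
	FILE *f = fopen("/sys/bus/acpi/devices/PNP0C0A:00/modalias", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("modalias: %s", buf);	/* e.g. "acpi:PNP0C0A:" */
	fclose(f);
	return 0;
}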
// SPDX-License-Identifier: GPL-2.0 /* * Fintek F81232 USB to serial adaptor driver * Fintek F81532A/534A/535/536 USB to 2/4/8/12 serial adaptor driver * * Copyright (C) 2012 Greg Kroah-Hartman (gregkh@linuxfoundation.org) * Copyright (C) 2012 Linux Foundation */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/serial.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial_reg.h> #define F81232_ID \ { USB_DEVICE(0x1934, 0x0706) } /* 1 port UART device */ #define F81534A_SERIES_ID \ { USB_DEVICE(0x2c42, 0x1602) }, /* In-Box 2 port UART device */ \ { USB_DEVICE(0x2c42, 0x1604) }, /* In-Box 4 port UART device */ \ { USB_DEVICE(0x2c42, 0x1605) }, /* In-Box 8 port UART device */ \ { USB_DEVICE(0x2c42, 0x1606) }, /* In-Box 12 port UART device */ \ { USB_DEVICE(0x2c42, 0x1608) }, /* Non-Flash type */ \ { USB_DEVICE(0x2c42, 0x1632) }, /* 2 port UART device */ \ { USB_DEVICE(0x2c42, 0x1634) }, /* 4 port UART device */ \ { USB_DEVICE(0x2c42, 0x1635) }, /* 8 port UART device */ \ { USB_DEVICE(0x2c42, 0x1636) } /* 12 port UART device */ #define F81534A_CTRL_ID \ { USB_DEVICE(0x2c42, 0x16f8) } /* Global control device */ static const struct usb_device_id f81232_id_table[] = { F81232_ID, { } /* Terminating entry */ }; static const struct usb_device_id f81534a_id_table[] = { F81534A_SERIES_ID, { } /* Terminating entry */ }; static const struct usb_device_id f81534a_ctrl_id_table[] = { F81534A_CTRL_ID, { } /* Terminating entry */ }; static const struct usb_device_id combined_id_table[] = { F81232_ID, F81534A_SERIES_ID, F81534A_CTRL_ID, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, combined_id_table); /* Maximum baudrate for F81232 */ #define F81232_MAX_BAUDRATE 1500000 #define F81232_DEF_BAUDRATE 9600 /* USB Control EP parameter */ #define F81232_REGISTER_REQUEST 0xa0 #define F81232_GET_REGISTER 0xc0 #define F81232_SET_REGISTER 0x40 #define F81534A_ACCESS_REG_RETRY 2 #define SERIAL_BASE_ADDRESS 0x0120 #define RECEIVE_BUFFER_REGISTER (0x00 + SERIAL_BASE_ADDRESS) #define INTERRUPT_ENABLE_REGISTER (0x01 + SERIAL_BASE_ADDRESS) #define FIFO_CONTROL_REGISTER (0x02 + SERIAL_BASE_ADDRESS) #define LINE_CONTROL_REGISTER (0x03 + SERIAL_BASE_ADDRESS) #define MODEM_CONTROL_REGISTER (0x04 + SERIAL_BASE_ADDRESS) #define LINE_STATUS_REGISTER (0x05 + SERIAL_BASE_ADDRESS) #define MODEM_STATUS_REGISTER (0x06 + SERIAL_BASE_ADDRESS) /* * F81232 Clock registers (106h) * * Bit1-0: Clock source selector * 00: 1.846MHz. * 01: 18.46MHz. * 10: 24MHz. * 11: 14.77MHz.
*/ #define F81232_CLK_REGISTER 0x106 #define F81232_CLK_1_846_MHZ 0 #define F81232_CLK_18_46_MHZ BIT(0) #define F81232_CLK_24_MHZ BIT(1) #define F81232_CLK_14_77_MHZ (BIT(1) | BIT(0)) #define F81232_CLK_MASK GENMASK(1, 0) #define F81534A_MODE_REG 0x107 #define F81534A_TRIGGER_MASK GENMASK(3, 2) #define F81534A_TRIGGER_MULTIPLE_4X BIT(3) #define F81534A_FIFO_128BYTE (BIT(1) | BIT(0)) /* Serial port self GPIO control, 2bytes [control&output data][input data] */ #define F81534A_GPIO_REG 0x10e #define F81534A_GPIO_MODE2_DIR BIT(6) /* 1: input, 0: output */ #define F81534A_GPIO_MODE1_DIR BIT(5) #define F81534A_GPIO_MODE0_DIR BIT(4) #define F81534A_GPIO_MODE2_OUTPUT BIT(2) #define F81534A_GPIO_MODE1_OUTPUT BIT(1) #define F81534A_GPIO_MODE0_OUTPUT BIT(0) #define F81534A_CTRL_CMD_ENABLE_PORT 0x116 struct f81232_private { struct mutex lock; u8 modem_control; u8 modem_status; u8 shadow_lcr; speed_t baud_base; struct work_struct lsr_work; struct work_struct interrupt_work; struct usb_serial_port *port; }; static u32 const baudrate_table[] = { 115200, 921600, 1152000, 1500000 }; static u8 const clock_table[] = { F81232_CLK_1_846_MHZ, F81232_CLK_14_77_MHZ, F81232_CLK_18_46_MHZ, F81232_CLK_24_MHZ }; static int calc_baud_divisor(speed_t baudrate, speed_t clockrate) { return DIV_ROUND_CLOSEST(clockrate, baudrate); } static int f81232_get_register(struct usb_serial_port *port, u16 reg, u8 *val) { int status; struct usb_device *dev = port->serial->dev; status = usb_control_msg_recv(dev, 0, F81232_REGISTER_REQUEST, F81232_GET_REGISTER, reg, 0, val, sizeof(*val), USB_CTRL_GET_TIMEOUT, GFP_KERNEL); if (status) { dev_err(&port->dev, "%s failed status: %d\n", __func__, status); status = usb_translate_errors(status); } return status; } static int f81232_set_register(struct usb_serial_port *port, u16 reg, u8 val) { int status; struct usb_device *dev = port->serial->dev; status = usb_control_msg_send(dev, 0, F81232_REGISTER_REQUEST, F81232_SET_REGISTER, reg, 0, &val, sizeof(val), USB_CTRL_SET_TIMEOUT, GFP_KERNEL); if (status) { dev_err(&port->dev, "%s failed status: %d\n", __func__, status); status = usb_translate_errors(status); } return status; } static int f81232_set_mask_register(struct usb_serial_port *port, u16 reg, u8 mask, u8 val) { int status; u8 tmp; status = f81232_get_register(port, reg, &tmp); if (status) return status; tmp = (tmp & ~mask) | (val & mask); return f81232_set_register(port, reg, tmp); } static void f81232_read_msr(struct usb_serial_port *port) { int status; u8 current_msr; struct tty_struct *tty; struct f81232_private *priv = usb_get_serial_port_data(port); mutex_lock(&priv->lock); status = f81232_get_register(port, MODEM_STATUS_REGISTER, &current_msr); if (status) { dev_err(&port->dev, "%s fail, status: %d\n", __func__, status); mutex_unlock(&priv->lock); return; } if (!(current_msr & UART_MSR_ANY_DELTA)) { mutex_unlock(&priv->lock); return; } priv->modem_status = current_msr; if (current_msr & UART_MSR_DCTS) port->icount.cts++; if (current_msr & UART_MSR_DDSR) port->icount.dsr++; if (current_msr & UART_MSR_TERI) port->icount.rng++; if (current_msr & UART_MSR_DDCD) { port->icount.dcd++; tty = tty_port_tty_get(&port->port); if (tty) { usb_serial_handle_dcd_change(port, tty, current_msr & UART_MSR_DCD); tty_kref_put(tty); } } wake_up_interruptible(&port->port.delta_msr_wait); mutex_unlock(&priv->lock); } static int f81232_set_mctrl(struct usb_serial_port *port, unsigned int set, unsigned int clear) { u8 val; int status; struct f81232_private *priv = usb_get_serial_port_data(port); if (((set |
clear) & (TIOCM_DTR | TIOCM_RTS)) == 0) return 0; /* no change */ /* 'set' takes precedence over 'clear' */ clear &= ~set; /* force enable interrupt with OUT2 */ mutex_lock(&priv->lock); val = UART_MCR_OUT2 | priv->modem_control; if (clear & TIOCM_DTR) val &= ~UART_MCR_DTR; if (clear & TIOCM_RTS) val &= ~UART_MCR_RTS; if (set & TIOCM_DTR) val |= UART_MCR_DTR; if (set & TIOCM_RTS) val |= UART_MCR_RTS; dev_dbg(&port->dev, "%s new:%02x old:%02x\n", __func__, val, priv->modem_control); status = f81232_set_register(port, MODEM_CONTROL_REGISTER, val); if (status) { dev_err(&port->dev, "%s set MCR status < 0\n", __func__); mutex_unlock(&priv->lock); return status; } priv->modem_control = val; mutex_unlock(&priv->lock); return 0; } static void f81232_update_line_status(struct usb_serial_port *port, unsigned char *data, size_t actual_length) { struct f81232_private *priv = usb_get_serial_port_data(port); if (!actual_length) return; switch (data[0] & 0x07) { case 0x00: /* msr change */ dev_dbg(&port->dev, "IIR: MSR Change: %02x\n", data[0]); schedule_work(&priv->interrupt_work); break; case 0x02: /* tx-empty */ break; case 0x04: /* rx data available */ break; case 0x06: /* lsr change */ /* we can forget it. the LSR will read from bulk-in */ dev_dbg(&port->dev, "IIR: LSR Change: %02x\n", data[0]); break; } } static void f81232_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; unsigned char *data = urb->transfer_buffer; unsigned int actual_length = urb->actual_length; int status = urb->status; int retval; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n", __func__, status); return; default: dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n", __func__, status); goto exit; } usb_serial_debug_data(&port->dev, __func__, urb->actual_length, urb->transfer_buffer); f81232_update_line_status(port, data, actual_length); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } static char f81232_handle_lsr(struct usb_serial_port *port, u8 lsr) { struct f81232_private *priv = usb_get_serial_port_data(port); char tty_flag = TTY_NORMAL; if (!(lsr & UART_LSR_BRK_ERROR_BITS)) return tty_flag; if (lsr & UART_LSR_BI) { tty_flag = TTY_BREAK; port->icount.brk++; usb_serial_handle_break(port); } else if (lsr & UART_LSR_PE) { tty_flag = TTY_PARITY; port->icount.parity++; } else if (lsr & UART_LSR_FE) { tty_flag = TTY_FRAME; port->icount.frame++; } if (lsr & UART_LSR_OE) { port->icount.overrun++; schedule_work(&priv->lsr_work); tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); } return tty_flag; } static void f81232_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; unsigned char *data = urb->transfer_buffer; char tty_flag; unsigned int i; u8 lsr; /* * When opening the port we get a 1-byte packet with the current LSR, * which we discard. */ if ((urb->actual_length < 2) || (urb->actual_length % 2)) return; /* bulk-in data: [LSR(1Byte)+DATA(1Byte)][LSR(1Byte)+DATA(1Byte)]... 
*/ for (i = 0; i < urb->actual_length; i += 2) { lsr = data[i]; tty_flag = f81232_handle_lsr(port, lsr); if (port->sysrq) { if (usb_serial_handle_sysrq_char(port, data[i + 1])) continue; } tty_insert_flip_char(&port->port, data[i + 1], tty_flag); } tty_flip_buffer_push(&port->port); } static void f81534a_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; unsigned char *data = urb->transfer_buffer; char tty_flag; unsigned int i; u8 lsr; u8 len; if (urb->actual_length < 3) { dev_err(&port->dev, "short message received: %d\n", urb->actual_length); return; } len = data[0]; if (len != urb->actual_length) { dev_err(&port->dev, "malformed message received: %d (%d)\n", urb->actual_length, len); return; } /* bulk-in data: [LEN][Data.....][LSR] */ lsr = data[len - 1]; tty_flag = f81232_handle_lsr(port, lsr); if (port->sysrq) { for (i = 1; i < len - 1; ++i) { if (!usb_serial_handle_sysrq_char(port, data[i])) { tty_insert_flip_char(&port->port, data[i], tty_flag); } } } else { tty_insert_flip_string_fixed_flag(&port->port, &data[1], tty_flag, len - 2); } tty_flip_buffer_push(&port->port); } static int f81232_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct f81232_private *priv = usb_get_serial_port_data(port); int status; mutex_lock(&priv->lock); if (break_state) priv->shadow_lcr |= UART_LCR_SBC; else priv->shadow_lcr &= ~UART_LCR_SBC; status = f81232_set_register(port, LINE_CONTROL_REGISTER, priv->shadow_lcr); if (status) dev_err(&port->dev, "set break failed: %d\n", status); mutex_unlock(&priv->lock); return status; } static int f81232_find_clk(speed_t baudrate) { int idx; for (idx = 0; idx < ARRAY_SIZE(baudrate_table); ++idx) { if (baudrate <= baudrate_table[idx] && baudrate_table[idx] % baudrate == 0) return idx; } return -EINVAL; } static void f81232_set_baudrate(struct tty_struct *tty, struct usb_serial_port *port, speed_t baudrate, speed_t old_baudrate) { struct f81232_private *priv = usb_get_serial_port_data(port); u8 lcr; int divisor; int status = 0; int i; int idx; speed_t baud_list[] = { baudrate, old_baudrate, F81232_DEF_BAUDRATE }; for (i = 0; i < ARRAY_SIZE(baud_list); ++i) { baudrate = baud_list[i]; if (baudrate == 0) { tty_encode_baud_rate(tty, 0, 0); return; } idx = f81232_find_clk(baudrate); if (idx >= 0) { tty_encode_baud_rate(tty, baudrate, baudrate); break; } } if (idx < 0) return; priv->baud_base = baudrate_table[idx]; divisor = calc_baud_divisor(baudrate, priv->baud_base); status = f81232_set_mask_register(port, F81232_CLK_REGISTER, F81232_CLK_MASK, clock_table[idx]); if (status) { dev_err(&port->dev, "%s failed to set CLK_REG: %d\n", __func__, status); return; } status = f81232_get_register(port, LINE_CONTROL_REGISTER, &lcr); /* get LCR */ if (status) { dev_err(&port->dev, "%s failed to get LCR: %d\n", __func__, status); return; } status = f81232_set_register(port, LINE_CONTROL_REGISTER, lcr | UART_LCR_DLAB); /* Enable DLAB */ if (status) { dev_err(&port->dev, "%s failed to set DLAB: %d\n", __func__, status); return; } status = f81232_set_register(port, RECEIVE_BUFFER_REGISTER, divisor & 0x00ff); /* low */ if (status) { dev_err(&port->dev, "%s failed to set baudrate LSB: %d\n", __func__, status); goto reapply_lcr; } status = f81232_set_register(port, INTERRUPT_ENABLE_REGISTER, (divisor & 0xff00) >> 8); /* high */ if (status) { dev_err(&port->dev, "%s failed to set baudrate MSB: %d\n", __func__, status); } reapply_lcr: status = f81232_set_register(port, LINE_CONTROL_REGISTER, lcr &
~UART_LCR_DLAB); if (status) { dev_err(&port->dev, "%s failed to set DLAB: %d\n", __func__, status); } } static int f81232_port_enable(struct usb_serial_port *port) { u8 val; int status; /* fifo on, trigger8, clear TX/RX*/ val = UART_FCR_TRIGGER_8 | UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT; status = f81232_set_register(port, FIFO_CONTROL_REGISTER, val); if (status) { dev_err(&port->dev, "%s failed to set FCR: %d\n", __func__, status); return status; } /* MSR Interrupt only, LSR will read from Bulk-in odd byte */ status = f81232_set_register(port, INTERRUPT_ENABLE_REGISTER, UART_IER_MSI); if (status) { dev_err(&port->dev, "%s failed to set IER: %d\n", __func__, status); return status; } return 0; } static int f81232_port_disable(struct usb_serial_port *port) { int status; status = f81232_set_register(port, INTERRUPT_ENABLE_REGISTER, 0); if (status) { dev_err(&port->dev, "%s failed to set IER: %d\n", __func__, status); return status; } return 0; } static void f81232_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct f81232_private *priv = usb_get_serial_port_data(port); u8 new_lcr = 0; int status = 0; speed_t baudrate; speed_t old_baud; /* Don't change anything if nothing has changed */ if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios)) return; if (C_BAUD(tty) == B0) f81232_set_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS); else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) f81232_set_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0); baudrate = tty_get_baud_rate(tty); if (baudrate > 0) { if (old_termios) old_baud = tty_termios_baud_rate(old_termios); else old_baud = F81232_DEF_BAUDRATE; f81232_set_baudrate(tty, port, baudrate, old_baud); } if (C_PARENB(tty)) { new_lcr |= UART_LCR_PARITY; if (!C_PARODD(tty)) new_lcr |= UART_LCR_EPAR; if (C_CMSPAR(tty)) new_lcr |= UART_LCR_SPAR; } if (C_CSTOPB(tty)) new_lcr |= UART_LCR_STOP; new_lcr |= UART_LCR_WLEN(tty_get_char_size(tty->termios.c_cflag)); mutex_lock(&priv->lock); new_lcr |= (priv->shadow_lcr & UART_LCR_SBC); status = f81232_set_register(port, LINE_CONTROL_REGISTER, new_lcr); if (status) { dev_err(&port->dev, "%s failed to set LCR: %d\n", __func__, status); } priv->shadow_lcr = new_lcr; mutex_unlock(&priv->lock); } static int f81232_tiocmget(struct tty_struct *tty) { int r; struct usb_serial_port *port = tty->driver_data; struct f81232_private *port_priv = usb_get_serial_port_data(port); u8 mcr, msr; /* force get current MSR changed state */ f81232_read_msr(port); mutex_lock(&port_priv->lock); mcr = port_priv->modem_control; msr = port_priv->modem_status; mutex_unlock(&port_priv->lock); r = (mcr & UART_MCR_DTR ? TIOCM_DTR : 0) | (mcr & UART_MCR_RTS ? TIOCM_RTS : 0) | (msr & UART_MSR_CTS ? TIOCM_CTS : 0) | (msr & UART_MSR_DCD ? TIOCM_CAR : 0) | (msr & UART_MSR_RI ? TIOCM_RI : 0) | (msr & UART_MSR_DSR ? 
TIOCM_DSR : 0); return r; } static int f81232_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; return f81232_set_mctrl(port, set, clear); } static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port) { int result; result = f81232_port_enable(port); if (result) return result; /* Setup termios */ if (tty) f81232_set_termios(tty, port, NULL); result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) { dev_err(&port->dev, "%s - failed submitting interrupt urb," " error %d\n", __func__, result); return result; } result = usb_serial_generic_open(tty, port); if (result) { usb_kill_urb(port->interrupt_in_urb); return result; } return 0; } static int f81534a_open(struct tty_struct *tty, struct usb_serial_port *port) { int status; u8 mask; u8 val; val = F81534A_TRIGGER_MULTIPLE_4X | F81534A_FIFO_128BYTE; mask = F81534A_TRIGGER_MASK | F81534A_FIFO_128BYTE; status = f81232_set_mask_register(port, F81534A_MODE_REG, mask, val); if (status) { dev_err(&port->dev, "failed to set MODE_REG: %d\n", status); return status; } return f81232_open(tty, port); } static void f81232_close(struct usb_serial_port *port) { struct f81232_private *port_priv = usb_get_serial_port_data(port); f81232_port_disable(port); usb_serial_generic_close(port); usb_kill_urb(port->interrupt_in_urb); flush_work(&port_priv->interrupt_work); flush_work(&port_priv->lsr_work); } static void f81232_dtr_rts(struct usb_serial_port *port, int on) { if (on) f81232_set_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0); else f81232_set_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS); } static bool f81232_tx_empty(struct usb_serial_port *port) { int status; u8 tmp; status = f81232_get_register(port, LINE_STATUS_REGISTER, &tmp); if (!status) { if ((tmp & UART_LSR_TEMT) != UART_LSR_TEMT) return false; } return true; } static int f81232_carrier_raised(struct usb_serial_port *port) { u8 msr; struct f81232_private *priv = usb_get_serial_port_data(port); mutex_lock(&priv->lock); msr = priv->modem_status; mutex_unlock(&priv->lock); if (msr & UART_MSR_DCD) return 1; return 0; } static void f81232_get_serial(struct tty_struct *tty, struct serial_struct *ss) { struct usb_serial_port *port = tty->driver_data; struct f81232_private *priv = usb_get_serial_port_data(port); ss->baud_base = priv->baud_base; } static void f81232_interrupt_work(struct work_struct *work) { struct f81232_private *priv = container_of(work, struct f81232_private, interrupt_work); f81232_read_msr(priv->port); } static void f81232_lsr_worker(struct work_struct *work) { struct f81232_private *priv; struct usb_serial_port *port; int status; u8 tmp; priv = container_of(work, struct f81232_private, lsr_work); port = priv->port; status = f81232_get_register(port, LINE_STATUS_REGISTER, &tmp); if (status) dev_warn(&port->dev, "read LSR failed: %d\n", status); } static int f81534a_ctrl_set_register(struct usb_interface *intf, u16 reg, u16 size, void *val) { struct usb_device *dev = interface_to_usbdev(intf); int retry = F81534A_ACCESS_REG_RETRY; int status; while (retry--) { status = usb_control_msg_send(dev, 0, F81232_REGISTER_REQUEST, F81232_SET_REGISTER, reg, 0, val, size, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); if (status) { status = usb_translate_errors(status); if (status == -EIO) continue; } break; } if (status) { dev_err(&intf->dev, "failed to set register 0x%x: %d\n", reg, status); } return status; } static int f81534a_ctrl_enable_all_ports(struct usb_interface *intf, bool en) { unsigned char enable[2] = {0}; 
int status; /* * Enable all available serial ports, define as following: * bit 15 : Reset behavior (when HUB got soft reset) * 0: maintain all serial port enabled state. * 1: disable all serial port. * bit 0~11 : Serial port enable bit. */ if (en) { enable[0] = 0xff; enable[1] = 0x8f; } status = f81534a_ctrl_set_register(intf, F81534A_CTRL_CMD_ENABLE_PORT, sizeof(enable), enable); if (status) dev_err(&intf->dev, "failed to enable ports: %d\n", status); return status; } static int f81534a_ctrl_probe(struct usb_interface *intf, const struct usb_device_id *id) { return f81534a_ctrl_enable_all_ports(intf, true); } static void f81534a_ctrl_disconnect(struct usb_interface *intf) { f81534a_ctrl_enable_all_ports(intf, false); } static int f81534a_ctrl_resume(struct usb_interface *intf) { return f81534a_ctrl_enable_all_ports(intf, true); } static int f81232_port_probe(struct usb_serial_port *port) { struct f81232_private *priv; priv = devm_kzalloc(&port->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; mutex_init(&priv->lock); INIT_WORK(&priv->interrupt_work, f81232_interrupt_work); INIT_WORK(&priv->lsr_work, f81232_lsr_worker); usb_set_serial_port_data(port, priv); priv->port = port; return 0; } static int f81534a_port_probe(struct usb_serial_port *port) { int status; /* tri-state with pull-high, default RS232 Mode */ status = f81232_set_register(port, F81534A_GPIO_REG, F81534A_GPIO_MODE2_DIR); if (status) return status; return f81232_port_probe(port); } static int f81232_suspend(struct usb_serial *serial, pm_message_t message) { struct usb_serial_port *port = serial->port[0]; struct f81232_private *port_priv = usb_get_serial_port_data(port); int i; for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) usb_kill_urb(port->read_urbs[i]); usb_kill_urb(port->interrupt_in_urb); if (port_priv) { flush_work(&port_priv->interrupt_work); flush_work(&port_priv->lsr_work); } return 0; } static int f81232_resume(struct usb_serial *serial) { struct usb_serial_port *port = serial->port[0]; int result; if (tty_port_initialized(&port->port)) { result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); if (result) { dev_err(&port->dev, "submit interrupt urb failed: %d\n", result); return result; } } return usb_serial_generic_resume(serial); } static struct usb_serial_driver f81232_device = { .driver = { .name = "f81232", }, .id_table = f81232_id_table, .num_ports = 1, .bulk_in_size = 256, .bulk_out_size = 256, .open = f81232_open, .close = f81232_close, .dtr_rts = f81232_dtr_rts, .carrier_raised = f81232_carrier_raised, .get_serial = f81232_get_serial, .break_ctl = f81232_break_ctl, .set_termios = f81232_set_termios, .tiocmget = f81232_tiocmget, .tiocmset = f81232_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .tx_empty = f81232_tx_empty, .process_read_urb = f81232_process_read_urb, .read_int_callback = f81232_read_int_callback, .port_probe = f81232_port_probe, .suspend = f81232_suspend, .resume = f81232_resume, }; static struct usb_serial_driver f81534a_device = { .driver = { .name = "f81534a", }, .id_table = f81534a_id_table, .num_ports = 1, .open = f81534a_open, .close = f81232_close, .dtr_rts = f81232_dtr_rts, .carrier_raised = f81232_carrier_raised, .get_serial = f81232_get_serial, .break_ctl = f81232_break_ctl, .set_termios = f81232_set_termios, .tiocmget = f81232_tiocmget, .tiocmset = f81232_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .tx_empty = f81232_tx_empty, .process_read_urb = f81534a_process_read_urb, .read_int_callback = f81232_read_int_callback, .port_probe = 
f81534a_port_probe, .suspend = f81232_suspend, .resume = f81232_resume, }; static struct usb_serial_driver * const serial_drivers[] = { &f81232_device, &f81534a_device, NULL, }; static struct usb_driver f81534a_ctrl_driver = { .name = "f81534a_ctrl", .id_table = f81534a_ctrl_id_table, .probe = f81534a_ctrl_probe, .disconnect = f81534a_ctrl_disconnect, .resume = f81534a_ctrl_resume, }; static int __init f81232_init(void) { int status; status = usb_register_driver(&f81534a_ctrl_driver, THIS_MODULE, KBUILD_MODNAME); if (status) return status; status = usb_serial_register_drivers(serial_drivers, KBUILD_MODNAME, combined_id_table); if (status) { usb_deregister(&f81534a_ctrl_driver); return status; } return 0; } static void __exit f81232_exit(void) { usb_serial_deregister_drivers(serial_drivers); usb_deregister(&f81534a_ctrl_driver); } module_init(f81232_init); module_exit(f81232_exit); MODULE_DESCRIPTION("Fintek F81232/532A/534A/535/536 USB to serial driver"); MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>"); MODULE_AUTHOR("Peter Hong <peter_hong@fintek.com.tw>"); MODULE_LICENSE("GPL v2"); |
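A note on the baud-rate plumbing above: f81232_find_clk() returns the index of the smallest entry in baudrate_table[] that the requested rate divides evenly, that index selects the matching clock_table[] source, and calc_baud_divisor() derives the 16550-style divisor by rounded division (38400 baud, for instance, selects the 115200 base with divisor 3). A standalone sketch of just that arithmetic, with ARRAY_SIZE and DIV_ROUND_CLOSEST open-coded for a hosted build:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const unsigned int baudrate_table[] = { 115200, 921600, 1152000, 1500000 };

static int find_clk(unsigned int baud)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(baudrate_table); i++)
		if (baud <= baudrate_table[i] && baudrate_table[i] % baud == 0)
			return i;
	return -1;	/* no clock source divides evenly */
}

int main(void)
{
	const unsigned int bauds[] = { 9600, 38400, 115200, 230400, 921600 };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(bauds); i++) {
		int idx = find_clk(bauds[i]);

		if (idx < 0) {
			printf("%u: unsupported\n", bauds[i]);
			continue;
		}
		/* rounded division, as DIV_ROUND_CLOSEST does in the driver */
		printf("%u: base %u, divisor %u\n", bauds[i],
		       baudrate_table[idx],
		       (baudrate_table[idx] + bauds[i] / 2) / bauds[i]);
	}
	return 0;
}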
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RCULIST_NULLS_H #define _LINUX_RCULIST_NULLS_H #ifdef __KERNEL__ /* * RCU-protected list version */ #include <linux/list_nulls.h> #include <linux/rcupdate.h> /** * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization * @n: the element to delete from the hash list. * * Note: hlist_nulls_unhashed() on the node returns true after this. It is * useful for RCU based read lockfree traversal if the writer side * must know if the list entry is still hashed or already unhashed. * * In particular, it means that we can not poison the forward pointers * that may still be used for walking the hash list and we can only * zero the pprev pointer so list_unhashed() will return true after * this. * * The caller must take whatever precautions are necessary (such as * holding appropriate locks) to avoid racing with another * list-mutation primitive, such as hlist_nulls_add_head_rcu() or * hlist_nulls_del_rcu(), running on this same list. However, it is * perfectly legal to run concurrently with the _rcu list-traversal * primitives, such as hlist_nulls_for_each_entry_rcu(). */ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) { if (!hlist_nulls_unhashed(n)) { __hlist_nulls_del(n); WRITE_ONCE(n->pprev, NULL); } } /** * hlist_nulls_first_rcu - returns the first element of the hash list. * @head: the head of the list. */ #define hlist_nulls_first_rcu(head) \ (*((struct hlist_nulls_node __rcu __force **)&(head)->first)) /** * hlist_nulls_next_rcu - returns the element of the list after @node. * @node: element of the list. */ #define hlist_nulls_next_rcu(node) \ (*((struct hlist_nulls_node __rcu __force **)&(node)->next)) /** * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization * @n: the element to delete from the hash list. * * Note: hlist_nulls_unhashed() on entry does not return true after this, * the entry is in an undefined state. It is useful for RCU based * lockfree traversal. * * In particular, it means that we can not poison the forward * pointers that may still be used for walking the hash list. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() * or hlist_nulls_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_nulls_for_each_entry(). */ static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n) { __hlist_nulls_del(n); WRITE_ONCE(n->pprev, LIST_POISON2); } /** * hlist_nulls_add_head_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist_nulls, * while permitting racing traversals.
* * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() * or hlist_nulls_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, struct hlist_nulls_head *h) { struct hlist_nulls_node *first = h->first; WRITE_ONCE(n->next, first); WRITE_ONCE(n->pprev, &h->first); rcu_assign_pointer(hlist_nulls_first_rcu(h), n); if (!is_a_nulls(first)) WRITE_ONCE(first->pprev, &n->next); } /** * hlist_nulls_add_tail_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist_nulls, * while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() * or hlist_nulls_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, struct hlist_nulls_head *h) { struct hlist_nulls_node *i, *last = NULL; /* Note: write side code, so rcu accessors are not needed. */ for (i = h->first; !is_a_nulls(i); i = i->next) last = i; if (last) { WRITE_ONCE(n->next, last->next); n->pprev = &last->next; rcu_assign_pointer(hlist_nulls_next_rcu(last), n); } else { hlist_nulls_add_head_rcu(n, h); } } /* after that hlist_nulls_del will work */ static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n) { n->pprev = &n->next; n->next = (struct hlist_nulls_node *)NULLS_MARKER(NULL); } /** * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_nulls_node to use as a loop cursor. * @head: the head of the list. * @member: the name of the hlist_nulls_node within the struct. * * The barrier() is needed to make sure compiler doesn't cache first element [1], * as this loop can be restarted [2] * [1] Documentation/memory-barriers.txt around line 1533 * [2] Documentation/RCU/rculist_nulls.rst around line 146 */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ for (({barrier();}), \ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ (!is_a_nulls(pos)) && \ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) /** * hlist_nulls_for_each_entry_safe - * iterate over list of given type safe against removal of list entry * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_nulls_node to use as a loop cursor. * @head: the head of the list. * @member: the name of the hlist_nulls_node within the struct. 
*/ #define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \ for (({barrier();}), \ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ (!is_a_nulls(pos)) && \ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });) #endif #endif |
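The value encoded in the nulls terminator is what makes these lists safe for lock-free lookups over SLAB_TYPESAFE_BY_RCU objects: an object can be freed and reinserted into a different chain while a reader walks it, and the reader detects this because traversal ends on a nulls marker whose value is not the bucket it started from, in which case it restarts. A lookup sketch in the style of Documentation/RCU/rculist_nulls.rst; struct obj, obj_put() and the table/key handling are hypothetical:

#include <linux/rculist_nulls.h>
#include <linux/refcount.h>

struct obj {					/* hypothetical hashed object */
	struct hlist_nulls_node node;
	unsigned int key;
	refcount_t refcnt;
};

static void obj_put(struct obj *obj);		/* hypothetical release helper */

static struct obj *obj_lookup(struct hlist_nulls_head *table,
			      unsigned int key, unsigned int slot)
{
	struct hlist_nulls_node *pos;
	struct obj *obj;

	rcu_read_lock();
begin:
	hlist_nulls_for_each_entry_rcu(obj, pos, &table[slot], node) {
		if (obj->key == key) {
			if (!refcount_inc_not_zero(&obj->refcnt))
				goto begin;	/* object being freed, retry */
			if (obj->key != key) {	/* reused for another key */
				obj_put(obj);
				goto begin;
			}
			rcu_read_unlock();
			return obj;
		}
	}
	/* chain ended: a foreign nulls value means we were moved mid-walk */
	if (get_nulls_value(pos) != slot)
		goto begin;
	rcu_read_unlock();
	return NULL;
}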
/* SPDX-License-Identifier: GPL-2.0-only */ /* * fs/kernfs/kernfs-internal.h - kernfs internal header file * * Copyright (c) 2001-3 Patrick Mochel * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007, 2013 Tejun Heo <teheo@suse.de> */ #ifndef __KERNFS_INTERNAL_H #define __KERNFS_INTERNAL_H #include <linux/lockdep.h> #include <linux/fs.h> #include <linux/mutex.h> #include <linux/rwsem.h> #include <linux/xattr.h> #include <linux/kernfs.h> #include <linux/fs_context.h> struct kernfs_iattrs { kuid_t ia_uid; kgid_t ia_gid; struct timespec64 ia_atime; struct timespec64 ia_mtime; struct timespec64 ia_ctime; struct simple_xattrs xattrs; atomic_t nr_user_xattrs; atomic_t user_xattr_size; }; struct kernfs_root { /* published fields */ struct kernfs_node *kn; unsigned int flags; /* KERNFS_ROOT_* flags */ /* private fields, do not use outside kernfs proper */ struct idr ino_idr; spinlock_t kernfs_idr_lock; /* root->ino_idr */ u32 last_id_lowbits; u32 id_highbits; struct kernfs_syscall_ops *syscall_ops; /* list of kernfs_super_info of this root, protected by kernfs_rwsem */ struct list_head supers; wait_queue_head_t deactivate_waitq; struct rw_semaphore kernfs_rwsem; struct rw_semaphore kernfs_iattr_rwsem; struct rw_semaphore kernfs_supers_rwsem; /* kn->parent and kn->name */ rwlock_t kernfs_rename_lock; struct rcu_head rcu; }; /* +1 to avoid triggering overflow warning when negating it */ #define KN_DEACTIVATED_BIAS (INT_MIN + 1) /* KERNFS_TYPE_MASK and types are defined in include/linux/kernfs.h */ /** * kernfs_root - find out the kernfs_root a kernfs_node belongs to * @kn: kernfs_node of interest * * Return: the kernfs_root @kn belongs to. */ static inline struct kernfs_root *kernfs_root(const struct kernfs_node *kn) { const struct kernfs_node *knp; /* if parent exists, it's always a dir; otherwise, @sd is a dir */ guard(rcu)(); knp = rcu_dereference(kn->__parent); if (knp) kn = knp; return kn->dir.root; } /* * mount.c */ struct kernfs_super_info { struct super_block *sb; /* * The root associated with this super_block. Each super_block is * identified by the root and ns it's associated with. */ struct kernfs_root *root; /* * Each sb is associated with one namespace tag, currently the * network namespace of the task which mounted this kernfs * instance. If multiple tags become necessary, make the following * an array and compare kernfs_node tag against every entry.
*/ const void *ns; /* anchored at kernfs_root->supers, protected by kernfs_rwsem */ struct list_head node; }; #define kernfs_info(SB) ((struct kernfs_super_info *)(SB->s_fs_info)) static inline bool kernfs_root_is_locked(const struct kernfs_node *kn) { return lockdep_is_held(&kernfs_root(kn)->kernfs_rwsem); } static inline bool kernfs_rename_is_locked(const struct kernfs_node *kn) { return lockdep_is_held(&kernfs_root(kn)->kernfs_rename_lock); } static inline const char *kernfs_rcu_name(const struct kernfs_node *kn) { return rcu_dereference_check(kn->name, kernfs_root_is_locked(kn)); } static inline struct kernfs_node *kernfs_parent(const struct kernfs_node *kn) { /* * The kernfs_node::__parent remains valid within a RCU section. The kn * can be reparented (and renamed) which changes the entry. This can be * avoided by locking kernfs_root::kernfs_rwsem or * kernfs_root::kernfs_rename_lock. * Both locks can be used to obtain a reference on __parent. Once the * reference count reaches 0 then the node is about to be freed * and can not be renamed (or become a different parent) anymore. */ return rcu_dereference_check(kn->__parent, kernfs_root_is_locked(kn) || kernfs_rename_is_locked(kn) || !atomic_read(&kn->count)); } static inline struct kernfs_node *kernfs_dentry_node(struct dentry *dentry) { if (d_really_is_negative(dentry)) return NULL; return d_inode(dentry)->i_private; } static inline void kernfs_set_rev(struct kernfs_node *parent, struct dentry *dentry) { dentry->d_time = parent->dir.rev; } static inline void kernfs_inc_rev(struct kernfs_node *parent) { parent->dir.rev++; } static inline bool kernfs_dir_changed(struct kernfs_node *parent, struct dentry *dentry) { if (parent->dir.rev != dentry->d_time) return true; return false; } extern const struct super_operations kernfs_sops; extern struct kmem_cache *kernfs_node_cache, *kernfs_iattrs_cache; /* * inode.c */ extern const struct xattr_handler * const kernfs_xattr_handlers[]; void kernfs_evict_inode(struct inode *inode); int kernfs_iop_permission(struct mnt_idmap *idmap, struct inode *inode, int mask); int kernfs_iop_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr); int kernfs_iop_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags); ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size); int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr); /* * dir.c */ extern const struct dentry_operations kernfs_dops; extern const struct file_operations kernfs_dir_fops; extern const struct inode_operations kernfs_dir_iops; struct kernfs_node *kernfs_get_active(struct kernfs_node *kn); void kernfs_put_active(struct kernfs_node *kn); int kernfs_add_one(struct kernfs_node *kn); struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, const char *name, umode_t mode, kuid_t uid, kgid_t gid, unsigned flags); /* * file.c */ extern const struct file_operations kernfs_file_fops; bool kernfs_should_drain_open_files(struct kernfs_node *kn); void kernfs_drain_open_files(struct kernfs_node *kn); /* * symlink.c */ extern const struct inode_operations kernfs_symlink_iops; /* * kernfs locks */ extern struct kernfs_global_locks *kernfs_locks; #endif /* __KERNFS_INTERNAL_H */ |
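The trio kernfs_inc_rev()/kernfs_set_rev()/kernfs_dir_changed() above implements a per-directory revision counter: every mutation of a directory bumps parent->dir.rev, a negative dentry is stamped with the revision current at lookup time, and revalidation only has to compare the two integers to decide whether a cached negative lookup may still be trusted. A sketch of that check; the real logic lives in fs/kernfs/dir.c and the helper name here is illustrative:

/* illustrative helper, in the spirit of kernfs_dop_revalidate() */
static bool negative_dentry_still_valid(struct kernfs_node *parent,
					struct dentry *dentry)
{
	/*
	 * dentry->d_time was set to parent->dir.rev by kernfs_set_rev()
	 * when the negative dentry was created; any later create/rename
	 * in the parent ran kernfs_inc_rev(), so a mismatch means the
	 * directory changed and the negative dentry must be discarded.
	 */
	return !kernfs_dir_changed(parent, dentry);
}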
2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 
2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 
3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 
4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 
// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>
#include <linux/sched/isolation.h>

#include <trace/events/block.h>

#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
static DEFINE_MUTEX(blk_mq_cpuhp_lock);

static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
static void blk_mq_request_bypass_insert(struct request *rq,
		blk_insert_t flags);
static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
		struct list_head *list);
static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			 struct io_comp_batch *iob, unsigned int flags);

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

static bool blk_mq_check_in_driver(struct request *rq, void *priv)
{
	struct mq_inflight *mi = priv;

	if (rq->rq_flags & RQF_IO_STAT &&
	    (!bdev_is_partition(mi->part) || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

void blk_mq_in_driver_rw(struct block_device *part, unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(bdev_get_queue(part), blk_mq_check_in_driver,
				   &mi);
	inflight[READ] = mi.inflight[READ];
	inflight[WRITE] = mi.inflight[WRITE];
}

#ifdef CONFIG_LOCKDEP
static bool blk_freeze_set_owner(struct request_queue *q,
				 struct task_struct *owner)
{
	if (!owner)
		return false;

	if (!q->mq_freeze_depth) {
		q->mq_freeze_owner = owner;
		q->mq_freeze_owner_depth = 1;
		q->mq_freeze_disk_dead = !q->disk ||
			test_bit(GD_DEAD, &q->disk->state) ||
			!blk_queue_registered(q);
		q->mq_freeze_queue_dying = blk_queue_dying(q);
		return true;
	}

	if (owner == q->mq_freeze_owner)
		q->mq_freeze_owner_depth += 1;
	return false;
}

/* verify the last unfreeze in owner context */
static bool blk_unfreeze_check_owner(struct request_queue *q)
{
	if (q->mq_freeze_owner != current)
		return false;
	if (--q->mq_freeze_owner_depth == 0) {
		q->mq_freeze_owner = NULL;
		return true;
	}
	return false;
}

#else

static bool blk_freeze_set_owner(struct request_queue *q,
				 struct task_struct *owner)
{
	return false;
}

static bool blk_unfreeze_check_owner(struct request_queue *q)
{
	return false;
}
#endif

bool __blk_freeze_queue_start(struct request_queue *q,
			      struct task_struct *owner)
{
	bool freeze;

	mutex_lock(&q->mq_freeze_lock);
	freeze = blk_freeze_set_owner(q, owner);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}

	return freeze;
}

void blk_freeze_queue_start(struct request_queue *q)
{
	if (__blk_freeze_queue_start(q, current))
		blk_freeze_acquire_lock(q);
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

void blk_mq_freeze_queue_nomemsave(struct request_queue *q)
{
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_nomemsave);
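/*
 * Illustrative sketch (not part of this file): a caller that must drain all
 * outstanding I/O before reconfiguring a queue would typically pair the
 * freeze helpers as below. The function name reconfigure_example() is
 * hypothetical.
 *
 *	static void reconfigure_example(struct request_queue *q)
 *	{
 *		blk_mq_freeze_queue_nomemsave(q);	// kill q_usage_counter, wait for zero
 *		// ... queue fully drained here; no new requests can enter ...
 *		blk_mq_unfreeze_queue_nomemrestore(q);	// resurrect counter, wake waiters
 *	}
 *
 * Freezing waits for outstanding requests to complete, unlike quiescing
 * below, which only stops dispatch.
 */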
bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
	bool unfreeze;

	mutex_lock(&q->mq_freeze_lock);
	if (force_atomic)
		q->q_usage_counter.data->force_atomic = true;
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	unfreeze = blk_unfreeze_check_owner(q);
	mutex_unlock(&q->mq_freeze_lock);

	return unfreeze;
}

void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q)
{
	if (__blk_mq_unfreeze_queue(q, false))
		blk_unfreeze_release_lock(q);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue_nomemrestore);

/*
 * non_owner variant of blk_freeze_queue_start
 *
 * Unlike blk_freeze_queue_start, the queue doesn't need to be unfrozen
 * by the same task.  This is fragile and should not be used if at all
 * possible.
 */
void blk_freeze_queue_start_non_owner(struct request_queue *q)
{
	__blk_freeze_queue_start(q, NULL);
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start_non_owner);

/* non_owner variant of blk_mq_unfreeze_queue */
void blk_mq_unfreeze_queue_non_owner(struct request_queue *q)
{
	__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue_non_owner);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (!q->quiesce_depth++)
		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @set: tag_set to wait on
 *
 * Note: it is the driver's responsibility to make sure that quiesce has
 * been started on one or more of the request_queues of the tag_set.  This
 * function only waits for the quiesce on those request_queues that had
 * the quiesce flag set using blk_mq_quiesce_queue_nowait.
 */
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
{
	if (set->flags & BLK_MQ_F_BLOCKING)
		synchronize_srcu(set->srcu);
	else
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent that the struct request end_io()
 * callback function is invoked.  Once this function returns, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue_nowait(q);
	/* nothing to wait for non-mq queues */
	if (queue_is_mq(q))
		blk_mq_wait_quiesce_done(q->tag_set);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
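/*
 * Illustrative sketch (not part of this file): quiescing is the lightweight
 * counterpart of freezing -- dispatch stops, but outstanding requests may
 * still complete. A driver error handler might use it like this (the
 * function name reset_example() is hypothetical):
 *
 *	static void reset_example(struct request_queue *q)
 *	{
 *		blk_mq_quiesce_queue(q);	// stop dispatch, wait for in-flight ->queue_rq()
 *		// ... safe to reset driver state; no new dispatch can happen ...
 *		blk_mq_unquiesce_queue(q);	// re-enable and re-run dispatch
 *	}
 */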
/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function recovers the queue to the state before quiescing, which is
 * done by blk_mq_quiesce_queue().
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;
	bool run_queue = false;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
		;
	} else if (!--q->quiesce_depth) {
		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
		run_queue = true;
	}
	spin_unlock_irqrestore(&q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	if (run_queue)
		blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	mutex_lock(&set->tag_list_lock);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		if (!blk_queue_skip_tagset_quiesce(q))
			blk_mq_quiesce_queue_nowait(q);
	}
	mutex_unlock(&set->tag_list_lock);

	blk_mq_wait_quiesce_done(set);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);

void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	mutex_lock(&set->tag_list_lock);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		if (!blk_queue_skip_tagset_quiesce(q))
			blk_mq_unquiesce_queue(q);
	}
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = blk_time_get_ns();
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

/* Set start and alloc time when the allocated request is actually used */
static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
{
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	if (blk_queue_rq_alloc_time(rq->q))
		rq->alloc_time_ns = alloc_time_ns;
	else
		rq->alloc_time_ns = 0;
#endif
}

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		struct blk_mq_tags *tags, unsigned int tag)
{
	struct blk_mq_ctx *ctx = data->ctx;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	struct request_queue *q = data->q;
	struct request *rq = tags->static_rqs[tag];

	rq->q = q;
	rq->mq_ctx = ctx;
	rq->mq_hctx = hctx;
	rq->cmd_flags = data->cmd_flags;

	if (data->flags & BLK_MQ_REQ_PM)
		data->rq_flags |= RQF_PM;
	rq->rq_flags = data->rq_flags;

	if (data->rq_flags & RQF_SCHED_TAGS) {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	} else {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	}
	rq->timeout = 0;

	rq->part = NULL;
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
	rq->nr_integrity_segments = 0;
	rq->end_io = NULL;
	rq->end_io_data = NULL;

	blk_crypto_rq_set_defaults(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);
	req_ref_set(rq, 1);

	if (rq->rq_flags & RQF_USE_SCHED) {
		struct elevator_queue *e = data->q->elevator;

		INIT_HLIST_NODE(&rq->hash);
		RB_CLEAR_NODE(&rq->rb_node);

		if (e->type->ops.prepare_request)
			e->type->ops.prepare_request(rq);
	}

	return rq;
}
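/*
 * Illustrative note (not from this file): with an I/O scheduler attached,
 * blk_mq_rq_ctx_init() stores the allocated tag in rq->internal_tag and
 * defers driver-tag allocation to dispatch time; without a scheduler the
 * tag is the driver tag directly:
 *
 *	RQF_SCHED_TAGS set:	rq->internal_tag = tag, rq->tag = BLK_MQ_NO_TAG
 *	RQF_SCHED_TAGS clear:	rq->tag = tag, rq->internal_tag = BLK_MQ_NO_TAG
 */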
static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
{
	unsigned int tag, tag_offset;
	struct blk_mq_tags *tags;
	struct request *rq;
	unsigned long tag_mask;
	int i, nr = 0;

	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
	if (unlikely(!tag_mask))
		return NULL;

	tags = blk_mq_tags_from_data(data);
	for (i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;
		tag = tag_offset + i;
		prefetch(tags->static_rqs[tag]);
		tag_mask &= ~(1UL << i);
		rq = blk_mq_rq_ctx_init(data, tags, tag);
		rq_list_add_head(data->cached_rqs, rq);
		nr++;
	}
	if (!(data->rq_flags & RQF_SCHED_TAGS))
		blk_mq_add_active_requests(data->hctx, nr);
	/* caller already holds a reference, add for remainder */
	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
	data->nr_tags -= nr;

	return rq_list_pop(data->cached_rqs);
}
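/*
 * Illustrative worked example (not from this file): if blk_mq_get_tags()
 * returns tag_mask = 0b1011 with tag_offset = 32, the loop above initializes
 * requests for tags 32, 33 and 35, pushes them onto data->cached_rqs, and
 * the caller pops one back off immediately, leaving two cached for later
 * allocations from the same plug.
 */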
static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = blk_time_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx);

	if (q->elevator) {
		/*
		 * All requests use scheduler tags when an I/O scheduler is
		 * enabled for the queue.
		 */
		data->rq_flags |= RQF_SCHED_TAGS;

		/*
		 * Flush/passthrough requests are special and go directly to
		 * the dispatch list.
		 */
		if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
		    !blk_op_is_passthrough(data->cmd_flags)) {
			struct elevator_mq_ops *ops = &q->elevator->type->ops;

			WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);

			data->rq_flags |= RQF_USE_SCHED;
			if (ops->limit_depth)
				ops->limit_depth(data->cmd_flags, data);
		}
	} else {
		blk_mq_tag_busy(data->hctx);
	}

	if (data->flags & BLK_MQ_REQ_RESERVED)
		data->rq_flags |= RQF_RESV;

	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
	if (data->nr_tags > 1) {
		rq = __blk_mq_alloc_requests_batch(data);
		if (rq) {
			blk_mq_rq_time_init(rq, alloc_time_ns);
			return rq;
		}
		data->nr_tags = 1;
	}

	/*
	 * Waiting allocations only fail because of an inactive hctx.  In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;
		/*
		 * Give up the CPU and sleep for a random short time to
		 * ensure that threads using a realtime scheduling class
		 * are migrated off the CPU, and thus off the hctx that
		 * is going away.
		 */
		msleep(3);
		goto retry;
	}

	if (!(data->rq_flags & RQF_SCHED_TAGS))
		blk_mq_inc_active_requests(data->hctx);
	rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
	blk_mq_rq_time_init(rq, alloc_time_ns);
	return rq;
}

static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
					    struct blk_plug *plug,
					    blk_opf_t opf,
					    blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.shallow_depth	= 0,
		.cmd_flags	= opf,
		.rq_flags	= 0,
		.nr_tags	= plug->nr_ios,
		.cached_rqs	= &plug->cached_rqs,
		.ctx		= NULL,
		.hctx		= NULL
	};
	struct request *rq;

	if (blk_queue_enter(q, flags))
		return NULL;

	plug->nr_ios = 1;

	rq = __blk_mq_alloc_requests(&data);
	if (unlikely(!rq))
		blk_queue_exit(q);
	return rq;
}

static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
						   blk_opf_t opf,
						   blk_mq_req_flags_t flags)
{
	struct blk_plug *plug = current->plug;
	struct request *rq;

	if (!plug)
		return NULL;

	if (rq_list_empty(&plug->cached_rqs)) {
		if (plug->nr_ios == 1)
			return NULL;
		rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
		if (!rq)
			return NULL;
	} else {
		rq = rq_list_peek(&plug->cached_rqs);
		if (!rq || rq->q != q)
			return NULL;

		if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
			return NULL;
		if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
			return NULL;

		rq_list_pop(&plug->cached_rqs);
		blk_mq_rq_time_init(rq, blk_time_get_ns());
	}

	rq->cmd_flags = opf;
	INIT_LIST_HEAD(&rq->queuelist);
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags)
{
	struct request *rq;

	rq = blk_mq_alloc_cached_request(q, opf, flags);
	if (!rq) {
		struct blk_mq_alloc_data data = {
			.q		= q,
			.flags		= flags,
			.shallow_depth	= 0,
			.cmd_flags	= opf,
			.rq_flags	= 0,
			.nr_tags	= 1,
			.cached_rqs	= NULL,
			.ctx		= NULL,
			.hctx		= NULL
		};
		int ret;

		ret = blk_queue_enter(q, flags);
		if (ret)
			return ERR_PTR(ret);

		rq = __blk_mq_alloc_requests(&data);
		if (!rq)
			goto out_queue_exit;
	}
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);
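/*
 * Illustrative sketch (not part of this file): the usual pattern for a
 * passthrough user is to allocate a request, attach its payload, and execute
 * it synchronously. The function name send_example_cmd() is hypothetical.
 *
 *	static blk_status_t send_example_cmd(struct request_queue *q)
 *	{
 *		struct request *rq;
 *		blk_status_t ret;
 *
 *		rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *		if (IS_ERR(rq))
 *			return BLK_STS_RESOURCE;
 *		// ... set up driver-specific payload here ...
 *		ret = blk_execute_rq(rq, false);	// insert at tail and wait
 *		blk_mq_free_request(rq);
 *		return ret;
 *	}
 */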
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.shallow_depth	= 0,
		.cmd_flags	= opf,
		.rq_flags	= 0,
		.nr_tags	= 1,
		.cached_rqs	= NULL,
		.ctx		= NULL,
		.hctx		= NULL
	};
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = blk_time_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
	    WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = xa_load(&q->hctx_table, hctx_idx);
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		goto out_queue_exit;
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (q->elevator)
		data.rq_flags |= RQF_SCHED_TAGS;
	else
		blk_mq_tag_busy(data.hctx);

	if (flags & BLK_MQ_REQ_RESERVED)
		data.rq_flags |= RQF_RESV;

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	if (!(data.rq_flags & RQF_SCHED_TAGS))
		blk_mq_inc_active_requests(data.hctx);
	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
	blk_mq_rq_time_init(rq, alloc_time_ns);
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void blk_mq_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_zone_finish_request(rq);

	if (rq->rq_flags & RQF_USE_SCHED) {
		q->elevator->type->ops.finish_request(rq);
		/*
		 * For a postflush request that may need to be
		 * completed twice, we should clear this flag
		 * to avoid a double finish_request() on the rq.
		 */
		rq->rq_flags &= ~RQF_USE_SCHED;
	}
}

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;

	if (rq->tag != BLK_MQ_NO_TAG) {
		blk_mq_dec_active_requests(hctx);
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	}
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_finish_request(rq);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->disk->bdi);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
	struct request *rq;

	while ((rq = rq_list_pop(&plug->cached_rqs)) != NULL)
		blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->q->disk ? rq->q->disk->disk_name : "?",
		(__force unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->rq_flags & RQF_IO_STAT) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_status_to_str(status),
		req->q->disk ? req->q->disk->disk_name : "?",
		blk_rq_pos(req), (__force u32)req_op(req),
		blk_op_str(req_op(req)),
		(__force u32)(req->cmd_flags & ~REQ_OP_MASK),
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req_get_ioprio(req)));
}
/*
 * Fully end IO on a request. Does not support partial completions, or
 * errors.
 */
static void blk_complete_request(struct request *req)
{
	const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
	int total_bytes = blk_rq_bytes(req);
	struct bio *bio = req->bio;

	trace_block_rq_complete(req, BLK_STS_OK, total_bytes);

	if (!bio)
		return;

	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
		blk_integrity_complete(req, total_bytes);

	/*
	 * Upper layers may call blk_crypto_evict_key() anytime after the last
	 * bio_endio().  Therefore, the keyslot must be released before that.
	 */
	blk_crypto_rq_put_keyslot(req);

	blk_account_io_completion(req, total_bytes);

	do {
		struct bio *next = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);

		blk_zone_update_request_bio(req, bio);

		if (!is_flush)
			bio_endio(bio);
		bio = next;
	} while (bio);

	/*
	 * Reset counters so that the request stacking driver
	 * can find how many bytes remain in the request
	 * later.
	 */
	if (!req->end_io) {
		req->bio = NULL;
		req->__data_len = 0;
	}
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *	except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	bool is_flush = req->rq_flags & RQF_FLUSH_SEQ;
	bool quiet = req->rq_flags & RQF_QUIET;
	int total_bytes;

	trace_block_rq_complete(req, error, nr_bytes);

	if (!req->bio)
		return false;

	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		blk_integrity_complete(req, nr_bytes);

	/*
	 * Upper layers may call blk_crypto_evict_key() anytime after the last
	 * bio_endio().  Therefore, the keyslot must be released before that.
	 */
	if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
		__blk_crypto_rq_put_keyslot(req);

	if (unlikely(error && !blk_rq_is_passthrough(req) && !quiet) &&
	    !test_bit(GD_DEAD, &req->q->disk->state)) {
		blk_print_req_error(req, error);
		trace_block_rq_error(req, error, nr_bytes);
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (unlikely(error))
			bio->bi_status = error;

		if (bio_bytes == bio->bi_iter.bi_size) {
			req->bio = bio->bi_next;
		} else if (bio_is_zone_append(bio) && error == BLK_STS_OK) {
			/*
			 * Partial zone append completions cannot be supported
			 * as the BIO fragments may end up not being written
			 * sequentially.
			 */
			bio->bi_status = BLK_STS_IOERR;
		}

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		if (unlikely(quiet))
			bio_set_flag(bio, BIO_QUIET);

		bio_advance(bio, bio_bytes);

		/* Don't actually finish bio if it's part of flush sequence */
		if (!bio->bi_iter.bi_size) {
			blk_zone_update_request_bio(req, bio);
			if (!is_flush)
				bio_endio(bio);
		}

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);
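/*
 * Illustrative sketch (not part of this file): a driver that can complete a
 * request piecewise calls blk_update_request() once per finished chunk and
 * only ends the request when it returns %false. The helper name
 * complete_chunk_example() is hypothetical.
 *
 *	static void complete_chunk_example(struct request *rq, unsigned int bytes)
 *	{
 *		if (!blk_update_request(rq, BLK_STS_OK, bytes))
 *			__blk_mq_end_request(rq, BLK_STS_OK);	// no data left
 *		// otherwise the request was set up for the remaining segments
 *	}
 */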
static inline void blk_account_io_done(struct request *req, u64 now)
{
	trace_block_io_done(req);

	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if ((req->rq_flags & (RQF_IO_STAT|RQF_FLUSH_SEQ)) == RQF_IO_STAT) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		update_io_ticks(req->part, jiffies, true);
		part_stat_inc(req->part, ios[sgrp]);
		part_stat_add(req->part, nsecs[sgrp],
			      now - req->start_time_ns);
		part_stat_local_dec(req->part,
				    in_flight[op_is_write(req_op(req))]);
		part_stat_unlock();
	}
}

static inline bool blk_rq_passthrough_stats(struct request *req)
{
	struct bio *bio = req->bio;

	if (!blk_queue_passthrough_stat(req->q))
		return false;

	/* Requests without a bio do not transfer data. */
	if (!bio)
		return false;

	/*
	 * Stats are accumulated in the bdev, so must have one attached to a
	 * bio to track stats.  Most drivers do not set the bdev for
	 * passthrough requests, but nvme is one that will set it.
	 */
	if (!bio->bi_bdev)
		return false;

	/*
	 * We don't know what a passthrough command does, but we know the
	 * payload size and data direction.  Ensuring the size is aligned to
	 * the block size filters out most commands with payloads that don't
	 * represent sector access.
	 */
	if (blk_rq_bytes(req) & (bdev_logical_block_size(bio->bi_bdev) - 1))
		return false;
	return true;
}

static inline void blk_account_io_start(struct request *req)
{
	trace_block_io_start(req);

	if (!blk_queue_io_stat(req->q))
		return;
	if (blk_rq_is_passthrough(req) && !blk_rq_passthrough_stats(req))
		return;

	req->rq_flags |= RQF_IO_STAT;
	req->start_time_ns = blk_time_get_ns();

	/*
	 * All non-passthrough requests are created from a bio with one
	 * exception: when a flush command that is part of a flush sequence
	 * generated by the state machine in blk-flush.c is cloned onto the
	 * lower device by dm-multipath we can get here without a bio.
	 */
	if (req->bio)
		req->part = req->bio->bi_bdev;
	else
		req->part = req->q->disk->part0;

	part_stat_lock();
	update_io_ticks(req->part, jiffies, false);
	part_stat_local_inc(req->part, in_flight[op_is_write(req_op(req))]);
	part_stat_unlock();
}

static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_STATS)
		blk_stat_add(rq, now);

	blk_mq_sched_completed_request(rq, now);
	blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_mq_need_time_stamp(rq))
		__blk_mq_end_request_acct(rq, blk_time_get_ns());

	blk_mq_finish_request(rq);

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
			blk_mq_free_request(rq);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

#define TAG_COMP_BATCH		32

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
					  int *tag_array, int nr_tags)
{
	struct request_queue *q = hctx->queue;

	blk_mq_sub_active_requests(hctx, nr_tags);

	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
	int tags[TAG_COMP_BATCH], nr_tags = 0;
	struct blk_mq_hw_ctx *cur_hctx = NULL;
	struct request *rq;
	u64 now = 0;

	if (iob->need_ts)
		now = blk_time_get_ns();

	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
		prefetch(rq->bio);
		prefetch(rq->rq_next);

		blk_complete_request(rq);
		if (iob->need_ts)
			__blk_mq_end_request_acct(rq, now);

		blk_mq_finish_request(rq);

		rq_qos_done(rq->q, rq);

		/*
		 * If end_io handler returns NONE, then it still has
		 * ownership of the request.
		 */
		if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
			continue;

		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (!req_ref_put_and_test(rq))
			continue;

		blk_crypto_free_request(rq);
		blk_pm_mark_last_busy(rq);

		if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
			if (cur_hctx)
				blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
			nr_tags = 0;
			cur_hctx = rq->mq_hctx;
		}
		tags[nr_tags++] = rq->tag;
	}

	if (nr_tags)
		blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
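/*
 * Illustrative sketch (not part of this file): a poll or IRQ handler that
 * supports batched completion adds finished requests to the caller's
 * io_comp_batch instead of ending them one by one; the block layer later
 * ends them all via blk_mq_end_request_batch(). The helper name
 * example_complete_one() is hypothetical.
 *
 *	static void example_complete_one(struct request *req,
 *					 struct io_comp_batch *iob)
 *	{
 *		if (!blk_mq_add_to_batch(req, iob, 0,
 *					 blk_mq_end_request_batch))
 *			blk_mq_end_request(req, BLK_STS_OK);	// fall back to one-at-a-time
 *	}
 */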
static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

	llist_for_each_entry_safe(rq, next, entry, ipi_list)
		rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(void)
{
	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
	return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;

	/*
	 * With force threaded interrupts enabled, raising softirq from an SMP
	 * function call will always result in waking the ksoftirqd thread.
	 * This is probably worse than completing the request on a different
	 * cache domain.
	 */
	if (force_irqthreads())
		return false;

	/* same CPU or cache domain and capacity? Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu) &&
	     cpus_equal_capacity(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}

static void blk_mq_complete_send_ipi(struct request *rq)
{
	unsigned int cpu;

	cpu = rq->mq_ctx->cpu;
	if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
		smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
}

static void blk_mq_raise_softirq(struct request *rq)
{
	struct llist_head *list;

	preempt_disable();
	list = this_cpu_ptr(&blk_cpu_done);
	if (llist_add(&rq->ipi_list, list))
		raise_softirq(BLOCK_SOFTIRQ);
	preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For a request whose hctx has only one ctx mapping,
	 * or a polled request, always complete locally;
	 * it's pointless to redirect the completion.
	 */
	if ((rq->mq_hctx->nr_ctx == 1 &&
	     rq->mq_ctx->cpu == raw_smp_processor_id()) ||
	     rq->cmd_flags & REQ_POLLED)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		blk_mq_complete_send_ipi(rq);
		return true;
	}

	if (rq->q->nr_hw_queues == 1) {
		blk_mq_raise_softirq(rq);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so blk layer can do proper initializations
 * such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags) &&
	    !blk_rq_is_passthrough(rq)) {
		rq->io_start_time_ns = blk_time_get_ns();
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
	rq->mq_hctx->tags->rqs[rq->tag] = rq;

	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		blk_integrity_prepare(rq);

	if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
		WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
}
EXPORT_SYMBOL(blk_mq_start_request);
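/*
 * Illustrative sketch (not part of this file): the typical shape of a
 * driver's ->queue_rq() handler around blk_mq_start_request(). The function
 * and hardware-submit helper names are hypothetical.
 *
 *	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					     const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);	// arm the timeout, mark IN_FLIGHT
 *		if (example_hw_submit(rq))	// hand off to hardware
 *			return BLK_STS_RESOURCE;	// blk-mq will requeue and retry
 *		return BLK_STS_OK;	// completed later, e.g. via blk_mq_complete_request()
 *	}
 */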
/*
 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
 * queues. This is important for md arrays to benefit from merging
 * requests.
 */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
	if (plug->multiple_queues)
		return BLK_MAX_REQUEST_COUNT * 2;
	return BLK_MAX_REQUEST_COUNT;
}

static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
	struct request *last = rq_list_peek(&plug->mq_list);

	if (!plug->rq_count) {
		trace_block_plug(rq->q);
	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
		   (!blk_queue_nomerges(rq->q) &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
		blk_mq_flush_plug_list(plug, false);
		last = NULL;
		trace_block_plug(rq->q);
	}

	if (!plug->multiple_queues && last && last->q != rq->q)
		plug->multiple_queues = true;
	/*
	 * Any request allocated from sched tags can't be issued to
	 * ->queue_rqs() directly
	 */
	if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
		plug->has_elevator = true;
	rq_list_add_tail(&plug->mq_list, rq);
	plug->rq_count++;
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request *rq, bool at_head)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	blk_account_io_start(rq);

	if (current->plug && !at_head) {
		blk_add_rq_to_plug(current->plug, rq);
		return;
	}

	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
	blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

struct blk_rq_wait {
	struct completion done;
	blk_status_t ret;
};

static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
{
	struct blk_rq_wait *wait = rq->end_io_data;

	wait->ret = ret;
	complete(&wait->done);
	return RQ_END_IO_NONE;
}
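/*
 * Illustrative sketch (not part of this file): asynchronous execution pairs
 * blk_execute_rq_nowait() with an end_io callback; blk_end_sync_rq() above
 * is exactly this pattern for the synchronous case. The callback name
 * example_end_io() is hypothetical.
 *
 *	static enum rq_end_io_ret example_end_io(struct request *rq,
 *						 blk_status_t ret)
 *	{
 *		// ... consume the result stashed via rq->end_io_data ...
 *		return RQ_END_IO_FREE;	// let blk-mq free the request for us
 *	}
 *
 *	// issue: rq->end_io = example_end_io; blk_execute_rq_nowait(rq, false);
 */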
BLK_MQ_INSERT_AT_HEAD : 0); blk_mq_run_hw_queue(hctx, false); if (blk_rq_is_poll(rq)) blk_rq_poll_completion(rq, &wait.done); else blk_wait_io(&wait.done); return wait.ret; } EXPORT_SYMBOL(blk_execute_rq); static void __blk_mq_requeue_request(struct request *rq) { struct request_queue *q = rq->q; blk_mq_put_driver_tag(rq); trace_block_rq_requeue(rq); rq_qos_requeue(q, rq); if (blk_mq_request_started(rq)) { WRITE_ONCE(rq->state, MQ_RQ_IDLE); rq->rq_flags &= ~RQF_TIMED_OUT; } } void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) { struct request_queue *q = rq->q; unsigned long flags; __blk_mq_requeue_request(rq); /* this request will be re-inserted to io scheduler queue */ blk_mq_sched_requeue_request(rq); spin_lock_irqsave(&q->requeue_lock, flags); list_add_tail(&rq->queuelist, &q->requeue_list); spin_unlock_irqrestore(&q->requeue_lock, flags); if (kick_requeue_list) blk_mq_kick_requeue_list(q); } EXPORT_SYMBOL(blk_mq_requeue_request); static void blk_mq_requeue_work(struct work_struct *work) { struct request_queue *q = container_of(work, struct request_queue, requeue_work.work); LIST_HEAD(rq_list); LIST_HEAD(flush_list); struct request *rq; spin_lock_irq(&q->requeue_lock); list_splice_init(&q->requeue_list, &rq_list); list_splice_init(&q->flush_list, &flush_list); spin_unlock_irq(&q->requeue_lock); while (!list_empty(&rq_list)) { rq = list_entry(rq_list.next, struct request, queuelist); list_del_init(&rq->queuelist); /* * If RQF_DONTPREP is set, the request has been started by the * driver already and might have driver-specific data allocated * already. Insert it into the hctx dispatch list to avoid * block layer merges for the request. */ if (rq->rq_flags & RQF_DONTPREP) blk_mq_request_bypass_insert(rq, 0); else blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD); } while (!list_empty(&flush_list)) { rq = list_entry(flush_list.next, struct request, queuelist); list_del_init(&rq->queuelist); blk_mq_insert_request(rq, 0); } blk_mq_run_hw_queues(q, false); } void blk_mq_kick_requeue_list(struct request_queue *q) { kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0); } EXPORT_SYMBOL(blk_mq_kick_requeue_list); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs) { kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, msecs_to_jiffies(msecs)); } EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); static bool blk_is_flush_data_rq(struct request *rq) { return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq); } static bool blk_mq_rq_inflight(struct request *rq, void *priv) { /* * If we find a request that isn't idle we know the queue is busy * as it's checked in the iter. * Return false to stop the iteration. 
* * In case of queue quiesce, if one flush data request is completed, * don't count it as inflight given the flush sequence is suspended, * and the original flush data request is invisible to driver, just * like other pending requests because of quiesce */ if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) && blk_is_flush_data_rq(rq) && blk_mq_request_completed(rq))) { bool *busy = priv; *busy = true; return false; } return true; } bool blk_mq_queue_inflight(struct request_queue *q) { bool busy = false; blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy); return busy; } EXPORT_SYMBOL_GPL(blk_mq_queue_inflight); static void blk_mq_rq_timed_out(struct request *req) { req->rq_flags |= RQF_TIMED_OUT; if (req->q->mq_ops->timeout) { enum blk_eh_timer_return ret; ret = req->q->mq_ops->timeout(req); if (ret == BLK_EH_DONE) return; WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER); } blk_add_timer(req); } struct blk_expired_data { bool has_timedout_rq; unsigned long next; unsigned long timeout_start; }; static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired) { unsigned long deadline; if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT) return false; if (rq->rq_flags & RQF_TIMED_OUT) return false; deadline = READ_ONCE(rq->deadline); if (time_after_eq(expired->timeout_start, deadline)) return true; if (expired->next == 0) expired->next = deadline; else if (time_after(expired->next, deadline)) expired->next = deadline; return false; } void blk_mq_put_rq_ref(struct request *rq) { if (is_flush_rq(rq)) { if (rq->end_io(rq, 0) == RQ_END_IO_FREE) blk_mq_free_request(rq); } else if (req_ref_put_and_test(rq)) { __blk_mq_free_request(rq); } } static bool blk_mq_check_expired(struct request *rq, void *priv) { struct blk_expired_data *expired = priv; /* * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot * be reallocated underneath the timeout handler's processing, then * the expire check is reliable. If the request is not expired, then * it was completed and reallocated as a new request after returning * from blk_mq_check_expired(). */ if (blk_mq_req_expired(rq, expired)) { expired->has_timedout_rq = true; return false; } return true; } static bool blk_mq_handle_expired(struct request *rq, void *priv) { struct blk_expired_data *expired = priv; if (blk_mq_req_expired(rq, expired)) blk_mq_rq_timed_out(rq); return true; } static void blk_mq_timeout_work(struct work_struct *work) { struct request_queue *q = container_of(work, struct request_queue, timeout_work); struct blk_expired_data expired = { .timeout_start = jiffies, }; struct blk_mq_hw_ctx *hctx; unsigned long i; /* A deadlock might occur if a request is stuck requiring a * timeout at the same time a queue freeze is waiting * completion, since the timeout code would not be able to * acquire the queue reference here. * * That's why we don't use blk_queue_enter here; instead, we use * percpu_ref_tryget directly, because we need to be able to * obtain a reference even in the short window between the queue * starting to freeze, by dropping the first reference in * blk_freeze_queue_start, and the moment the last request is * consumed, marked by the instant q_usage_counter reaches * zero. */ if (!percpu_ref_tryget(&q->q_usage_counter)) return; /* check if there is any timed-out request */ blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired); if (expired.has_timedout_rq) { /* * Before walking tags, we must ensure any submit started * before the current time has finished. 
Since the submit * uses srcu or rcu, wait for a synchronization point to * ensure all running submits have finished */ blk_mq_wait_quiesce_done(q->tag_set); expired.next = 0; blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired); } if (expired.next != 0) { mod_timer(&q->timeout, expired.next); } else { /* * Request timeouts are handled as a forward rolling timer. If * we end up here it means that no requests are pending and * also that no request has been pending for a while. Mark * each hctx as idle. */ queue_for_each_hw_ctx(q, hctx, i) { /* the hctx may be unmapped, so check it here */ if (blk_mq_hw_queue_mapped(hctx)) blk_mq_tag_idle(hctx); } } blk_queue_exit(q); } struct flush_busy_ctx_data { struct blk_mq_hw_ctx *hctx; struct list_head *list; }; static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data) { struct flush_busy_ctx_data *flush_data = data; struct blk_mq_hw_ctx *hctx = flush_data->hctx; struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; enum hctx_type type = hctx->type; spin_lock(&ctx->lock); list_splice_tail_init(&ctx->rq_lists[type], flush_data->list); sbitmap_clear_bit(sb, bitnr); spin_unlock(&ctx->lock); return true; } /* * Process software queues that have been marked busy, splicing them * to the for-dispatch */ void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) { struct flush_busy_ctx_data data = { .hctx = hctx, .list = list, }; sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data); } struct dispatch_rq_data { struct blk_mq_hw_ctx *hctx; struct request *rq; }; static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr, void *data) { struct dispatch_rq_data *dispatch_data = data; struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; enum hctx_type type = hctx->type; spin_lock(&ctx->lock); if (!list_empty(&ctx->rq_lists[type])) { dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next); list_del_init(&dispatch_data->rq->queuelist); if (list_empty(&ctx->rq_lists[type])) sbitmap_clear_bit(sb, bitnr); } spin_unlock(&ctx->lock); return !dispatch_data->rq; } struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *start) { unsigned off = start ? start->index_hw[hctx->type] : 0; struct dispatch_rq_data data = { .hctx = hctx, .rq = NULL, }; __sbitmap_for_each_set(&hctx->ctx_map, off, dispatch_rq_from_ctx, &data); return data.rq; } bool __blk_mq_alloc_driver_tag(struct request *rq) { struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags; unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags; int tag; blk_mq_tag_busy(rq->mq_hctx); if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) { bt = &rq->mq_hctx->tags->breserved_tags; tag_offset = 0; } else { if (!hctx_may_queue(rq->mq_hctx, bt)) return false; } tag = __sbitmap_queue_get(bt); if (tag == BLK_MQ_NO_TAG) return false; rq->tag = tag + tag_offset; blk_mq_inc_active_requests(rq->mq_hctx); return true; } static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags, void *key) { struct blk_mq_hw_ctx *hctx; hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); spin_lock(&hctx->dispatch_wait_lock); if (!list_empty(&wait->entry)) { struct sbitmap_queue *sbq; list_del_init(&wait->entry); sbq = &hctx->tags->bitmap_tags; atomic_dec(&sbq->ws_active); } spin_unlock(&hctx->dispatch_wait_lock); blk_mq_run_hw_queue(hctx, true); return 1; } /* * Mark us waiting for a tag. For shared tags, this involves hooking us into * the tag wakeups. 
For non-shared tags, we can simply mark us needing a * restart. For both cases, take care to check the condition again after * marking us as waiting. */ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, struct request *rq) { struct sbitmap_queue *sbq; struct wait_queue_head *wq; wait_queue_entry_t *wait; bool ret; if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) && !(blk_mq_is_shared_tags(hctx->flags))) { blk_mq_sched_mark_restart_hctx(hctx); /* * It's possible that a tag was freed in the window between the * allocation failure and adding the hardware queue to the wait * queue. * * Don't clear RESTART here, someone else could have set it. * At most this will cost an extra queue run. */ return blk_mq_get_driver_tag(rq); } wait = &hctx->dispatch_wait; if (!list_empty_careful(&wait->entry)) return false; if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) sbq = &hctx->tags->breserved_tags; else sbq = &hctx->tags->bitmap_tags; wq = &bt_wait_ptr(sbq, hctx)->wait; spin_lock_irq(&wq->lock); spin_lock(&hctx->dispatch_wait_lock); if (!list_empty(&wait->entry)) { spin_unlock(&hctx->dispatch_wait_lock); spin_unlock_irq(&wq->lock); return false; } atomic_inc(&sbq->ws_active); wait->flags &= ~WQ_FLAG_EXCLUSIVE; __add_wait_queue(wq, wait); /* * Add one explicit barrier since blk_mq_get_driver_tag() may * not imply barrier in case of failure. * * Order adding us to wait queue and allocating driver tag. * * The pair is the one implied in sbitmap_queue_wake_up() which * orders clearing sbitmap tag bits and waitqueue_active() in * __sbitmap_queue_wake_up(), since waitqueue_active() is lockless * * Otherwise, re-order of adding wait queue and getting driver tag * may cause __sbitmap_queue_wake_up() to wake up nothing because * the waitqueue_active() may not observe us in wait queue. */ smp_mb(); /* * It's possible that a tag was freed in the window between the * allocation failure and adding the hardware queue to the wait * queue. */ ret = blk_mq_get_driver_tag(rq); if (!ret) { spin_unlock(&hctx->dispatch_wait_lock); spin_unlock_irq(&wq->lock); return false; } /* * We got a tag, remove ourselves from the wait queue to ensure * someone else gets the wakeup. 
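* * Note that the unlock order below mirrors the acquisition above: * hctx->dispatch_wait_lock nests inside wq->lock (taken with IRQs * disabled), so it is released first.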
*/ list_del_init(&wait->entry); atomic_dec(&sbq->ws_active); spin_unlock(&hctx->dispatch_wait_lock); spin_unlock_irq(&wq->lock); return true; } #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4 /* * Update dispatch busy with the Exponential Weighted Moving Average (EWMA): * - EWMA is a simple way of computing a running average value * - weights of 7/8 (history) and 1/8 (new sample) are applied so that the * average decays exponentially * - a factor of 4 (a left shift of the new sample) keeps the result from * rounding down to 0 too easily; its exact value doesn't matter much * because the EWMA decays exponentially anyway */ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) { unsigned int ewma; ewma = hctx->dispatch_busy; if (!ewma && !busy) return; ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1; if (busy) ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR; ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT; hctx->dispatch_busy = ewma; } #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */ static void blk_mq_handle_dev_resource(struct request *rq, struct list_head *list) { list_add(&rq->queuelist, list); __blk_mq_requeue_request(rq); } enum prep_dispatch { PREP_DISPATCH_OK, PREP_DISPATCH_NO_TAG, PREP_DISPATCH_NO_BUDGET, }; static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq, bool need_budget) { struct blk_mq_hw_ctx *hctx = rq->mq_hctx; int budget_token = -1; if (need_budget) { budget_token = blk_mq_get_dispatch_budget(rq->q); if (budget_token < 0) { blk_mq_put_driver_tag(rq); return PREP_DISPATCH_NO_BUDGET; } blk_mq_set_rq_budget_token(rq, budget_token); } if (!blk_mq_get_driver_tag(rq)) { /* * The initial allocation attempt failed, so we need to * rerun the hardware queue when a tag is freed. The * waitqueue takes care of that. If the queue is run * before we add this entry back on the dispatch list, * we'll re-run it below. */ if (!blk_mq_mark_tag_wait(hctx, rq)) { /* * The budgets of the other requests on the list are * released together in blk_mq_release_budgets() when * the partial dispatch is handled. */ if (need_budget) blk_mq_put_dispatch_budget(rq->q, budget_token); return PREP_DISPATCH_NO_TAG; } } return PREP_DISPATCH_OK; } /* used by blk_mq_dispatch_rq_list() to release all allocated budgets */ static void blk_mq_release_budgets(struct request_queue *q, struct list_head *list) { struct request *rq; list_for_each_entry(rq, list, queuelist) { int budget_token = blk_mq_get_rq_budget_token(rq); if (budget_token >= 0) blk_mq_put_dispatch_budget(q, budget_token); } } /* * blk_mq_commit_rqs will notify the driver, using bd->last, that there are * no more requests. (See the comment on commit_rqs in struct blk_mq_ops * for details.) * Note that we must call this explicitly in unusual cases: * 1) we did not queue everything initially scheduled to queue * 2) the last attempt to queue a request failed */ static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued, bool from_schedule) { if (hctx->queue->mq_ops->commit_rqs && queued) { trace_block_unplug(hctx->queue, queued, !from_schedule); hctx->queue->mq_ops->commit_rqs(hctx); } } /* * Returns true if we did some work AND can potentially do more. */ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, bool get_budget) { enum prep_dispatch prep; struct request_queue *q = hctx->queue; struct request *rq; int queued; blk_status_t ret = BLK_STS_OK; bool needs_resource = false; if (list_empty(list)) return false; /* * Now process all the entries, sending them to the driver.
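* Each request is handed to the driver in a struct blk_mq_queue_data whose * 'last' field says whether more requests follow in this batch, so the * driver can defer e.g. doorbell writes until the final one.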
*/ queued = 0; do { struct blk_mq_queue_data bd; rq = list_first_entry(list, struct request, queuelist); WARN_ON_ONCE(hctx != rq->mq_hctx); prep = blk_mq_prep_dispatch_rq(rq, get_budget); if (prep != PREP_DISPATCH_OK) break; list_del_init(&rq->queuelist); bd.rq = rq; bd.last = list_empty(list); ret = q->mq_ops->queue_rq(hctx, &bd); switch (ret) { case BLK_STS_OK: queued++; break; case BLK_STS_RESOURCE: needs_resource = true; fallthrough; case BLK_STS_DEV_RESOURCE: blk_mq_handle_dev_resource(rq, list); goto out; default: blk_mq_end_request(rq, ret); } } while (!list_empty(list)); out: /* If we didn't flush the entire list, we could have told the driver * there was more coming, but that turned out to be a lie. */ if (!list_empty(list) || ret != BLK_STS_OK) blk_mq_commit_rqs(hctx, queued, false); /* * Any items that need requeuing? Stuff them into hctx->dispatch, * that is where we will continue on next queue run. */ if (!list_empty(list)) { bool needs_restart; /* For non-shared tags, the RESTART check will suffice */ bool no_tag = prep == PREP_DISPATCH_NO_TAG && ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) || blk_mq_is_shared_tags(hctx->flags)); /* * If the caller allocated budgets, free the budgets of the * requests that have not yet been passed to the block driver. */ if (!get_budget) blk_mq_release_budgets(q, list); spin_lock(&hctx->lock); list_splice_tail_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); /* * Order adding requests to hctx->dispatch and checking * SCHED_RESTART flag. The pair of this smp_mb() is the one * in blk_mq_sched_restart(). Avoid restart code path to * miss the new added requests to hctx->dispatch, meantime * SCHED_RESTART is observed here. */ smp_mb(); /* * If SCHED_RESTART was set by the caller of this function and * it is no longer set that means that it was cleared by another * thread and hence that a queue rerun is needed. * * If 'no_tag' is set, that means that we failed getting * a driver tag with an I/O scheduler attached. If our dispatch * waitqueue is no longer active, ensure that we run the queue * AFTER adding our entries back to the list. * * If no I/O scheduler has been configured it is possible that * the hardware queue got stopped and restarted before requests * were pushed back onto the dispatch list. Rerun the queue to * avoid starvation. Notes: * - blk_mq_run_hw_queue() checks whether or not a queue has * been stopped before rerunning a queue. * - Some but not all block drivers stop a queue before * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq * and dm-rq. * * If driver returns BLK_STS_RESOURCE and SCHED_RESTART * bit is set, run queue after a delay to avoid IO stalls * that could otherwise occur if the queue is idle. We'll do * similar if we couldn't get budget or couldn't lock a zone * and SCHED_RESTART is set. 
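* * In short: run the queue again at once when no restart is pending (or our * tag wait was already cancelled), run it after BLK_MQ_RESOURCE_DELAY when * we ran out of resources or budget, and otherwise leave the rerun to * blk_mq_sched_restart().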
*/ needs_restart = blk_mq_sched_needs_restart(hctx); if (prep == PREP_DISPATCH_NO_BUDGET) needs_resource = true; if (!needs_restart || (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) blk_mq_run_hw_queue(hctx, true); else if (needs_resource) blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); blk_mq_update_dispatch_busy(hctx, true); return false; } blk_mq_update_dispatch_busy(hctx, false); return true; } static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) { int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask); if (cpu >= nr_cpu_ids) cpu = cpumask_first(hctx->cpumask); return cpu; } /* * ->next_cpu is always calculated from hctx->cpumask, so simply use * it for speeding up the check */ static bool blk_mq_hctx_empty_cpumask(struct blk_mq_hw_ctx *hctx) { return hctx->next_cpu >= nr_cpu_ids; } /* * It'd be great if the workqueue API had a way to pass * in a mask and had some smarts for more clever placement. * For now we just round-robin here, switching for every * BLK_MQ_CPU_WORK_BATCH queued items. */ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) { bool tried = false; int next_cpu = hctx->next_cpu; /* Switch to unbound if no allowable CPUs in this hctx */ if (hctx->queue->nr_hw_queues == 1 || blk_mq_hctx_empty_cpumask(hctx)) return WORK_CPU_UNBOUND; if (--hctx->next_cpu_batch <= 0) { select_cpu: next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, cpu_online_mask); if (next_cpu >= nr_cpu_ids) next_cpu = blk_mq_first_mapped_cpu(hctx); hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; } /* * Do unbound schedule if we can't find a online CPU for this hctx, * and it should only happen in the path of handling CPU DEAD. */ if (!cpu_online(next_cpu)) { if (!tried) { tried = true; goto select_cpu; } /* * Make sure to re-select CPU next time once after CPUs * in hctx->cpumask become online again. */ hctx->next_cpu = next_cpu; hctx->next_cpu_batch = 1; return WORK_CPU_UNBOUND; } hctx->next_cpu = next_cpu; return next_cpu; } /** * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously. * @hctx: Pointer to the hardware queue to run. * @msecs: Milliseconds of delay to wait before running the queue. * * Run a hardware queue asynchronously with a delay of @msecs. */ void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) { if (unlikely(blk_mq_hctx_stopped(hctx))) return; kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, msecs_to_jiffies(msecs)); } EXPORT_SYMBOL(blk_mq_delay_run_hw_queue); static inline bool blk_mq_hw_queue_need_run(struct blk_mq_hw_ctx *hctx) { bool need_run; /* * When queue is quiesced, we may be switching io scheduler, or * updating nr_hw_queues, or other things, and we can't run queue * any more, even blk_mq_hctx_has_pending() can't be called safely. * * And queue will be rerun in blk_mq_unquiesce_queue() if it is * quiesced. */ __blk_mq_run_dispatch_ops(hctx->queue, false, need_run = !blk_queue_quiesced(hctx->queue) && blk_mq_hctx_has_pending(hctx)); return need_run; } /** * blk_mq_run_hw_queue - Start to run a hardware queue. * @hctx: Pointer to the hardware queue to run. * @async: If we want to run the queue asynchronously. * * Check if the request queue is not in a quiesced state and if there are * pending requests to be sent. If this is true, run the queue to send requests * to hardware. */ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) { bool need_run; /* * We can't run the queue inline with interrupts disabled. 
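* Callers in interrupt context must therefore pass @async == true; the * WARN_ON_ONCE() below catches synchronous callers that get this wrong.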
*/ WARN_ON_ONCE(!async && in_interrupt()); might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING); need_run = blk_mq_hw_queue_need_run(hctx); if (!need_run) { unsigned long flags; /* * Synchronize with blk_mq_unquiesce_queue(): because we checked * whether the hw queue is quiesced locklessly above, we need to * take ->queue_lock here to make sure we see the up-to-date status * and don't miss rerunning the hw queue. */ spin_lock_irqsave(&hctx->queue->queue_lock, flags); need_run = blk_mq_hw_queue_need_run(hctx); spin_unlock_irqrestore(&hctx->queue->queue_lock, flags); if (!need_run) return; } if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) { blk_mq_delay_run_hw_queue(hctx, 0); return; } blk_mq_run_dispatch_ops(hctx->queue, blk_mq_sched_dispatch_requests(hctx)); } EXPORT_SYMBOL(blk_mq_run_hw_queue); /* * Return the preferred queue to dispatch from (if any) for a non-mq aware * IO scheduler. */ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) { struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); /* * If the IO scheduler does not respect hardware queues when * dispatching, we just don't bother with multiple HW queues and * dispatch from hctx for the current CPU since running multiple queues * just causes lock contention inside the scheduler and pointless cache * bouncing. */ struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT]; if (!blk_mq_hctx_stopped(hctx)) return hctx; return NULL; } /** * blk_mq_run_hw_queues - Run all hardware queues in a request queue. * @q: Pointer to the request queue to run. * @async: If we want to run the queue asynchronously. */ void blk_mq_run_hw_queues(struct request_queue *q, bool async) { struct blk_mq_hw_ctx *hctx, *sq_hctx; unsigned long i; sq_hctx = NULL; if (blk_queue_sq_sched(q)) sq_hctx = blk_mq_get_sq_hctx(q); queue_for_each_hw_ctx(q, hctx, i) { if (blk_mq_hctx_stopped(hctx)) continue; /* * Dispatch from this hctx either if there's no hctx preferred * by IO scheduler or if it has requests that bypass the * scheduler. */ if (!sq_hctx || sq_hctx == hctx || !list_empty_careful(&hctx->dispatch)) blk_mq_run_hw_queue(hctx, async); } } EXPORT_SYMBOL(blk_mq_run_hw_queues); /** * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously. * @q: Pointer to the request queue to run. * @msecs: Milliseconds of delay to wait before running the queues. */ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) { struct blk_mq_hw_ctx *hctx, *sq_hctx; unsigned long i; sq_hctx = NULL; if (blk_queue_sq_sched(q)) sq_hctx = blk_mq_get_sq_hctx(q); queue_for_each_hw_ctx(q, hctx, i) { if (blk_mq_hctx_stopped(hctx)) continue; /* * If there is already a run_work pending, leave the * pending delay untouched. Otherwise, a hctx can stall * if another hctx is re-delaying the other's work * before the work executes. */ if (delayed_work_pending(&hctx->run_work)) continue; /* * Dispatch from this hctx either if there's no hctx preferred * by IO scheduler or if it has requests that bypass the * scheduler. */ if (!sq_hctx || sq_hctx == hctx || !list_empty_careful(&hctx->dispatch)) blk_mq_delay_run_hw_queue(hctx, msecs); } } EXPORT_SYMBOL(blk_mq_delay_run_hw_queues); /* * This function is often used for pausing .queue_rq() by driver when * there isn't enough resource or some conditions aren't satisfied, and * BLK_STS_RESOURCE is usually returned. * * We do not guarantee that dispatch can be drained or blocked * after blk_mq_stop_hw_queue() returns. Please use * blk_mq_quiesce_queue() for that requirement.
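* * A typical ->queue_rq() pattern on a device resource shortage looks * roughly like the following sketch, where device_has_room() stands in * for a driver-specific check: * *	if (!device_has_room(hctx->driver_data)) { *		blk_mq_stop_hw_queue(hctx); *		return BLK_STS_RESOURCE; *	} * * with blk_mq_start_stopped_hw_queues() called from the driver's * completion or event path once resources are available again.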
*/ void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) { cancel_delayed_work(&hctx->run_work); set_bit(BLK_MQ_S_STOPPED, &hctx->state); } EXPORT_SYMBOL(blk_mq_stop_hw_queue); /* * This function is often used for pausing .queue_rq() by driver when * there isn't enough resource or some conditions aren't satisfied, and * BLK_STS_RESOURCE is usually returned. * * We do not guarantee that dispatch can be drained or blocked * after blk_mq_stop_hw_queues() returns. Please use * blk_mq_quiesce_queue() for that requirement. */ void blk_mq_stop_hw_queues(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) blk_mq_stop_hw_queue(hctx); } EXPORT_SYMBOL(blk_mq_stop_hw_queues); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) { clear_bit(BLK_MQ_S_STOPPED, &hctx->state); blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING); } EXPORT_SYMBOL(blk_mq_start_hw_queue); void blk_mq_start_hw_queues(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) blk_mq_start_hw_queue(hctx); } EXPORT_SYMBOL(blk_mq_start_hw_queues); void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) { if (!blk_mq_hctx_stopped(hctx)) return; clear_bit(BLK_MQ_S_STOPPED, &hctx->state); /* * Pairs with the smp_mb() in blk_mq_hctx_stopped() to order the * clearing of BLK_MQ_S_STOPPED above and the checking of dispatch * list in the subsequent routine. */ smp_mb__after_atomic(); blk_mq_run_hw_queue(hctx, async); } EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) blk_mq_start_stopped_hw_queue(hctx, async || (hctx->flags & BLK_MQ_F_BLOCKING)); } EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); static void blk_mq_run_work_fn(struct work_struct *work) { struct blk_mq_hw_ctx *hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); blk_mq_run_dispatch_ops(hctx->queue, blk_mq_sched_dispatch_requests(hctx)); } /** * blk_mq_request_bypass_insert - Insert a request at dispatch list. * @rq: Pointer to request to be inserted. * @flags: BLK_MQ_INSERT_* * * Should only be used carefully, when the caller knows we want to * bypass a potential IO scheduler on the target device. */ static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags) { struct blk_mq_hw_ctx *hctx = rq->mq_hctx; spin_lock(&hctx->lock); if (flags & BLK_MQ_INSERT_AT_HEAD) list_add(&rq->queuelist, &hctx->dispatch); else list_add_tail(&rq->queuelist, &hctx->dispatch); spin_unlock(&hctx->lock); } static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list, bool run_queue_async) { struct request *rq; enum hctx_type type = hctx->type; /* * Try to issue requests directly if the hw queue isn't busy to save an * extra enqueue & dequeue to the sw queue. 
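* (hctx->dispatch_busy is the EWMA kept by blk_mq_update_dispatch_busy(); * a non-zero value means the driver recently returned busy, so a direct * issue would likely bounce.)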
*/ if (!hctx->dispatch_busy && !run_queue_async) { blk_mq_run_dispatch_ops(hctx->queue, blk_mq_try_issue_list_directly(hctx, list)); if (list_empty(list)) goto out; } /* * preemption doesn't flush plug list, so it's possible ctx->cpu is * offline now */ list_for_each_entry(rq, list, queuelist) { BUG_ON(rq->mq_ctx != ctx); trace_block_rq_insert(rq); if (rq->cmd_flags & REQ_NOWAIT) run_queue_async = true; } spin_lock(&ctx->lock); list_splice_tail_init(list, &ctx->rq_lists[type]); blk_mq_hctx_mark_pending(hctx, ctx); spin_unlock(&ctx->lock); out: blk_mq_run_hw_queue(hctx, run_queue_async); } static void blk_mq_insert_request(struct request *rq, blk_insert_t flags) { struct request_queue *q = rq->q; struct blk_mq_ctx *ctx = rq->mq_ctx; struct blk_mq_hw_ctx *hctx = rq->mq_hctx; if (blk_rq_is_passthrough(rq)) { /* * Passthrough request have to be added to hctx->dispatch * directly. The device may be in a situation where it can't * handle FS request, and always returns BLK_STS_RESOURCE for * them, which gets them added to hctx->dispatch. * * If a passthrough request is required to unblock the queues, * and it is added to the scheduler queue, there is no chance to * dispatch it given we prioritize requests in hctx->dispatch. */ blk_mq_request_bypass_insert(rq, flags); } else if (req_op(rq) == REQ_OP_FLUSH) { /* * Firstly normal IO request is inserted to scheduler queue or * sw queue, meantime we add flush request to dispatch queue( * hctx->dispatch) directly and there is at most one in-flight * flush request for each hw queue, so it doesn't matter to add * flush request to tail or front of the dispatch queue. * * Secondly in case of NCQ, flush request belongs to non-NCQ * command, and queueing it will fail when there is any * in-flight normal IO request(NCQ command). When adding flush * rq to the front of hctx->dispatch, it is easier to introduce * extra time to flush rq's latency because of S_SCHED_RESTART * compared with adding to the tail of dispatch queue, then * chance of flush merge is increased, and less flush requests * will be issued to controller. It is observed that ~10% time * is saved in blktests block/004 on disk attached to AHCI/NCQ * drive when adding flush rq to the front of hctx->dispatch. * * Simply queue flush rq to the front of hctx->dispatch so that * intensive flush workloads can benefit in case of NCQ HW. */ blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD); } else if (q->elevator) { LIST_HEAD(list); WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG); list_add(&rq->queuelist, &list); q->elevator->type->ops.insert_requests(hctx, &list, flags); } else { trace_block_rq_insert(rq); spin_lock(&ctx->lock); if (flags & BLK_MQ_INSERT_AT_HEAD) list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]); else list_add_tail(&rq->queuelist, &ctx->rq_lists[hctx->type]); blk_mq_hctx_mark_pending(hctx, ctx); spin_unlock(&ctx->lock); } } static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, unsigned int nr_segs) { int err; if (bio->bi_opf & REQ_RAHEAD) rq->cmd_flags |= REQ_FAILFAST_MASK; rq->bio = rq->biotail = bio; rq->__sector = bio->bi_iter.bi_sector; rq->__data_len = bio->bi_iter.bi_size; rq->nr_phys_segments = nr_segs; if (bio_integrity(bio)) rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, bio); /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. 
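* (Hence the WARN_ON_ONCE() on the result rather than a real error path: * with __GFP_DIRECT_RECLAIM the allocation may sleep, but it should not * fail.)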
*/ err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); WARN_ON_ONCE(err); blk_account_io_start(rq); } static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, bool last) { struct request_queue *q = rq->q; struct blk_mq_queue_data bd = { .rq = rq, .last = last, }; blk_status_t ret; /* * For OK queue, we are done. For error, caller may kill it. * Any other error (busy), just add it to our list as we * previously would have done. */ ret = q->mq_ops->queue_rq(hctx, &bd); switch (ret) { case BLK_STS_OK: blk_mq_update_dispatch_busy(hctx, false); break; case BLK_STS_RESOURCE: case BLK_STS_DEV_RESOURCE: blk_mq_update_dispatch_busy(hctx, true); __blk_mq_requeue_request(rq); break; default: blk_mq_update_dispatch_busy(hctx, false); break; } return ret; } static bool blk_mq_get_budget_and_tag(struct request *rq) { int budget_token; budget_token = blk_mq_get_dispatch_budget(rq->q); if (budget_token < 0) return false; blk_mq_set_rq_budget_token(rq, budget_token); if (!blk_mq_get_driver_tag(rq)) { blk_mq_put_dispatch_budget(rq->q, budget_token); return false; } return true; } /** * blk_mq_try_issue_directly - Try to send a request directly to device driver. * @hctx: Pointer of the associated hardware queue. * @rq: Pointer to request to be sent. * * If the device has enough resources to accept a new request now, send the * request directly to device driver. Else, insert at hctx->dispatch queue, so * we can try send it another time in the future. Requests inserted at this * queue have higher priority. */ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq) { blk_status_t ret; if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { blk_mq_insert_request(rq, 0); blk_mq_run_hw_queue(hctx, false); return; } if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) { blk_mq_insert_request(rq, 0); blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT); return; } ret = __blk_mq_issue_directly(hctx, rq, true); switch (ret) { case BLK_STS_OK: break; case BLK_STS_RESOURCE: case BLK_STS_DEV_RESOURCE: blk_mq_request_bypass_insert(rq, 0); blk_mq_run_hw_queue(hctx, false); break; default: blk_mq_end_request(rq, ret); break; } } static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) { struct blk_mq_hw_ctx *hctx = rq->mq_hctx; if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { blk_mq_insert_request(rq, 0); blk_mq_run_hw_queue(hctx, false); return BLK_STS_OK; } if (!blk_mq_get_budget_and_tag(rq)) return BLK_STS_RESOURCE; return __blk_mq_issue_directly(hctx, rq, last); } static void blk_mq_issue_direct(struct rq_list *rqs) { struct blk_mq_hw_ctx *hctx = NULL; struct request *rq; int queued = 0; blk_status_t ret = BLK_STS_OK; while ((rq = rq_list_pop(rqs))) { bool last = rq_list_empty(rqs); if (hctx != rq->mq_hctx) { if (hctx) { blk_mq_commit_rqs(hctx, queued, false); queued = 0; } hctx = rq->mq_hctx; } ret = blk_mq_request_issue_directly(rq, last); switch (ret) { case BLK_STS_OK: queued++; break; case BLK_STS_RESOURCE: case BLK_STS_DEV_RESOURCE: blk_mq_request_bypass_insert(rq, 0); blk_mq_run_hw_queue(hctx, false); goto out; default: blk_mq_end_request(rq, ret); break; } } out: if (ret != BLK_STS_OK) blk_mq_commit_rqs(hctx, queued, false); } static void __blk_mq_flush_list(struct request_queue *q, struct rq_list *rqs) { if (blk_queue_quiesced(q)) return; q->mq_ops->queue_rqs(rqs); } static unsigned blk_mq_extract_queue_requests(struct rq_list *rqs, struct rq_list *queue_rqs) { struct request *rq = 
rq_list_pop(rqs); struct request_queue *this_q = rq->q; struct request **prev = &rqs->head; struct rq_list matched_rqs = {}; struct request *last = NULL; unsigned depth = 1; rq_list_add_tail(&matched_rqs, rq); while ((rq = *prev)) { if (rq->q == this_q) { /* move rq from rqs to matched_rqs */ *prev = rq->rq_next; rq_list_add_tail(&matched_rqs, rq); depth++; } else { /* leave rq in rqs */ prev = &rq->rq_next; last = rq; } } rqs->tail = last; *queue_rqs = matched_rqs; return depth; } static void blk_mq_dispatch_queue_requests(struct rq_list *rqs, unsigned depth) { struct request_queue *q = rq_list_peek(rqs)->q; trace_block_unplug(q, depth, true); /* * Peek first request and see if we have a ->queue_rqs() hook. * If we do, we can dispatch the whole list in one go. * We already know at this point that all requests belong to the * same queue, caller must ensure that's the case. */ if (q->mq_ops->queue_rqs) { blk_mq_run_dispatch_ops(q, __blk_mq_flush_list(q, rqs)); if (rq_list_empty(rqs)) return; } blk_mq_run_dispatch_ops(q, blk_mq_issue_direct(rqs)); } static void blk_mq_dispatch_list(struct rq_list *rqs, bool from_sched) { struct blk_mq_hw_ctx *this_hctx = NULL; struct blk_mq_ctx *this_ctx = NULL; struct rq_list requeue_list = {}; unsigned int depth = 0; bool is_passthrough = false; LIST_HEAD(list); do { struct request *rq = rq_list_pop(rqs); if (!this_hctx) { this_hctx = rq->mq_hctx; this_ctx = rq->mq_ctx; is_passthrough = blk_rq_is_passthrough(rq); } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx || is_passthrough != blk_rq_is_passthrough(rq)) { rq_list_add_tail(&requeue_list, rq); continue; } list_add_tail(&rq->queuelist, &list); depth++; } while (!rq_list_empty(rqs)); *rqs = requeue_list; trace_block_unplug(this_hctx->queue, depth, !from_sched); percpu_ref_get(&this_hctx->queue->q_usage_counter); /* passthrough requests should never be issued to the I/O scheduler */ if (is_passthrough) { spin_lock(&this_hctx->lock); list_splice_tail_init(&list, &this_hctx->dispatch); spin_unlock(&this_hctx->lock); blk_mq_run_hw_queue(this_hctx, from_sched); } else if (this_hctx->queue->elevator) { this_hctx->queue->elevator->type->ops.insert_requests(this_hctx, &list, 0); blk_mq_run_hw_queue(this_hctx, from_sched); } else { blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched); } percpu_ref_put(&this_hctx->queue->q_usage_counter); } static void blk_mq_dispatch_multiple_queue_requests(struct rq_list *rqs) { do { struct rq_list queue_rqs; unsigned depth; depth = blk_mq_extract_queue_requests(rqs, &queue_rqs); blk_mq_dispatch_queue_requests(&queue_rqs, depth); while (!rq_list_empty(&queue_rqs)) blk_mq_dispatch_list(&queue_rqs, false); } while (!rq_list_empty(rqs)); } void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) { unsigned int depth; /* * We may have been called recursively midway through handling * plug->mq_list via a schedule() in the driver's queue_rq() callback. * To avoid mq_list changing under our feet, clear rq_count early and * bail out specifically if rq_count is 0 rather than checking * whether the mq_list is empty. 
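* Clearing rq_count up front also makes any recursive invocation see an * empty plug and return immediately, instead of re-dispatching the list * we are already draining.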
*/ if (plug->rq_count == 0) return; depth = plug->rq_count; plug->rq_count = 0; if (!plug->has_elevator && !from_schedule) { if (plug->multiple_queues) { blk_mq_dispatch_multiple_queue_requests(&plug->mq_list); return; } blk_mq_dispatch_queue_requests(&plug->mq_list, depth); if (rq_list_empty(&plug->mq_list)) return; } do { blk_mq_dispatch_list(&plug->mq_list, from_schedule); } while (!rq_list_empty(&plug->mq_list)); } static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, struct list_head *list) { int queued = 0; blk_status_t ret = BLK_STS_OK; while (!list_empty(list)) { struct request *rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); ret = blk_mq_request_issue_directly(rq, list_empty(list)); switch (ret) { case BLK_STS_OK: queued++; break; case BLK_STS_RESOURCE: case BLK_STS_DEV_RESOURCE: blk_mq_request_bypass_insert(rq, 0); if (list_empty(list)) blk_mq_run_hw_queue(hctx, false); goto out; default: blk_mq_end_request(rq, ret); break; } } out: if (ret != BLK_STS_OK) blk_mq_commit_rqs(hctx, queued, false); } static bool blk_mq_attempt_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) { if (!blk_queue_nomerges(q) && bio_mergeable(bio)) { if (blk_attempt_plug_merge(q, bio, nr_segs)) return true; if (blk_mq_sched_bio_merge(q, bio, nr_segs)) return true; } return false; } static struct request *blk_mq_get_new_requests(struct request_queue *q, struct blk_plug *plug, struct bio *bio) { struct blk_mq_alloc_data data = { .q = q, .flags = 0, .shallow_depth = 0, .cmd_flags = bio->bi_opf, .rq_flags = 0, .nr_tags = 1, .cached_rqs = NULL, .ctx = NULL, .hctx = NULL }; struct request *rq; rq_qos_throttle(q, bio); if (plug) { data.nr_tags = plug->nr_ios; plug->nr_ios = 1; data.cached_rqs = &plug->cached_rqs; } rq = __blk_mq_alloc_requests(&data); if (unlikely(!rq)) rq_qos_cleanup(q, bio); return rq; } /* * Check if there is a suitable cached request and return it. */ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug, struct request_queue *q, blk_opf_t opf) { enum hctx_type type = blk_mq_get_hctx_type(opf); struct request *rq; if (!plug) return NULL; rq = rq_list_peek(&plug->cached_rqs); if (!rq || rq->q != q) return NULL; if (type != rq->mq_hctx->type && (type != HCTX_TYPE_READ || rq->mq_hctx->type != HCTX_TYPE_DEFAULT)) return NULL; if (op_is_flush(rq->cmd_flags) != op_is_flush(opf)) return NULL; return rq; } static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug, struct bio *bio) { if (rq_list_pop(&plug->cached_rqs) != rq) WARN_ON_ONCE(1); /* * If any qos ->throttle() end up blocking, we will have flushed the * plug and hence killed the cached_rq list as well. Pop this entry * before we throttle. */ rq_qos_throttle(rq->q, bio); blk_mq_rq_time_init(rq, blk_time_get_ns()); rq->cmd_flags = bio->bi_opf; INIT_LIST_HEAD(&rq->queuelist); } static bool bio_unaligned(const struct bio *bio, struct request_queue *q) { unsigned int bs_mask = queue_logical_block_size(q) - 1; /* .bi_sector of any zero sized bio need to be initialized */ if ((bio->bi_iter.bi_size & bs_mask) || ((bio->bi_iter.bi_sector << SECTOR_SHIFT) & bs_mask)) return true; return false; } /** * blk_mq_submit_bio - Create and send a request to block device. * @bio: Bio pointer. * * Builds up a request structure from @q and @bio and send to the device. 
The * request may not be queued directly to hardware if: * * This request can be merged with another one * * We want to place request at plug queue for possible future merging * * There is an IO scheduler active at this queue * * It will not queue the request if there is an error with the bio, or at the * request creation. */ void blk_mq_submit_bio(struct bio *bio) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct blk_plug *plug = current->plug; const int is_sync = op_is_sync(bio->bi_opf); struct blk_mq_hw_ctx *hctx; unsigned int nr_segs; struct request *rq; blk_status_t ret; /* * If the plug has a cached request for this queue, try to use it. */ rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf); /* * A BIO that was released from a zone write plug has already been * through the preparation in this function, already holds a reference * on the queue usage counter, and is the only write BIO in-flight for * the target zone. Go straight to preparing a request for it. */ if (bio_zone_write_plugging(bio)) { nr_segs = bio->__bi_nr_segments; if (rq) blk_queue_exit(q); goto new_request; } /* * The cached request already holds a q_usage_counter reference and we * don't have to acquire a new one if we use it. */ if (!rq) { if (unlikely(bio_queue_enter(bio))) return; } /* * Device reconfiguration may change logical block size or reduce the * number of poll queues, so the checks for alignment and poll support * have to be done with queue usage counter held. */ if (unlikely(bio_unaligned(bio, q))) { bio_io_error(bio); goto queue_exit; } if ((bio->bi_opf & REQ_POLLED) && !blk_mq_can_poll(q)) { bio->bi_status = BLK_STS_NOTSUPP; bio_endio(bio); goto queue_exit; } bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); if (!bio) goto queue_exit; if (!bio_integrity_prep(bio)) goto queue_exit; if (blk_mq_attempt_bio_merge(q, bio, nr_segs)) goto queue_exit; if (blk_queue_is_zoned(q) && blk_zone_plug_bio(bio, nr_segs)) goto queue_exit; new_request: if (rq) { blk_mq_use_cached_rq(rq, plug, bio); } else { rq = blk_mq_get_new_requests(q, plug, bio); if (unlikely(!rq)) { if (bio->bi_opf & REQ_NOWAIT) bio_wouldblock_error(bio); goto queue_exit; } } trace_block_getrq(bio); rq_qos_track(q, rq, bio); blk_mq_bio_to_request(rq, bio, nr_segs); ret = blk_crypto_rq_get_keyslot(rq); if (ret != BLK_STS_OK) { bio->bi_status = ret; bio_endio(bio); blk_mq_free_request(rq); return; } if (bio_zone_write_plugging(bio)) blk_zone_write_plug_init_request(rq); if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq)) return; if (plug) { blk_add_rq_to_plug(plug, rq); return; } hctx = rq->mq_hctx; if ((rq->rq_flags & RQF_USE_SCHED) || (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) { blk_mq_insert_request(rq, 0); blk_mq_run_hw_queue(hctx, true); } else { blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq)); } return; queue_exit: /* * Don't drop the queue reference if we were trying to use a cached * request and thus didn't acquire one. */ if (!rq) blk_queue_exit(q); } #ifdef CONFIG_BLK_MQ_STACKING /** * blk_insert_cloned_request - Helper for stacking drivers to submit a request * @rq: the request being queued */ blk_status_t blk_insert_cloned_request(struct request *rq) { struct request_queue *q = rq->q; unsigned int max_sectors = blk_queue_get_max_sectors(rq); unsigned int max_segments = blk_rq_get_max_segments(rq); blk_status_t ret; if (blk_rq_sectors(rq) > max_sectors) { /* * SCSI device does not have a good way to return if * Write Same/Zero is actually supported. 
If a device rejects * a non-read/write command (discard, write same,etc.) the * low-level device driver will set the relevant queue limit to * 0 to prevent blk-lib from issuing more of the offending * operations. Commands queued prior to the queue limit being * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O * errors being propagated to upper layers. */ if (max_sectors == 0) return BLK_STS_NOTSUPP; printk(KERN_ERR "%s: over max size limit. (%u > %u)\n", __func__, blk_rq_sectors(rq), max_sectors); return BLK_STS_IOERR; } /* * The queue settings related to segment counting may differ from the * original queue. */ rq->nr_phys_segments = blk_recalc_rq_segments(rq); if (rq->nr_phys_segments > max_segments) { printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n", __func__, rq->nr_phys_segments, max_segments); return BLK_STS_IOERR; } if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq))) return BLK_STS_IOERR; ret = blk_crypto_rq_get_keyslot(rq); if (ret != BLK_STS_OK) return ret; blk_account_io_start(rq); /* * Since we have a scheduler attached on the top device, * bypass a potential scheduler on the bottom device for * insert. */ blk_mq_run_dispatch_ops(q, ret = blk_mq_request_issue_directly(rq, true)); if (ret) blk_account_io_done(rq, blk_time_get_ns()); return ret; } EXPORT_SYMBOL_GPL(blk_insert_cloned_request); /** * blk_rq_unprep_clone - Helper function to free all bios in a cloned request * @rq: the clone request to be cleaned up * * Description: * Free all bios in @rq for a cloned request. */ void blk_rq_unprep_clone(struct request *rq) { struct bio *bio; while ((bio = rq->bio) != NULL) { rq->bio = bio->bi_next; bio_put(bio); } } EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); /** * blk_rq_prep_clone - Helper function to setup clone request * @rq: the request to be setup * @rq_src: original request to be cloned * @bs: bio_set that bios for clone are allocated from * @gfp_mask: memory allocation mask for bio * @bio_ctr: setup function to be called for each clone bio. * Returns %0 for success, non %0 for failure. * @data: private data to be passed to @bio_ctr * * Description: * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. * Also, pages which the original bios are pointing to are not copied * and the cloned bios just point same pages. * So cloned bios must be completed before original bios, which means * the caller must complete @rq before @rq_src. */ int blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, int (*bio_ctr)(struct bio *, struct bio *, void *), void *data) { struct bio *bio_src; if (!bs) bs = &fs_bio_set; __rq_for_each_bio(bio_src, rq_src) { struct bio *bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask, bs); if (!bio) goto free_and_out; if (bio_ctr && bio_ctr(bio, bio_src, data)) { bio_put(bio); goto free_and_out; } if (rq->bio) { rq->biotail->bi_next = bio; rq->biotail = bio; } else { rq->bio = rq->biotail = bio; } } /* Copy attributes of the original request to the clone request. 
*/ rq->__sector = blk_rq_pos(rq_src); rq->__data_len = blk_rq_bytes(rq_src); if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) { rq->rq_flags |= RQF_SPECIAL_PAYLOAD; rq->special_vec = rq_src->special_vec; } rq->nr_phys_segments = rq_src->nr_phys_segments; rq->nr_integrity_segments = rq_src->nr_integrity_segments; if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0) goto free_and_out; return 0; free_and_out: blk_rq_unprep_clone(rq); return -ENOMEM; } EXPORT_SYMBOL_GPL(blk_rq_prep_clone); #endif /* CONFIG_BLK_MQ_STACKING */ /* * Steal bios from a request and add them to a bio list. * The request must not have been partially completed before. */ void blk_steal_bios(struct bio_list *list, struct request *rq) { if (rq->bio) { if (list->tail) list->tail->bi_next = rq->bio; else list->head = rq->bio; list->tail = rq->biotail; rq->bio = NULL; rq->biotail = NULL; } rq->__data_len = 0; } EXPORT_SYMBOL_GPL(blk_steal_bios); static size_t order_to_size(unsigned int order) { return (size_t)PAGE_SIZE << order; } /* called before freeing request pool in @tags */ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, struct blk_mq_tags *tags) { struct page *page; unsigned long flags; /* * There is no need to clear mapping if driver tags is not initialized * or the mapping belongs to the driver tags. */ if (!drv_tags || drv_tags == tags) return; list_for_each_entry(page, &tags->page_list, lru) { unsigned long start = (unsigned long)page_address(page); unsigned long end = start + order_to_size(page->private); int i; for (i = 0; i < drv_tags->nr_tags; i++) { struct request *rq = drv_tags->rqs[i]; unsigned long rq_addr = (unsigned long)rq; if (rq_addr >= start && rq_addr < end) { WARN_ON_ONCE(req_ref_read(rq) != 0); cmpxchg(&drv_tags->rqs[i], rq, NULL); } } } /* * Wait until all pending iteration is done. * * Request reference is cleared and it is guaranteed to be observed * after the ->lock is released. */ spin_lock_irqsave(&drv_tags->lock, flags); spin_unlock_irqrestore(&drv_tags->lock, flags); } void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) { struct blk_mq_tags *drv_tags; struct page *page; if (list_empty(&tags->page_list)) return; if (blk_mq_is_shared_tags(set->flags)) drv_tags = set->shared_tags; else drv_tags = set->tags[hctx_idx]; if (tags->static_rqs && set->ops->exit_request) { int i; for (i = 0; i < tags->nr_tags; i++) { struct request *rq = tags->static_rqs[i]; if (!rq) continue; set->ops->exit_request(set, rq, hctx_idx); tags->static_rqs[i] = NULL; } } blk_mq_clear_rq_mapping(drv_tags, tags); while (!list_empty(&tags->page_list)) { page = list_first_entry(&tags->page_list, struct page, lru); list_del_init(&page->lru); /* * Remove kmemleak object previously allocated in * blk_mq_alloc_rqs(). 
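* kmemleak_free() only drops the tracking object; the pages themselves * are returned by __free_pages() right below.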
*/ kmemleak_free(page_address(page)); __free_pages(page, page->private); } } void blk_mq_free_rq_map(struct blk_mq_tags *tags) { kfree(tags->rqs); tags->rqs = NULL; kfree(tags->static_rqs); tags->static_rqs = NULL; blk_mq_free_tags(tags); } static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set, unsigned int hctx_idx) { int i; for (i = 0; i < set->nr_maps; i++) { unsigned int start = set->map[i].queue_offset; unsigned int end = start + set->map[i].nr_queues; if (hctx_idx >= start && hctx_idx < end) break; } if (i >= set->nr_maps) i = HCTX_TYPE_DEFAULT; return i; } static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set, unsigned int hctx_idx) { enum hctx_type type = hctx_idx_to_type(set, hctx_idx); return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx); } static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, unsigned int hctx_idx, unsigned int nr_tags, unsigned int reserved_tags) { int node = blk_mq_get_hctx_node(set, hctx_idx); struct blk_mq_tags *tags; if (node == NUMA_NO_NODE) node = set->numa_node; tags = blk_mq_init_tags(nr_tags, reserved_tags, set->flags, node); if (!tags) return NULL; tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node); if (!tags->rqs) goto err_free_tags; tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node); if (!tags->static_rqs) goto err_free_rqs; return tags; err_free_rqs: kfree(tags->rqs); err_free_tags: blk_mq_free_tags(tags); return NULL; } static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, int node) { int ret; if (set->ops->init_request) { ret = set->ops->init_request(set, rq, hctx_idx, node); if (ret) return ret; } WRITE_ONCE(rq->state, MQ_RQ_IDLE); return 0; } static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx, unsigned int depth) { unsigned int i, j, entries_per_page, max_order = 4; int node = blk_mq_get_hctx_node(set, hctx_idx); size_t rq_size, left; if (node == NUMA_NO_NODE) node = set->numa_node; INIT_LIST_HEAD(&tags->page_list); /* * rq_size is the size of the request plus driver payload, rounded * to the cacheline size */ rq_size = round_up(sizeof(struct request) + set->cmd_size, cache_line_size()); left = rq_size * depth; for (i = 0; i < depth; ) { int this_order = max_order; struct page *page; int to_do; void *p; while (this_order && left < order_to_size(this_order - 1)) this_order--; do { page = alloc_pages_node(node, GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, this_order); if (page) break; if (!this_order--) break; if (order_to_size(this_order) < rq_size) break; } while (1); if (!page) goto fail; page->private = this_order; list_add_tail(&page->lru, &tags->page_list); p = page_address(page); /* * Allow kmemleak to scan these pages as they contain pointers * to additional allocations like via ops->init_request(). 
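* Each page block then holds order_to_size(this_order) / rq_size * requests, where rq_size is sizeof(struct request) + cmd_size rounded * up to a cacheline (computed above).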
*/ kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); entries_per_page = order_to_size(this_order) / rq_size; to_do = min(entries_per_page, depth - i); left -= to_do * rq_size; for (j = 0; j < to_do; j++) { struct request *rq = p; tags->static_rqs[i] = rq; if (blk_mq_init_request(set, rq, hctx_idx, node)) { tags->static_rqs[i] = NULL; goto fail; } p += rq_size; i++; } } return 0; fail: blk_mq_free_rqs(set, tags, hctx_idx); return -ENOMEM; } struct rq_iter_data { struct blk_mq_hw_ctx *hctx; bool has_rq; }; static bool blk_mq_has_request(struct request *rq, void *data) { struct rq_iter_data *iter_data = data; if (rq->mq_hctx != iter_data->hctx) return true; iter_data->has_rq = true; return false; } static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) { struct blk_mq_tags *tags = hctx->sched_tags ? hctx->sched_tags : hctx->tags; struct rq_iter_data data = { .hctx = hctx, }; blk_mq_all_tag_iter(tags, blk_mq_has_request, &data); return data.has_rq; } static bool blk_mq_hctx_has_online_cpu(struct blk_mq_hw_ctx *hctx, unsigned int this_cpu) { enum hctx_type type = hctx->type; int cpu; /* * hctx->cpumask has to rule out isolated CPUs, but userspace still * might submit IOs on these isolated CPUs, so use the queue map to * check if all CPUs mapped to this hctx are offline */ for_each_online_cpu(cpu) { struct blk_mq_hw_ctx *h = blk_mq_map_queue_type(hctx->queue, type, cpu); if (h != hctx) continue; /* this hctx has at least one online CPU */ if (this_cpu != cpu) return true; } return false; } static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node) { struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_online); if (blk_mq_hctx_has_online_cpu(hctx, cpu)) return 0; /* * Prevent new request from being allocated on the current hctx. * * The smp_mb__after_atomic() Pairs with the implied barrier in * test_and_set_bit_lock in sbitmap_get(). Ensures the inactive flag is * seen once we return from the tag allocator. */ set_bit(BLK_MQ_S_INACTIVE, &hctx->state); smp_mb__after_atomic(); /* * Try to grab a reference to the queue and wait for any outstanding * requests. If we could not grab a reference the queue has been * frozen and there are no requests. */ if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { while (blk_mq_hctx_has_requests(hctx)) msleep(5); percpu_ref_put(&hctx->queue->q_usage_counter); } return 0; } /* * Check if one CPU is mapped to the specified hctx * * Isolated CPUs have been ruled out from hctx->cpumask, which is supposed * to be used for scheduling kworker only. For other usage, please call this * helper for checking if one CPU belongs to the specified hctx */ static bool blk_mq_cpu_mapped_to_hctx(unsigned int cpu, const struct blk_mq_hw_ctx *hctx) { struct blk_mq_hw_ctx *mapped_hctx = blk_mq_map_queue_type(hctx->queue, hctx->type, cpu); return mapped_hctx == hctx; } static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node) { struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_online); if (blk_mq_cpu_mapped_to_hctx(cpu, hctx)) clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); return 0; } /* * 'cpu' is going away. splice any existing rq_list entries from this * software queue to the hw queue dispatch list, and ensure that it * gets run. 
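* * This is the CPUHP_BLK_MQ_DEAD callback, registered per-hctx through * hctx->cpuhp_dead in __blk_mq_add_cpuhp() below.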
*/ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) { struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; LIST_HEAD(tmp); enum hctx_type type; hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); if (!blk_mq_cpu_mapped_to_hctx(cpu, hctx)) return 0; ctx = __blk_mq_get_ctx(hctx->queue, cpu); type = hctx->type; spin_lock(&ctx->lock); if (!list_empty(&ctx->rq_lists[type])) { list_splice_init(&ctx->rq_lists[type], &tmp); blk_mq_hctx_clear_pending(hctx, ctx); } spin_unlock(&ctx->lock); if (list_empty(&tmp)) return 0; spin_lock(&hctx->lock); list_splice_tail_init(&tmp, &hctx->dispatch); spin_unlock(&hctx->lock); blk_mq_run_hw_queue(hctx, true); return 0; } static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) { lockdep_assert_held(&blk_mq_cpuhp_lock); if (!(hctx->flags & BLK_MQ_F_STACKING) && !hlist_unhashed(&hctx->cpuhp_online)) { cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, &hctx->cpuhp_online); INIT_HLIST_NODE(&hctx->cpuhp_online); } if (!hlist_unhashed(&hctx->cpuhp_dead)) { cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); INIT_HLIST_NODE(&hctx->cpuhp_dead); } } static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) { mutex_lock(&blk_mq_cpuhp_lock); __blk_mq_remove_cpuhp(hctx); mutex_unlock(&blk_mq_cpuhp_lock); } static void __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx) { lockdep_assert_held(&blk_mq_cpuhp_lock); if (!(hctx->flags & BLK_MQ_F_STACKING) && hlist_unhashed(&hctx->cpuhp_online)) cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, &hctx->cpuhp_online); if (hlist_unhashed(&hctx->cpuhp_dead)) cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); } static void __blk_mq_remove_cpuhp_list(struct list_head *head) { struct blk_mq_hw_ctx *hctx; lockdep_assert_held(&blk_mq_cpuhp_lock); list_for_each_entry(hctx, head, hctx_list) __blk_mq_remove_cpuhp(hctx); } /* * Unregister cpuhp callbacks from exited hw queues * * Safe to call if this `request_queue` is live */ static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q) { LIST_HEAD(hctx_list); spin_lock(&q->unused_hctx_lock); list_splice_init(&q->unused_hctx_list, &hctx_list); spin_unlock(&q->unused_hctx_lock); mutex_lock(&blk_mq_cpuhp_lock); __blk_mq_remove_cpuhp_list(&hctx_list); mutex_unlock(&blk_mq_cpuhp_lock); spin_lock(&q->unused_hctx_lock); list_splice(&hctx_list, &q->unused_hctx_list); spin_unlock(&q->unused_hctx_lock); } /* * Register cpuhp callbacks from all hw queues * * Safe to call if this `request_queue` is live */ static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; mutex_lock(&blk_mq_cpuhp_lock); queue_for_each_hw_ctx(q, hctx, i) __blk_mq_add_cpuhp(hctx); mutex_unlock(&blk_mq_cpuhp_lock); } /* * Before freeing hw queue, clearing the flush request reference in * tags->rqs[] for avoiding potential UAF. */ static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags, unsigned int queue_depth, struct request *flush_rq) { int i; unsigned long flags; /* The hw queue may not be mapped yet */ if (!tags) return; WARN_ON_ONCE(req_ref_read(flush_rq) != 0); for (i = 0; i < queue_depth; i++) cmpxchg(&tags->rqs[i], flush_rq, NULL); /* * Wait until all pending iteration is done. * * Request reference is cleared and it is guaranteed to be observed * after the ->lock is released. 
*/ spin_lock_irqsave(&tags->lock, flags); spin_unlock_irqrestore(&tags->lock, flags); } /* hctx->ctxs will be freed in queue's release handler */ static void blk_mq_exit_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { struct request *flush_rq = hctx->fq->flush_rq; if (blk_mq_hw_queue_mapped(hctx)) blk_mq_tag_idle(hctx); if (blk_queue_init_done(q)) blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], set->queue_depth, flush_rq); if (set->ops->exit_request) set->ops->exit_request(set, flush_rq, hctx_idx); if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); xa_erase(&q->hctx_table, hctx_idx); spin_lock(&q->unused_hctx_lock); list_add(&hctx->hctx_list, &q->unused_hctx_list); spin_unlock(&q->unused_hctx_lock); } static void blk_mq_exit_hw_queues(struct request_queue *q, struct blk_mq_tag_set *set, int nr_queue) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) { if (i == nr_queue) break; blk_mq_remove_cpuhp(hctx); blk_mq_exit_hctx(q, set, hctx, i); } } static int blk_mq_init_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) { hctx->queue_num = hctx_idx; hctx->tags = set->tags[hctx_idx]; if (set->ops->init_hctx && set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) goto fail; if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, hctx->numa_node)) goto exit_hctx; if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) goto exit_flush_rq; return 0; exit_flush_rq: if (set->ops->exit_request) set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); exit_hctx: if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); fail: return -1; } static struct blk_mq_hw_ctx * blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, int node) { struct blk_mq_hw_ctx *hctx; gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node); if (!hctx) goto fail_alloc_hctx; if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) goto free_hctx; atomic_set(&hctx->nr_active, 0); if (node == NUMA_NO_NODE) node = set->numa_node; hctx->numa_node = node; INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); spin_lock_init(&hctx->lock); INIT_LIST_HEAD(&hctx->dispatch); INIT_HLIST_NODE(&hctx->cpuhp_dead); INIT_HLIST_NODE(&hctx->cpuhp_online); hctx->queue = q; hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; INIT_LIST_HEAD(&hctx->hctx_list); /* * Allocate space for all possible cpus to avoid allocation at * runtime */ hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), gfp, node); if (!hctx->ctxs) goto free_cpumask; if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), gfp, node, false, false)) goto free_ctxs; hctx->nr_ctx = 0; spin_lock_init(&hctx->dispatch_wait_lock); init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); INIT_LIST_HEAD(&hctx->dispatch_wait.entry); hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); if (!hctx->fq) goto free_bitmap; blk_mq_hctx_kobj_init(hctx); return hctx; free_bitmap: sbitmap_free(&hctx->ctx_map); free_ctxs: kfree(hctx->ctxs); free_cpumask: free_cpumask_var(hctx->cpumask); free_hctx: kfree(hctx); fail_alloc_hctx: return NULL; } static void blk_mq_init_cpu_queues(struct request_queue *q, unsigned int nr_hw_queues) { struct blk_mq_tag_set *set = q->tag_set; unsigned int i, j; for_each_possible_cpu(i) { struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); struct blk_mq_hw_ctx *hctx; int k; 
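/*
 * Set up this CPU's software queue context: one pending-request list
 * per hctx type. The second loop below also pins a hardware context's
 * NUMA node to this CPU's node when the device exposes more than one
 * hardware queue; otherwise it stays on the device's home node.
 */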
__ctx->cpu = i; spin_lock_init(&__ctx->lock); for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) INIT_LIST_HEAD(&__ctx->rq_lists[k]); __ctx->queue = q; /* * Set local node, IFF we have more than one hw queue. If * not, we remain on the home node of the device */ for (j = 0; j < set->nr_maps; j++) { hctx = blk_mq_map_queue_type(q, j, i); if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) hctx->numa_node = cpu_to_node(i); } } } struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, unsigned int hctx_idx, unsigned int depth) { struct blk_mq_tags *tags; int ret; tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags); if (!tags) return NULL; ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); if (ret) { blk_mq_free_rq_map(tags); return NULL; } return tags; } static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, int hctx_idx) { if (blk_mq_is_shared_tags(set->flags)) { set->tags[hctx_idx] = set->shared_tags; return true; } set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, set->queue_depth); return set->tags[hctx_idx]; } void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) { if (tags) { blk_mq_free_rqs(set, tags, hctx_idx); blk_mq_free_rq_map(tags); } } static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, unsigned int hctx_idx) { if (!blk_mq_is_shared_tags(set->flags)) blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); set->tags[hctx_idx] = NULL; } static void blk_mq_map_swqueue(struct request_queue *q) { unsigned int j, hctx_idx; unsigned long i; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; struct blk_mq_tag_set *set = q->tag_set; queue_for_each_hw_ctx(q, hctx, i) { cpumask_clear(hctx->cpumask); hctx->nr_ctx = 0; hctx->dispatch_from = NULL; } /* * Map software to hardware queues. * * If the cpu isn't present, the cpu is mapped to first hctx. */ for_each_possible_cpu(i) { ctx = per_cpu_ptr(q->queue_ctx, i); for (j = 0; j < set->nr_maps; j++) { if (!set->map[j].nr_queues) { ctx->hctxs[j] = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, i); continue; } hctx_idx = set->map[j].mq_map[i]; /* unmapped hw queue can be remapped after CPU topo changed */ if (!set->tags[hctx_idx] && !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) { /* * If tags initialization fail for some hctx, * that hctx won't be brought online. In this * case, remap the current ctx to hctx[0] which * is guaranteed to always have tags allocated */ set->map[j].mq_map[i] = 0; } hctx = blk_mq_map_queue_type(q, j, i); ctx->hctxs[j] = hctx; /* * If the CPU is already set in the mask, then we've * mapped this one already. This can happen if * devices share queues across queue maps. */ if (cpumask_test_cpu(i, hctx->cpumask)) continue; cpumask_set_cpu(i, hctx->cpumask); hctx->type = j; ctx->index_hw[hctx->type] = hctx->nr_ctx; hctx->ctxs[hctx->nr_ctx++] = ctx; /* * If the nr_ctx type overflows, we have exceeded the * amount of sw queues we can support. */ BUG_ON(!hctx->nr_ctx); } for (; j < HCTX_MAX_TYPES; j++) ctx->hctxs[j] = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, i); } queue_for_each_hw_ctx(q, hctx, i) { int cpu; /* * If no software queues are mapped to this hardware queue, * disable it and free the request entries. */ if (!hctx->nr_ctx) { /* Never unmap queue 0. 
We need it as a * fallback in case allocation for a new remap * fails */ if (i) __blk_mq_free_map_and_rqs(set, i); hctx->tags = NULL; continue; } hctx->tags = set->tags[i]; WARN_ON(!hctx->tags); /* * Set the map size to the number of mapped software queues. * This is more accurate and more efficient than looping * over all possibly mapped software queues. */ sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); /* * Rule out isolated CPUs from hctx->cpumask to avoid * running block kworker on isolated CPUs */ for_each_cpu(cpu, hctx->cpumask) { if (cpu_is_isolated(cpu)) cpumask_clear_cpu(cpu, hctx->cpumask); } /* * Initialize batch round-robin counts */ hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; } } /* * Caller needs to ensure that we're either frozen/quiesced, or that * the queue isn't live yet. */ static void queue_set_hctx_shared(struct request_queue *q, bool shared) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) { if (shared) { hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; } else { blk_mq_tag_idle(hctx); hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; } } } static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set, bool shared) { struct request_queue *q; unsigned int memflags; lockdep_assert_held(&set->tag_list_lock); list_for_each_entry(q, &set->tag_list, tag_set_list) { memflags = blk_mq_freeze_queue(q); queue_set_hctx_shared(q, shared); blk_mq_unfreeze_queue(q, memflags); } } static void blk_mq_del_queue_tag_set(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; mutex_lock(&set->tag_list_lock); list_del(&q->tag_set_list); if (list_is_singular(&set->tag_list)) { /* just transitioned to unshared */ set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; /* update existing queue */ blk_mq_update_tag_set_shared(set, false); } mutex_unlock(&set->tag_list_lock); INIT_LIST_HEAD(&q->tag_set_list); } static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, struct request_queue *q) { mutex_lock(&set->tag_list_lock); /* * Check to see if we're transitioning to shared (from 1 to 2 queues). */ if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; /* update existing queue */ blk_mq_update_tag_set_shared(set, true); } if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) queue_set_hctx_shared(q, true); list_add_tail(&q->tag_set_list, &set->tag_list); mutex_unlock(&set->tag_list_lock); } /* All allocations will be freed in release handler of q->mq_kobj */ static int blk_mq_alloc_ctxs(struct request_queue *q) { struct blk_mq_ctxs *ctxs; int cpu; ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); if (!ctxs) return -ENOMEM; ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); if (!ctxs->queue_ctx) goto fail; for_each_possible_cpu(cpu) { struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); ctx->ctxs = ctxs; } q->mq_kobj = &ctxs->kobj; q->queue_ctx = ctxs->queue_ctx; return 0; fail: kfree(ctxs); return -ENOMEM; } /* * This is the actual release handler for mq, but we do it from the * request queue's release handler to avoid use-after-free and * headaches, because q->mq_kobj shouldn't have been introduced, * but we can't group the ctx/kctx kobjects without it.
*/ void blk_mq_release(struct request_queue *q) { struct blk_mq_hw_ctx *hctx, *next; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); /* all hctx are in .unused_hctx_list now */ list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { list_del_init(&hctx->hctx_list); kobject_put(&hctx->kobj); } xa_destroy(&q->hctx_table); /* * release .mq_kobj and sw queue's kobject now because * both share lifetime with request queue. */ blk_mq_sysfs_deinit(q); } struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set, struct queue_limits *lim, void *queuedata) { struct queue_limits default_lim = { }; struct request_queue *q; int ret; if (!lim) lim = &default_lim; lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT; if (set->nr_maps > HCTX_TYPE_POLL) lim->features |= BLK_FEAT_POLL; q = blk_alloc_queue(lim, set->numa_node); if (IS_ERR(q)) return q; q->queuedata = queuedata; ret = blk_mq_init_allocated_queue(set, q); if (ret) { blk_put_queue(q); return ERR_PTR(ret); } return q; } EXPORT_SYMBOL(blk_mq_alloc_queue); /** * blk_mq_destroy_queue - shutdown a request queue * @q: request queue to shutdown * * This shuts down a request queue allocated by blk_mq_alloc_queue(). All future * requests will be failed with -ENODEV. The caller is responsible for dropping * the reference from blk_mq_alloc_queue() by calling blk_put_queue(). * * Context: can sleep */ void blk_mq_destroy_queue(struct request_queue *q) { WARN_ON_ONCE(!queue_is_mq(q)); WARN_ON_ONCE(blk_queue_registered(q)); might_sleep(); blk_queue_flag_set(QUEUE_FLAG_DYING, q); blk_queue_start_drain(q); blk_mq_freeze_queue_wait(q); blk_sync_queue(q); blk_mq_cancel_work_sync(q); blk_mq_exit_queue(q); } EXPORT_SYMBOL(blk_mq_destroy_queue); struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, struct queue_limits *lim, void *queuedata, struct lock_class_key *lkclass) { struct request_queue *q; struct gendisk *disk; q = blk_mq_alloc_queue(set, lim, queuedata); if (IS_ERR(q)) return ERR_CAST(q); disk = __alloc_disk_node(q, set->numa_node, lkclass); if (!disk) { blk_mq_destroy_queue(q); blk_put_queue(q); return ERR_PTR(-ENOMEM); } set_bit(GD_OWNS_QUEUE, &disk->state); return disk; } EXPORT_SYMBOL(__blk_mq_alloc_disk); struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, struct lock_class_key *lkclass) { struct gendisk *disk; if (!blk_get_queue(q)) return NULL; disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass); if (!disk) blk_put_queue(q); return disk; } EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue); /* * Only hctx removed from cpuhp list can be reused */ static bool blk_mq_hctx_is_reusable(struct blk_mq_hw_ctx *hctx) { return hlist_unhashed(&hctx->cpuhp_online) && hlist_unhashed(&hctx->cpuhp_dead); } static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( struct blk_mq_tag_set *set, struct request_queue *q, int hctx_idx, int node) { struct blk_mq_hw_ctx *hctx = NULL, *tmp; /* reuse dead hctx first */ spin_lock(&q->unused_hctx_lock); list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { if (tmp->numa_node == node && blk_mq_hctx_is_reusable(tmp)) { hctx = tmp; break; } } if (hctx) list_del_init(&hctx->hctx_list); spin_unlock(&q->unused_hctx_lock); if (!hctx) hctx = blk_mq_alloc_hctx(q, set, node); if (!hctx) goto fail; if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) goto free_hctx; return hctx; free_hctx: kobject_put(&hctx->kobj); fail: return NULL; } static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, struct request_queue *q) { struct 
blk_mq_hw_ctx *hctx; unsigned long i, j; for (i = 0; i < set->nr_hw_queues; i++) { int old_node; int node = blk_mq_get_hctx_node(set, i); struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i); if (old_hctx) { old_node = old_hctx->numa_node; blk_mq_exit_hctx(q, set, old_hctx, i); } if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) { if (!old_hctx) break; pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n", node, old_node); hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node); WARN_ON_ONCE(!hctx); } } /* * Increasing nr_hw_queues fails. Free the newly allocated * hctxs and keep the previous q->nr_hw_queues. */ if (i != set->nr_hw_queues) { j = q->nr_hw_queues; } else { j = i; q->nr_hw_queues = set->nr_hw_queues; } xa_for_each_start(&q->hctx_table, j, hctx, j) blk_mq_exit_hctx(q, set, hctx, j); } static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, struct request_queue *q) { __blk_mq_realloc_hw_ctxs(set, q); /* unregister cpuhp callbacks for exited hctxs */ blk_mq_remove_hw_queues_cpuhp(q); /* register cpuhp for new initialized hctxs */ blk_mq_add_hw_queues_cpuhp(q); } int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q) { /* mark the queue as mq asap */ q->mq_ops = set->ops; /* * ->tag_set has to be setup before initialize hctx, which cpuphp * handler needs it for checking queue mapping */ q->tag_set = set; if (blk_mq_alloc_ctxs(q)) goto err_exit; /* init q->mq_kobj and sw queues' kobjects */ blk_mq_sysfs_init(q); INIT_LIST_HEAD(&q->unused_hctx_list); spin_lock_init(&q->unused_hctx_lock); xa_init(&q->hctx_table); blk_mq_realloc_hw_ctxs(set, q); if (!q->nr_hw_queues) goto err_hctxs; INIT_WORK(&q->timeout_work, blk_mq_timeout_work); blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); INIT_LIST_HEAD(&q->flush_list); INIT_LIST_HEAD(&q->requeue_list); spin_lock_init(&q->requeue_lock); q->nr_requests = set->queue_depth; blk_mq_init_cpu_queues(q, set->nr_hw_queues); blk_mq_map_swqueue(q); blk_mq_add_queue_tag_set(set, q); return 0; err_hctxs: blk_mq_release(q); err_exit: q->mq_ops = NULL; return -ENOMEM; } EXPORT_SYMBOL(blk_mq_init_allocated_queue); /* tags can _not_ be used after returning from blk_mq_exit_queue */ void blk_mq_exit_queue(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */ blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */ blk_mq_del_queue_tag_set(q); } static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) { int i; if (blk_mq_is_shared_tags(set->flags)) { set->shared_tags = blk_mq_alloc_map_and_rqs(set, BLK_MQ_NO_HCTX_IDX, set->queue_depth); if (!set->shared_tags) return -ENOMEM; } for (i = 0; i < set->nr_hw_queues; i++) { if (!__blk_mq_alloc_map_and_rqs(set, i)) goto out_unwind; cond_resched(); } return 0; out_unwind: while (--i >= 0) __blk_mq_free_map_and_rqs(set, i); if (blk_mq_is_shared_tags(set->flags)) { blk_mq_free_map_and_rqs(set, set->shared_tags, BLK_MQ_NO_HCTX_IDX); } return -ENOMEM; } /* * Allocate the request maps associated with this tag_set. Note that this * may reduce the depth asked for, if memory is tight. set->queue_depth * will be updated to reflect the allocated depth. 
*/ static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set) { unsigned int depth; int err; depth = set->queue_depth; do { err = __blk_mq_alloc_rq_maps(set); if (!err) break; set->queue_depth >>= 1; if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { err = -ENOMEM; break; } } while (set->queue_depth); if (!set->queue_depth || err) { pr_err("blk-mq: failed to allocate request map\n"); return -ENOMEM; } if (depth != set->queue_depth) pr_info("blk-mq: reduced tag depth (%u -> %u)\n", depth, set->queue_depth); return 0; } static void blk_mq_update_queue_map(struct blk_mq_tag_set *set) { /* * blk_mq_map_queues() and multiple .map_queues() implementations * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the * number of hardware queues. */ if (set->nr_maps == 1) set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues; if (set->ops->map_queues) { int i; /* * transport .map_queues is usually done in the following * way: * * for (queue = 0; queue < set->nr_hw_queues; queue++) { * mask = get_cpu_mask(queue) * for_each_cpu(cpu, mask) * set->map[x].mq_map[cpu] = queue; * } * * When we need to remap, the table has to be cleared for * killing stale mapping since one CPU may not be mapped * to any hw queue. */ for (i = 0; i < set->nr_maps; i++) blk_mq_clear_mq_map(&set->map[i]); set->ops->map_queues(set); } else { BUG_ON(set->nr_maps > 1); blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); } } static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set, int new_nr_hw_queues) { struct blk_mq_tags **new_tags; int i; if (set->nr_hw_queues >= new_nr_hw_queues) goto done; new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *), GFP_KERNEL, set->numa_node); if (!new_tags) return -ENOMEM; if (set->tags) memcpy(new_tags, set->tags, set->nr_hw_queues * sizeof(*set->tags)); kfree(set->tags); set->tags = new_tags; for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) { if (!__blk_mq_alloc_map_and_rqs(set, i)) { while (--i >= set->nr_hw_queues) __blk_mq_free_map_and_rqs(set, i); return -ENOMEM; } cond_resched(); } done: set->nr_hw_queues = new_nr_hw_queues; return 0; } /* * Alloc a tag set to be associated with one or more request queues. * May fail with EINVAL for various error conditions. May adjust the * requested depth down, if it's too large. In that case, the set * value will be stored in set->queue_depth. */ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) { int i, ret; BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); if (!set->nr_hw_queues) return -EINVAL; if (!set->queue_depth) return -EINVAL; if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) return -EINVAL; if (!set->ops->queue_rq) return -EINVAL; if (!set->ops->get_budget ^ !set->ops->put_budget) return -EINVAL; if (set->queue_depth > BLK_MQ_MAX_DEPTH) { pr_info("blk-mq: reduced tag depth to %u\n", BLK_MQ_MAX_DEPTH); set->queue_depth = BLK_MQ_MAX_DEPTH; } if (!set->nr_maps) set->nr_maps = 1; else if (set->nr_maps > HCTX_MAX_TYPES) return -EINVAL; /* * If a crashdump is active, then we are potentially in a very * memory constrained environment. Limit us to 64 tags to prevent * using too much memory. 
*/ if (is_kdump_kernel()) set->queue_depth = min(64U, set->queue_depth); /* * There is no use for more h/w queues than cpus if we just have * a single map */ if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids) set->nr_hw_queues = nr_cpu_ids; if (set->flags & BLK_MQ_F_BLOCKING) { set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL); if (!set->srcu) return -ENOMEM; ret = init_srcu_struct(set->srcu); if (ret) goto out_free_srcu; } init_rwsem(&set->update_nr_hwq_lock); ret = -ENOMEM; set->tags = kcalloc_node(set->nr_hw_queues, sizeof(struct blk_mq_tags *), GFP_KERNEL, set->numa_node); if (!set->tags) goto out_cleanup_srcu; for (i = 0; i < set->nr_maps; i++) { set->map[i].mq_map = kcalloc_node(nr_cpu_ids, sizeof(set->map[i].mq_map[0]), GFP_KERNEL, set->numa_node); if (!set->map[i].mq_map) goto out_free_mq_map; set->map[i].nr_queues = set->nr_hw_queues; } blk_mq_update_queue_map(set); ret = blk_mq_alloc_set_map_and_rqs(set); if (ret) goto out_free_mq_map; mutex_init(&set->tag_list_lock); INIT_LIST_HEAD(&set->tag_list); return 0; out_free_mq_map: for (i = 0; i < set->nr_maps; i++) { kfree(set->map[i].mq_map); set->map[i].mq_map = NULL; } kfree(set->tags); set->tags = NULL; out_cleanup_srcu: if (set->flags & BLK_MQ_F_BLOCKING) cleanup_srcu_struct(set->srcu); out_free_srcu: if (set->flags & BLK_MQ_F_BLOCKING) kfree(set->srcu); return ret; } EXPORT_SYMBOL(blk_mq_alloc_tag_set); /* allocate and initialize a tagset for a simple single-queue device */ int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, const struct blk_mq_ops *ops, unsigned int queue_depth, unsigned int set_flags) { memset(set, 0, sizeof(*set)); set->ops = ops; set->nr_hw_queues = 1; set->nr_maps = 1; set->queue_depth = queue_depth; set->numa_node = NUMA_NO_NODE; set->flags = set_flags; return blk_mq_alloc_tag_set(set); } EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set); void blk_mq_free_tag_set(struct blk_mq_tag_set *set) { int i, j; for (i = 0; i < set->nr_hw_queues; i++) __blk_mq_free_map_and_rqs(set, i); if (blk_mq_is_shared_tags(set->flags)) { blk_mq_free_map_and_rqs(set, set->shared_tags, BLK_MQ_NO_HCTX_IDX); } for (j = 0; j < set->nr_maps; j++) { kfree(set->map[j].mq_map); set->map[j].mq_map = NULL; } kfree(set->tags); set->tags = NULL; if (set->flags & BLK_MQ_F_BLOCKING) { cleanup_srcu_struct(set->srcu); kfree(set->srcu); } } EXPORT_SYMBOL(blk_mq_free_tag_set); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) { struct blk_mq_tag_set *set = q->tag_set; struct blk_mq_hw_ctx *hctx; int ret; unsigned long i; if (WARN_ON_ONCE(!q->mq_freeze_depth)) return -EINVAL; if (!set) return -EINVAL; if (q->nr_requests == nr) return 0; blk_mq_quiesce_queue(q); ret = 0; queue_for_each_hw_ctx(q, hctx, i) { if (!hctx->tags) continue; /* * If we're using an MQ scheduler, just update the scheduler * queue depth. This is similar to what the old code would do. 
*/ if (hctx->sched_tags) { ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, nr, true); } else { ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, false); } if (ret) break; if (q->elevator && q->elevator->type->ops.depth_updated) q->elevator->type->ops.depth_updated(hctx); } if (!ret) { q->nr_requests = nr; if (blk_mq_is_shared_tags(set->flags)) { if (q->elevator) blk_mq_tag_update_sched_shared_tags(q); else blk_mq_tag_resize_shared_tags(set, nr); } } blk_mq_unquiesce_queue(q); return ret; } static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) { struct request_queue *q; int prev_nr_hw_queues = set->nr_hw_queues; unsigned int memflags; int i; lockdep_assert_held(&set->tag_list_lock); if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids) nr_hw_queues = nr_cpu_ids; if (nr_hw_queues < 1) return; if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues) return; memflags = memalloc_noio_save(); list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_debugfs_unregister_hctxs(q); blk_mq_sysfs_unregister_hctxs(q); } list_for_each_entry(q, &set->tag_list, tag_set_list) blk_mq_freeze_queue_nomemsave(q); if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0) { list_for_each_entry(q, &set->tag_list, tag_set_list) blk_mq_unfreeze_queue_nomemrestore(q); goto reregister; } fallback: blk_mq_update_queue_map(set); list_for_each_entry(q, &set->tag_list, tag_set_list) { __blk_mq_realloc_hw_ctxs(set, q); if (q->nr_hw_queues != set->nr_hw_queues) { int i = prev_nr_hw_queues; pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n", nr_hw_queues, prev_nr_hw_queues); for (; i < set->nr_hw_queues; i++) __blk_mq_free_map_and_rqs(set, i); set->nr_hw_queues = prev_nr_hw_queues; goto fallback; } blk_mq_map_swqueue(q); } /* elv_update_nr_hw_queues() unfreeze queue for us */ list_for_each_entry(q, &set->tag_list, tag_set_list) elv_update_nr_hw_queues(q); reregister: list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_sysfs_register_hctxs(q); blk_mq_debugfs_register_hctxs(q); blk_mq_remove_hw_queues_cpuhp(q); blk_mq_add_hw_queues_cpuhp(q); } memalloc_noio_restore(memflags); /* Free the excess tags when nr_hw_queues shrink. 
*/ for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++) __blk_mq_free_map_and_rqs(set, i); } void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) { down_write(&set->update_nr_hwq_lock); mutex_lock(&set->tag_list_lock); __blk_mq_update_nr_hw_queues(set, nr_hw_queues); mutex_unlock(&set->tag_list_lock); up_write(&set->update_nr_hwq_lock); } EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob, unsigned int flags) { long state = get_current_state(); int ret; do { ret = q->mq_ops->poll(hctx, iob); if (ret > 0) { __set_current_state(TASK_RUNNING); return ret; } if (signal_pending_state(state, current)) __set_current_state(TASK_RUNNING); if (task_is_running(current)) return 1; if (ret < 0 || (flags & BLK_POLL_ONESHOT)) break; cpu_relax(); } while (!need_resched()); __set_current_state(TASK_RUNNING); return 0; } int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob, unsigned int flags) { if (!blk_mq_can_poll(q)) return 0; return blk_hctx_poll(q, xa_load(&q->hctx_table, cookie), iob, flags); } int blk_rq_poll(struct request *rq, struct io_comp_batch *iob, unsigned int poll_flags) { struct request_queue *q = rq->q; int ret; if (!blk_rq_is_poll(rq)) return 0; if (!percpu_ref_tryget(&q->q_usage_counter)) return 0; ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags); blk_queue_exit(q); return ret; } EXPORT_SYMBOL_GPL(blk_rq_poll); unsigned int blk_mq_rq_cpu(struct request *rq) { return rq->mq_ctx->cpu; } EXPORT_SYMBOL(blk_mq_rq_cpu); void blk_mq_cancel_work_sync(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; cancel_delayed_work_sync(&q->requeue_work); queue_for_each_hw_ctx(q, hctx, i) cancel_delayed_work_sync(&hctx->run_work); } static int __init blk_mq_init(void) { int i; for_each_possible_cpu(i) init_llist_head(&per_cpu(blk_cpu_done, i)); for_each_possible_cpu(i) INIT_CSD(&per_cpu(blk_cpu_csd, i), __blk_mq_complete_request_remote, NULL); open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, "block/softirq:dead", NULL, blk_softirq_cpu_dead); cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, blk_mq_hctx_notify_dead); cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online", blk_mq_hctx_notify_online, blk_mq_hctx_notify_offline); return 0; } subsys_initcall(blk_mq_init);
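/*
 * Illustrative sketch, not part of blk-mq itself: the minimal way a
 * single-queue driver consumes the tag-set API exported above. The
 * names sketch_queue_rq/sketch_mq_ops/sketch_create_queue are
 * hypothetical; the blk-mq calls are the exported functions above.
 */
#include <linux/blk-mq.h>

static blk_status_t sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	/* A real driver would issue I/O here; complete immediately. */
	blk_mq_start_request(bd->rq);
	blk_mq_end_request(bd->rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops sketch_mq_ops = {
	.queue_rq	= sketch_queue_rq,
};

static int sketch_create_queue(struct blk_mq_tag_set *set,
			       struct request_queue **qp)
{
	struct request_queue *q;
	int ret;

	/*
	 * One hw queue, depth 64, no flags. The depth may be lowered
	 * internally if memory is tight, as documented at
	 * blk_mq_alloc_set_map_and_rqs() above.
	 */
	ret = blk_mq_alloc_sq_tag_set(set, &sketch_mq_ops, 64, 0);
	if (ret)
		return ret;

	/* NULL limits fall back to default_lim inside blk_mq_alloc_queue() */
	q = blk_mq_alloc_queue(set, NULL, NULL);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(set);
		return PTR_ERR(q);
	}
	*qp = q;
	return 0;
}

/*
 * Teardown mirrors setup: blk_mq_destroy_queue(q), then blk_put_queue(q)
 * to drop the allocation reference, then blk_mq_free_tag_set(set).
 */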
// SPDX-License-Identifier: GPL-2.0-or-later /* * Force feedback support for Logitech Flight System G940 * * Copyright (c) 2009 Gary Stein <LordCnidarian@gmail.com> */ #include <linux/input.h> #include <linux/hid.h> #include "hid-lg.h" /* * G940 Theory of Operation (from experimentation) * * There are 63 fields (only 3 of them currently used) * 0 - seems to be command field * 1 - 30 deal with the x axis * 31 - 60 deal with the y axis * * Field 1 is x axis constant force * Field 31 is y axis constant force * * other interesting fields 1,2,3,4 on x axis * (same for 31,32,33,34 on y axis) * * 0 0 127 127 makes the joystick autocenter hard * * 127 0 127 127 makes the joystick loose on the right, * but stops all movement left * * -127 0 -127 -127 makes the joystick loose on the left, * but stops all movement right * * 0 0 -127 -127 makes the joystick rattle very hard * * I'm sure there are more effects here that I don't know enough about */ static int hid_lg3ff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); int x, y; /* * Available values in the field should always be 63, but we only use up to * 35. Instead, clear the entire area, however big it is.
*/ memset(report->field[0]->value, 0, sizeof(__s32) * report->field[0]->report_count); switch (effect->type) { case FF_CONSTANT: /* * Already clamped in ff_memless * 0 is center (different from other Logitech devices) */ x = effect->u.ramp.start_level; y = effect->u.ramp.end_level; /* send command byte */ report->field[0]->value[0] = 0x51; /* * Sign is backwards from the other Force3d Pro, * so values are recast here as two's-complement 8-bit */ report->field[0]->value[1] = (unsigned char)(-x); report->field[0]->value[31] = (unsigned char)(-y); hid_hw_request(hid, report, HID_REQ_SET_REPORT); break; } return 0; } static void hid_lg3ff_set_autocenter(struct input_dev *dev, u16 magnitude) { struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); /* * Auto-centering values probed from the device * NOTE: the deadman's switch on the G940 must be covered * for effects to work */ report->field[0]->value[0] = 0x51; report->field[0]->value[1] = 0x00; report->field[0]->value[2] = 0x00; report->field[0]->value[3] = 0x7F; report->field[0]->value[4] = 0x7F; report->field[0]->value[31] = 0x00; report->field[0]->value[32] = 0x00; report->field[0]->value[33] = 0x7F; report->field[0]->value[34] = 0x7F; hid_hw_request(hid, report, HID_REQ_SET_REPORT); } static const signed short ff3_joystick_ac[] = { FF_CONSTANT, FF_AUTOCENTER, -1 }; int lg3ff_init(struct hid_device *hid) { struct hid_input *hidinput; struct input_dev *dev; const signed short *ff_bits = ff3_joystick_ac; int error; int i; if (list_empty(&hid->inputs)) { hid_err(hid, "no inputs found\n"); return -ENODEV; } hidinput = list_entry(hid->inputs.next, struct hid_input, list); dev = hidinput->input; /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35)) return -ENODEV; /* Assume single fixed device G940 */ for (i = 0; ff_bits[i] >= 0; i++) set_bit(ff_bits[i], dev->ffbit); error = input_ff_create_memless(dev, NULL, hid_lg3ff_play); if (error) return error; if (test_bit(FF_AUTOCENTER, dev->ffbit)) dev->ff->set_autocenter = hid_lg3ff_set_autocenter; hid_info(hid, "Force feedback for Logitech Flight System G940 by Gary Stein <LordCnidarian@gmail.com>\n"); return 0; }
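/*
 * Illustrative userspace sketch, not part of this driver: uploading and
 * playing an FF_CONSTANT effect through the evdev force-feedback API.
 * ff-memless combines uploaded constant effects and reuses the ff_ramp
 * fields to carry the resulting x/y, which is why hid_lg3ff_play() reads
 * effect->u.ramp.* above. The event-node path below is hypothetical.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct ff_effect effect;
	struct input_event play;
	int fd = open("/dev/input/event0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_CONSTANT;
	effect.id = -1;				/* let the kernel assign an id */
	effect.direction = 0x4000;		/* 90 degrees: force to the left */
	effect.u.constant.level = 0x4000;	/* roughly half strength */
	effect.replay.length = 1000;		/* milliseconds */

	if (ioctl(fd, EVIOCSFF, &effect) < 0)	/* upload the effect */
		return 1;

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;				/* play once */
	if (write(fd, &play, sizeof(play)) != sizeof(play))
		return 1;

	sleep(2);				/* let the effect run */
	close(fd);
	return 0;
}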
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NET_NEIGHBOUR_H #define _NET_NEIGHBOUR_H #include <linux/neighbour.h> /* * Generic neighbour manipulation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * * Changes: * * Harald Welte: <laforge@gnumonks.org> * - Add neighbour cache statistics like rtstat */ #include <linux/atomic.h> #include <linux/refcount.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/rcupdate.h> #include <linux/seq_file.h> #include <linux/bitmap.h> #include <linux/err.h> #include <linux/sysctl.h> #include <linux/workqueue.h> #include <net/rtnetlink.h> #include <net/neighbour_tables.h> /* * NUD stands for "neighbor unreachability detection" */ #define NUD_IN_TIMER (NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE) #define NUD_VALID (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY) #define NUD_CONNECTED (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE) struct neighbour; enum { NEIGH_VAR_MCAST_PROBES, NEIGH_VAR_UCAST_PROBES, NEIGH_VAR_APP_PROBES, NEIGH_VAR_MCAST_REPROBES, NEIGH_VAR_RETRANS_TIME, NEIGH_VAR_BASE_REACHABLE_TIME, NEIGH_VAR_DELAY_PROBE_TIME, NEIGH_VAR_INTERVAL_PROBE_TIME_MS, NEIGH_VAR_GC_STALETIME, NEIGH_VAR_QUEUE_LEN_BYTES,
NEIGH_VAR_PROXY_QLEN, NEIGH_VAR_ANYCAST_DELAY, NEIGH_VAR_PROXY_DELAY, NEIGH_VAR_LOCKTIME, #define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1) /* Following are used as a second way to access one of the above */ NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */ NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */ NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */ /* Following are used by "default" only */ NEIGH_VAR_GC_INTERVAL, NEIGH_VAR_GC_THRESH1, NEIGH_VAR_GC_THRESH2, NEIGH_VAR_GC_THRESH3, NEIGH_VAR_MAX }; struct neigh_parms { possible_net_t net; struct net_device *dev; netdevice_tracker dev_tracker; struct list_head list; int (*neigh_setup)(struct neighbour *); struct neigh_table *tbl; void *sysctl_table; int dead; refcount_t refcnt; struct rcu_head rcu_head; int reachable_time; u32 qlen; int data[NEIGH_VAR_DATA_MAX]; DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX); }; static inline void neigh_var_set(struct neigh_parms *p, int index, int val) { set_bit(index, p->data_state); p->data[index] = val; } #define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr]) /* In ndo_neigh_setup, NEIGH_VAR_INIT should be used. * In other cases, NEIGH_VAR_SET should be used. */ #define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val) #define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val) static inline void neigh_parms_data_state_setall(struct neigh_parms *p) { bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX); } static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p) { bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX); } struct neigh_statistics { unsigned long allocs; /* number of allocated neighs */ unsigned long destroys; /* number of destroyed neighs */ unsigned long hash_grows; /* number of hash resizes */ unsigned long res_failed; /* number of failed resolutions */ unsigned long lookups; /* number of lookups */ unsigned long hits; /* number of hits (among lookups) */ unsigned long rcv_probes_mcast; /* number of received mcast ipv6 */ unsigned long rcv_probes_ucast; /* number of received ucast ipv6 */ unsigned long periodic_gc_runs; /* number of periodic GC runs */ unsigned long forced_gc_runs; /* number of forced GC runs */ unsigned long unres_discards; /* number of unresolved drops */ unsigned long table_fulls; /* times even gc couldn't help */ }; #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field) struct neighbour { struct hlist_node hash; struct hlist_node dev_list; struct neigh_table *tbl; struct neigh_parms *parms; unsigned long confirmed; unsigned long updated; rwlock_t lock; refcount_t refcnt; unsigned int arp_queue_len_bytes; struct sk_buff_head arp_queue; struct timer_list timer; unsigned long used; atomic_t probes; u8 nud_state; u8 type; u8 dead; u8 protocol; u32 flags; seqlock_t ha_lock; unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8); struct hh_cache hh; int (*output)(struct neighbour *, struct sk_buff *); const struct neigh_ops *ops; struct list_head gc_list; struct list_head managed_list; struct rcu_head rcu; struct net_device *dev; netdevice_tracker dev_tracker; u8 primary_key[]; } __randomize_layout; struct neigh_ops { int family; void (*solicit)(struct neighbour *, struct sk_buff *); void (*error_report)(struct neighbour *, struct sk_buff *); int (*output)(struct neighbour *, struct sk_buff *); int (*connected_output)(struct neighbour *, struct sk_buff *); }; struct pneigh_entry { struct pneigh_entry *next; possible_net_t net; 
struct net_device *dev; netdevice_tracker dev_tracker; u32 flags; u8 protocol; u32 key[]; }; /* * neighbour table manipulation */ #define NEIGH_NUM_HASH_RND 4 struct neigh_hash_table { struct hlist_head *hash_heads; unsigned int hash_shift; __u32 hash_rnd[NEIGH_NUM_HASH_RND]; struct rcu_head rcu; }; struct neigh_table { int family; unsigned int entry_size; unsigned int key_len; __be16 protocol; __u32 (*hash)(const void *pkey, const struct net_device *dev, __u32 *hash_rnd); bool (*key_eq)(const struct neighbour *, const void *pkey); int (*constructor)(struct neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); void (*proxy_redo)(struct sk_buff *skb); int (*is_multicast)(const void *pkey); bool (*allow_add)(const struct net_device *dev, struct netlink_ext_ack *extack); char *id; struct neigh_parms parms; struct list_head parms_list; int gc_interval; int gc_thresh1; int gc_thresh2; int gc_thresh3; unsigned long last_flush; struct delayed_work gc_work; struct delayed_work managed_work; struct timer_list proxy_timer; struct sk_buff_head proxy_queue; atomic_t entries; atomic_t gc_entries; struct list_head gc_list; struct list_head managed_list; rwlock_t lock; unsigned long last_rand; struct neigh_statistics __percpu *stats; struct neigh_hash_table __rcu *nht; struct pneigh_entry **phash_buckets; }; static inline int neigh_parms_family(struct neigh_parms *p) { return p->tbl->family; } #define NEIGH_PRIV_ALIGN sizeof(long long) #define NEIGH_ENTRY_SIZE(size) ALIGN((size), NEIGH_PRIV_ALIGN) static inline void *neighbour_priv(const struct neighbour *n) { return (char *)n + n->tbl->entry_size; } /* flags for neigh_update() */ #define NEIGH_UPDATE_F_OVERRIDE BIT(0) #define NEIGH_UPDATE_F_WEAK_OVERRIDE BIT(1) #define NEIGH_UPDATE_F_OVERRIDE_ISROUTER BIT(2) #define NEIGH_UPDATE_F_USE BIT(3) #define NEIGH_UPDATE_F_MANAGED BIT(4) #define NEIGH_UPDATE_F_EXT_LEARNED BIT(5) #define NEIGH_UPDATE_F_ISROUTER BIT(6) #define NEIGH_UPDATE_F_ADMIN BIT(7) /* In-kernel representation for NDA_FLAGS_EXT flags: */ #define NTF_OLD_MASK 0xff #define NTF_EXT_SHIFT 8 #define NTF_EXT_MASK (NTF_EXT_MANAGED) #define NTF_MANAGED (NTF_EXT_MANAGED << NTF_EXT_SHIFT) extern const struct nla_policy nda_policy[]; #define neigh_for_each_in_bucket(pos, head) hlist_for_each_entry(pos, head, hash) #define neigh_for_each_in_bucket_rcu(pos, head) \ hlist_for_each_entry_rcu(pos, head, hash) #define neigh_for_each_in_bucket_safe(pos, tmp, head) \ hlist_for_each_entry_safe(pos, tmp, head, hash) static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey) { return *(const u32 *)n->primary_key == *(const u32 *)pkey; } static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey) { const u32 *n32 = (const u32 *)n->primary_key; const u32 *p32 = pkey; return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) | (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0; } static inline struct neighbour *___neigh_lookup_noref( struct neigh_table *tbl, bool (*key_eq)(const struct neighbour *n, const void *pkey), __u32 (*hash)(const void *pkey, const struct net_device *dev, __u32 *hash_rnd), const void *pkey, struct net_device *dev) { struct neigh_hash_table *nht = rcu_dereference(tbl->nht); struct neighbour *n; u32 hash_val; hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); neigh_for_each_in_bucket_rcu(n, &nht->hash_heads[hash_val]) if (n->dev == dev && key_eq(n, pkey)) return n; return NULL; } static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl, 
const void *pkey, struct net_device *dev) { return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev); } static inline void neigh_confirm(struct neighbour *n) { if (n) { unsigned long now = jiffies; /* avoid dirtying neighbour */ if (READ_ONCE(n->confirmed) != now) WRITE_ONCE(n->confirmed, now); } } void neigh_table_init(int index, struct neigh_table *tbl); int neigh_table_clear(int index, struct neigh_table *tbl); struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev); struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, struct net_device *dev, bool want_ref); static inline struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey, struct net_device *dev) { return __neigh_create(tbl, pkey, dev, true); } void neigh_destroy(struct neighbour *neigh); int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb, const bool immediate_ok); int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags, u32 nlmsg_pid); void __neigh_set_probe_once(struct neighbour *neigh); bool neigh_remove_one(struct neighbour *ndel); void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev); int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb); int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb); int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb); struct neighbour *neigh_event_ns(struct neigh_table *tbl, u8 *lladdr, void *saddr, struct net_device *dev); struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl); void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms); static inline struct net *neigh_parms_net(const struct neigh_parms *parms) { return read_pnet(&parms->net); } unsigned long neigh_rand_reach_time(unsigned long base); void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, struct sk_buff *skb); struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev, int creat); struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev); int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev); static inline struct net *pneigh_net(const struct pneigh_entry *pneigh) { return read_pnet(&pneigh->net); } void neigh_app_ns(struct neighbour *n); void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie); void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *)); int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *); struct neigh_seq_state { struct seq_net_private p; struct neigh_table *tbl; struct neigh_hash_table *nht; void *(*neigh_sub_iter)(struct neigh_seq_state *state, struct neighbour *n, loff_t *pos); unsigned int bucket; unsigned int flags; #define NEIGH_SEQ_NEIGH_ONLY 0x00000001 #define NEIGH_SEQ_IS_PNEIGH 0x00000002 #define NEIGH_SEQ_SKIP_NOARP 0x00000004 }; void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, unsigned int); void *neigh_seq_next(struct seq_file *, void *, loff_t *); void neigh_seq_stop(struct seq_file *, void *); int neigh_proc_dointvec(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos); int 
neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos); int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos); int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, proc_handler *proc_handler); void neigh_sysctl_unregister(struct neigh_parms *p); static inline void __neigh_parms_put(struct neigh_parms *parms) { refcount_dec(&parms->refcnt); } static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms) { refcount_inc(&parms->refcnt); return parms; } /* * Neighbour references */ static inline void neigh_release(struct neighbour *neigh) { if (refcount_dec_and_test(&neigh->refcnt)) neigh_destroy(neigh); } static inline struct neighbour * neigh_clone(struct neighbour *neigh) { if (neigh) refcount_inc(&neigh->refcnt); return neigh; } #define neigh_hold(n) refcount_inc(&(n)->refcnt) static __always_inline int neigh_event_send_probe(struct neighbour *neigh, struct sk_buff *skb, const bool immediate_ok) { unsigned long now = jiffies; if (READ_ONCE(neigh->used) != now) WRITE_ONCE(neigh->used, now); if (!(READ_ONCE(neigh->nud_state) & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))) return __neigh_event_send(neigh, skb, immediate_ok); return 0; } static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) { return neigh_event_send_probe(neigh, skb, true); } #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) { unsigned int seq, hh_alen; do { seq = read_seqbegin(&hh->hh_lock); hh_alen = HH_DATA_ALIGN(ETH_HLEN); memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN); } while (read_seqretry(&hh->hh_lock, seq)); return 0; } #endif static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) { unsigned int hh_alen = 0; unsigned int seq; unsigned int hh_len; do { seq = read_seqbegin(&hh->hh_lock); hh_len = READ_ONCE(hh->hh_len); if (likely(hh_len <= HH_DATA_MOD)) { hh_alen = HH_DATA_MOD; /* skb_push() would proceed silently if we have room for * the unaligned size but not for the aligned size: * check headroom explicitly. */ if (likely(skb_headroom(skb) >= HH_DATA_MOD)) { /* this is inlined by gcc */ memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD); } } else { hh_alen = HH_DATA_ALIGN(hh_len); if (likely(skb_headroom(skb) >= hh_alen)) { memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); } } } while (read_seqretry(&hh->hh_lock, seq)); if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) { kfree_skb(skb); return NET_XMIT_DROP; } __skb_push(skb, hh_len); return dev_queue_xmit(skb); } static inline int neigh_output(struct neighbour *n, struct sk_buff *skb, bool skip_cache) { const struct hh_cache *hh = &n->hh; /* n->nud_state and hh->hh_len could be changed under us. * neigh_hh_output() is taking care of the race later. */ if (!skip_cache && (READ_ONCE(n->nud_state) & NUD_CONNECTED) && READ_ONCE(hh->hh_len)) return neigh_hh_output(hh, skb); return READ_ONCE(n->output)(n, skb); } static inline struct neighbour * __neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat) { struct neighbour *n = neigh_lookup(tbl, pkey, dev); if (n || !creat) return n; n = neigh_create(tbl, pkey, dev); return IS_ERR(n) ? 
NULL : n; } static inline struct neighbour * __neigh_lookup_errno(struct neigh_table *tbl, const void *pkey, struct net_device *dev) { struct neighbour *n = neigh_lookup(tbl, pkey, dev); if (n) return n; return neigh_create(tbl, pkey, dev); } struct neighbour_cb { unsigned long sched_next; unsigned int flags; }; #define LOCALLY_ENQUEUED 0x1 #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, const struct net_device *dev) { unsigned int seq; do { seq = read_seqbegin(&n->ha_lock); memcpy(dst, n->ha, dev->addr_len); } while (read_seqretry(&n->ha_lock, seq)); } static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags, int *notify) { u8 ndm_flags = 0; ndm_flags |= (flags & NEIGH_UPDATE_F_ISROUTER) ? NTF_ROUTER : 0; if ((neigh->flags ^ ndm_flags) & NTF_ROUTER) { if (ndm_flags & NTF_ROUTER) neigh->flags |= NTF_ROUTER; else neigh->flags &= ~NTF_ROUTER; *notify = 1; } } #endif
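/*
 * Illustrative sketch, not part of this header: the typical
 * resolve-and-transmit pattern a caller follows with the API above.
 * "arp_tbl" is the IPv4 neighbour table declared in <net/arp.h>; the
 * function name is hypothetical.
 */
#include <net/arp.h>
#include <net/neighbour.h>

static int sketch_xmit_via_neigh(struct sk_buff *skb,
				 struct net_device *dev, __be32 *daddr)
{
	struct neighbour *n;
	int err;

	/* look the entry up, creating it if it does not exist yet */
	n = __neigh_lookup_errno(&arp_tbl, daddr, dev);
	if (IS_ERR(n)) {
		kfree_skb(skb);
		return PTR_ERR(n);
	}

	/*
	 * neigh_output() uses the cached hardware header when the entry
	 * is NUD_CONNECTED, and falls back to n->output() (which queues
	 * the skb and starts resolution) otherwise.
	 */
	err = neigh_output(n, skb, false);
	neigh_release(n);
	return err;
}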
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for the Prodikeys PC-MIDI Keyboard * providing
midi & extra multimedia keys functionality * * Copyright (c) 2009 Don Prince <dhprince.devel@yahoo.co.uk> * * Controls for Octave Shift Up/Down, Channel, and * Sustain Duration available via sysfs. */ /* */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/mutex.h> #include <linux/hid.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/rawmidi.h> #include "hid-ids.h" #define pk_debug(format, arg...) \ pr_debug("hid-prodikeys: " format "\n" , ## arg) #define pk_error(format, arg...) \ pr_err("hid-prodikeys: " format "\n" , ## arg) struct pcmidi_snd; struct pcmidi_sustain { unsigned long in_use; struct pcmidi_snd *pm; struct timer_list timer; unsigned char status; unsigned char note; unsigned char velocity; }; #define PCMIDI_SUSTAINED_MAX 32 struct pcmidi_snd { struct hid_device *hdev; unsigned short ifnum; struct hid_report *pcmidi_report6; struct input_dev *input_ep82; unsigned short midi_mode; unsigned short midi_sustain_mode; unsigned short midi_sustain; unsigned short midi_channel; short midi_octave; struct pcmidi_sustain sustained_notes[PCMIDI_SUSTAINED_MAX]; unsigned short fn_state; unsigned short last_key[24]; spinlock_t rawmidi_in_lock; struct snd_card *card; struct snd_rawmidi *rwmidi; struct snd_rawmidi_substream *in_substream; unsigned long in_triggered; }; #define PK_QUIRK_NOGET 0x00010000 #define PCMIDI_MIDDLE_C 60 #define PCMIDI_CHANNEL_MIN 0 #define PCMIDI_CHANNEL_MAX 15 #define PCMIDI_OCTAVE_MIN (-2) #define PCMIDI_OCTAVE_MAX 2 #define PCMIDI_SUSTAIN_MIN 0 #define PCMIDI_SUSTAIN_MAX 5000 static const char shortname[] = "PC-MIDI"; static const char longname[] = "Prodikeys PC-MIDI Keyboard"; static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(index, int, NULL, 0444); module_param_array(id, charp, NULL, 0444); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the PC-MIDI virtual audio driver"); MODULE_PARM_DESC(id, "ID string for the PC-MIDI virtual audio driver"); MODULE_PARM_DESC(enable, "Enable for the PC-MIDI virtual audio driver"); /* Output routine for the sysfs channel file */ static ssize_t show_channel(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct pcmidi_snd *pm = hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read channel=%u\n", pm->midi_channel); return sprintf(buf, "%u (min:%u, max:%u)\n", pm->midi_channel, PCMIDI_CHANNEL_MIN, PCMIDI_CHANNEL_MAX); } /* Input routine for the sysfs channel file */ static ssize_t store_channel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct pcmidi_snd *pm = hid_get_drvdata(hdev); unsigned channel = 0; if (sscanf(buf, "%u", &channel) > 0 && channel <= PCMIDI_CHANNEL_MAX) { dbg_hid("pcmidi sysfs write channel=%u\n", channel); pm->midi_channel = channel; return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(channel, S_IRUGO | S_IWUSR | S_IWGRP , show_channel, store_channel); static struct device_attribute *sysfs_device_attr_channel = { &dev_attr_channel, }; /* Output routine for the sysfs sustain file */ static ssize_t show_sustain(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct pcmidi_snd *pm = hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read 
sustain=%u\n", pm->midi_sustain); return sprintf(buf, "%u (off:%u, max:%u (ms))\n", pm->midi_sustain, PCMIDI_SUSTAIN_MIN, PCMIDI_SUSTAIN_MAX); } /* Input routine for the sysfs sustain file */ static ssize_t store_sustain(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct pcmidi_snd *pm = hid_get_drvdata(hdev); unsigned sustain = 0; if (sscanf(buf, "%u", &sustain) > 0 && sustain <= PCMIDI_SUSTAIN_MAX) { dbg_hid("pcmidi sysfs write sustain=%u\n", sustain); pm->midi_sustain = sustain; pm->midi_sustain_mode = (0 == sustain || !pm->midi_mode) ? 0 : 1; return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(sustain, S_IRUGO | S_IWUSR | S_IWGRP, show_sustain, store_sustain); static struct device_attribute *sysfs_device_attr_sustain = { &dev_attr_sustain, }; /* Output routine for the sysfs octave file */ static ssize_t show_octave(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct pcmidi_snd *pm = hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read octave=%d\n", pm->midi_octave); return sprintf(buf, "%d (min:%d, max:%d)\n", pm->midi_octave, PCMIDI_OCTAVE_MIN, PCMIDI_OCTAVE_MAX); } /* Input routine for the sysfs octave file */ static ssize_t store_octave(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct pcmidi_snd *pm = hid_get_drvdata(hdev); int octave = 0; if (sscanf(buf, "%d", &octave) > 0 && octave >= PCMIDI_OCTAVE_MIN && octave <= PCMIDI_OCTAVE_MAX) { dbg_hid("pcmidi sysfs write octave=%d\n", octave); pm->midi_octave = octave; return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(octave, S_IRUGO | S_IWUSR | S_IWGRP, show_octave, store_octave); static struct device_attribute *sysfs_device_attr_octave = { &dev_attr_octave, }; static void pcmidi_send_note(struct pcmidi_snd *pm, unsigned char status, unsigned char note, unsigned char velocity) { unsigned long flags; unsigned char buffer[3]; buffer[0] = status; buffer[1] = note; buffer[2] = velocity; spin_lock_irqsave(&pm->rawmidi_in_lock, flags); if (!pm->in_substream) goto drop_note; if (!test_bit(pm->in_substream->number, &pm->in_triggered)) goto drop_note; snd_rawmidi_receive(pm->in_substream, buffer, 3); drop_note: spin_unlock_irqrestore(&pm->rawmidi_in_lock, flags); return; } static void pcmidi_sustained_note_release(struct timer_list *t) { struct pcmidi_sustain *pms = timer_container_of(pms, t, timer); pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity); pms->in_use = 0; } static void init_sustain_timers(struct pcmidi_snd *pm) { struct pcmidi_sustain *pms; unsigned i; for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { pms = &pm->sustained_notes[i]; pms->in_use = 0; pms->pm = pm; timer_setup(&pms->timer, pcmidi_sustained_note_release, 0); } } static void stop_sustain_timers(struct pcmidi_snd *pm) { struct pcmidi_sustain *pms; unsigned i; for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { pms = &pm->sustained_notes[i]; pms->in_use = 1; timer_delete_sync(&pms->timer); } } static int pcmidi_get_output_report(struct pcmidi_snd *pm) { struct hid_device *hdev = pm->hdev; struct hid_report *report; list_for_each_entry(report, &hdev->report_enum[HID_OUTPUT_REPORT].report_list, list) { if (!(6 == report->id)) continue; if (report->maxfield < 1) { hid_err(hdev, "output report is empty\n"); break; } if (report->field[0]->report_count != 2) { hid_err(hdev, "field count too low\n"); break; } pm->pcmidi_report6 = 
report; return 0; } /* should never get here */ return -ENODEV; } static void pcmidi_submit_output_report(struct pcmidi_snd *pm, int state) { struct hid_device *hdev = pm->hdev; struct hid_report *report = pm->pcmidi_report6; report->field[0]->value[0] = 0x01; report->field[0]->value[1] = state; hid_hw_request(hdev, report, HID_REQ_SET_REPORT); } static int pcmidi_handle_report1(struct pcmidi_snd *pm, u8 *data) { u32 bit_mask; bit_mask = data[1]; bit_mask = (bit_mask << 8) | data[2]; bit_mask = (bit_mask << 8) | data[3]; dbg_hid("pcmidi mode: %d\n", pm->midi_mode); /*KEY_MAIL or octave down*/ if (pm->midi_mode && bit_mask == 0x004000) { /* octave down */ pm->midi_octave--; if (pm->midi_octave < -2) pm->midi_octave = -2; dbg_hid("pcmidi mode: %d octave: %d\n", pm->midi_mode, pm->midi_octave); return 1; } /*KEY_WWW or sustain*/ else if (pm->midi_mode && bit_mask == 0x000004) { /* sustain on/off*/ pm->midi_sustain_mode ^= 0x1; return 1; } return 0; /* continue key processing */ } static int pcmidi_handle_report3(struct pcmidi_snd *pm, u8 *data, int size) { struct pcmidi_sustain *pms; unsigned i, j; unsigned char status, note, velocity; unsigned num_notes = (size-1)/2; for (j = 0; j < num_notes; j++) { note = data[j*2+1]; velocity = data[j*2+2]; if (note < 0x81) { /* note on */ status = 128 + 16 + pm->midi_channel; /* 1001nnnn */ note = note - 0x54 + PCMIDI_MIDDLE_C + (pm->midi_octave * 12); if (0 == velocity) velocity = 1; /* force note on */ } else { /* note off */ status = 128 + pm->midi_channel; /* 1000nnnn */ note = note - 0x94 + PCMIDI_MIDDLE_C + (pm->midi_octave*12); if (pm->midi_sustain_mode) { for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { pms = &pm->sustained_notes[i]; if (!pms->in_use) { pms->status = status; pms->note = note; pms->velocity = velocity; pms->in_use = 1; mod_timer(&pms->timer, jiffies + msecs_to_jiffies(pm->midi_sustain)); return 1; } } } } pcmidi_send_note(pm, status, note, velocity); } return 1; } static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 *data) { unsigned key; u32 bit_mask; u32 bit_index; bit_mask = data[1]; bit_mask = (bit_mask << 8) | data[2]; bit_mask = (bit_mask << 8) | data[3]; /* break keys */ for (bit_index = 0; bit_index < 24; bit_index++) { if (!((0x01 << bit_index) & bit_mask)) { input_event(pm->input_ep82, EV_KEY, pm->last_key[bit_index], 0); pm->last_key[bit_index] = 0; } } /* make keys */ for (bit_index = 0; bit_index < 24; bit_index++) { key = 0; switch ((0x01 << bit_index) & bit_mask) { case 0x000010: /* Fn lock*/ pm->fn_state ^= 0x000010; if (pm->fn_state) pcmidi_submit_output_report(pm, 0xc5); else pcmidi_submit_output_report(pm, 0xc6); continue; case 0x020000: /* midi launcher..send a key (qwerty) or not? 
*/ pcmidi_submit_output_report(pm, 0xc1); pm->midi_mode ^= 0x01; dbg_hid("pcmidi mode: %d\n", pm->midi_mode); continue; case 0x100000: /* KEY_MESSENGER or octave up */ dbg_hid("pcmidi mode: %d\n", pm->midi_mode); if (pm->midi_mode) { pm->midi_octave++; if (pm->midi_octave > 2) pm->midi_octave = 2; dbg_hid("pcmidi mode: %d octave: %d\n", pm->midi_mode, pm->midi_octave); continue; } else key = KEY_MESSENGER; break; case 0x400000: key = KEY_CALENDAR; break; case 0x080000: key = KEY_ADDRESSBOOK; break; case 0x040000: key = KEY_DOCUMENTS; break; case 0x800000: key = KEY_WORDPROCESSOR; break; case 0x200000: key = KEY_SPREADSHEET; break; case 0x010000: key = KEY_COFFEE; break; case 0x000100: key = KEY_HELP; break; case 0x000200: key = KEY_SEND; break; case 0x000400: key = KEY_REPLY; break; case 0x000800: key = KEY_FORWARDMAIL; break; case 0x001000: key = KEY_NEW; break; case 0x002000: key = KEY_OPEN; break; case 0x004000: key = KEY_CLOSE; break; case 0x008000: key = KEY_SAVE; break; case 0x000001: key = KEY_UNDO; break; case 0x000002: key = KEY_REDO; break; case 0x000004: key = KEY_SPELLCHECK; break; case 0x000008: key = KEY_PRINT; break; } if (key) { input_event(pm->input_ep82, EV_KEY, key, 1); pm->last_key[bit_index] = key; } } return 1; } static int pcmidi_handle_report( struct pcmidi_snd *pm, unsigned report_id, u8 *data, int size) { int ret = 0; switch (report_id) { case 0x01: /* midi keys (qwerty)*/ ret = pcmidi_handle_report1(pm, data); break; case 0x03: /* midi keyboard (musical)*/ ret = pcmidi_handle_report3(pm, data, size); break; case 0x04: /* multimedia/midi keys (qwerty)*/ ret = pcmidi_handle_report4(pm, data); break; } return ret; } static void pcmidi_setup_extra_keys( struct pcmidi_snd *pm, struct input_dev *input) { /* reassigned functionality for N/A keys MY PICTURES => KEY_WORDPROCESSOR MY MUSIC => KEY_SPREADSHEET */ static const unsigned int keys[] = { KEY_FN, KEY_MESSENGER, KEY_CALENDAR, KEY_ADDRESSBOOK, KEY_DOCUMENTS, KEY_WORDPROCESSOR, KEY_SPREADSHEET, KEY_COFFEE, KEY_HELP, KEY_SEND, KEY_REPLY, KEY_FORWARDMAIL, KEY_NEW, KEY_OPEN, KEY_CLOSE, KEY_SAVE, KEY_UNDO, KEY_REDO, KEY_SPELLCHECK, KEY_PRINT, 0 }; const unsigned int *pkeys = &keys[0]; unsigned short i; if (pm->ifnum != 1) /* only set up ONCE for interface 1 */ return; pm->input_ep82 = input; for (i = 0; i < 24; i++) pm->last_key[i] = 0; while (*pkeys != 0) { set_bit(*pkeys, pm->input_ep82->keybit); ++pkeys; } } static int pcmidi_set_operational(struct pcmidi_snd *pm) { int rc; if (pm->ifnum != 1) return 0; /* only set up ONCE for interface 1 */ rc = pcmidi_get_output_report(pm); if (rc < 0) return rc; pcmidi_submit_output_report(pm, 0xc1); return 0; } static int pcmidi_snd_free(struct snd_device *dev) { return 0; } static int pcmidi_in_open(struct snd_rawmidi_substream *substream) { struct pcmidi_snd *pm = substream->rmidi->private_data; dbg_hid("pcmidi in open\n"); pm->in_substream = substream; return 0; } static int pcmidi_in_close(struct snd_rawmidi_substream *substream) { dbg_hid("pcmidi in close\n"); return 0; } static void pcmidi_in_trigger(struct snd_rawmidi_substream *substream, int up) { struct pcmidi_snd *pm = substream->rmidi->private_data; dbg_hid("pcmidi in trigger %d\n", up); pm->in_triggered = up; } static const struct snd_rawmidi_ops pcmidi_in_ops = { .open = pcmidi_in_open, .close = pcmidi_in_close, .trigger = pcmidi_in_trigger }; static int pcmidi_snd_initialise(struct pcmidi_snd *pm) { static int dev; struct snd_card *card; struct snd_rawmidi *rwmidi; int err; static struct snd_device_ops ops = {
.dev_free = pcmidi_snd_free, }; if (pm->ifnum != 1) return 0; /* only set up midi device ONCE for interface 1 */ if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } /* Setup sound card */ err = snd_card_new(&pm->hdev->dev, index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) { pk_error("failed to create pc-midi sound card\n"); err = -ENOMEM; goto fail; } pm->card = card; /* Setup sound device */ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, pm, &ops); if (err < 0) { pk_error("failed to create pc-midi sound device: error %d\n", err); goto fail; } strscpy(card->driver, shortname, sizeof(card->driver)); strscpy(card->shortname, shortname, sizeof(card->shortname)); strscpy(card->longname, longname, sizeof(card->longname)); /* Set up rawmidi */ err = snd_rawmidi_new(card, card->shortname, 0, 0, 1, &rwmidi); if (err < 0) { pk_error("failed to create pc-midi rawmidi device: error %d\n", err); goto fail; } pm->rwmidi = rwmidi; strscpy(rwmidi->name, card->shortname, sizeof(rwmidi->name)); rwmidi->info_flags = SNDRV_RAWMIDI_INFO_INPUT; rwmidi->private_data = pm; snd_rawmidi_set_ops(rwmidi, SNDRV_RAWMIDI_STREAM_INPUT, &pcmidi_in_ops); /* create sysfs variables */ err = device_create_file(&pm->hdev->dev, sysfs_device_attr_channel); if (err < 0) { pk_error("failed to create sysfs attribute channel: error %d\n", err); goto fail; } err = device_create_file(&pm->hdev->dev, sysfs_device_attr_sustain); if (err < 0) { pk_error("failed to create sysfs attribute sustain: error %d\n", err); goto fail_attr_sustain; } err = device_create_file(&pm->hdev->dev, sysfs_device_attr_octave); if (err < 0) { pk_error("failed to create sysfs attribute octave: error %d\n", err); goto fail_attr_octave; } spin_lock_init(&pm->rawmidi_in_lock); init_sustain_timers(pm); err = pcmidi_set_operational(pm); if (err < 0) { pk_error("failed to find output report\n"); goto fail_register; } /* register it */ err = snd_card_register(card); if (err < 0) { pk_error("failed to register pc-midi sound card: error %d\n", err); goto fail_register; } dbg_hid("pcmidi_snd_initialise finished ok\n"); return 0; fail_register: stop_sustain_timers(pm); device_remove_file(&pm->hdev->dev, sysfs_device_attr_octave); fail_attr_octave: device_remove_file(&pm->hdev->dev, sysfs_device_attr_sustain); fail_attr_sustain: device_remove_file(&pm->hdev->dev, sysfs_device_attr_channel); fail: if (pm->card) { snd_card_free(pm->card); pm->card = NULL; } return err; } static int pcmidi_snd_terminate(struct pcmidi_snd *pm) { if (pm->card) { stop_sustain_timers(pm); device_remove_file(&pm->hdev->dev, sysfs_device_attr_channel); device_remove_file(&pm->hdev->dev, sysfs_device_attr_sustain); device_remove_file(&pm->hdev->dev, sysfs_device_attr_octave); snd_card_disconnect(pm->card); snd_card_free_when_closed(pm->card); } return 0; } /* * The PC-MIDI report descriptor for report id 4 is wrong.
*/ static const __u8 *pk_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize == 178 && rdesc[111] == 0x06 && rdesc[112] == 0x00 && rdesc[113] == 0xff) { hid_info(hdev, "fixing up pc-midi keyboard report descriptor\n"); rdesc[144] = 0x18; /* report 4: was 0x10 report count */ } return rdesc; } static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct pcmidi_snd *pm = hid_get_drvdata(hdev); if (HID_UP_MSVENDOR == (usage->hid & HID_USAGE_PAGE) && 1 == pm->ifnum) { pcmidi_setup_extra_keys(pm, hi->input); return 0; } return 0; } static int pk_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct pcmidi_snd *pm = hid_get_drvdata(hdev); int ret = 0; if (1 == pm->ifnum) { if (report->id == data[0]) switch (report->id) { case 0x01: /* midi keys (qwerty)*/ case 0x03: /* midi keyboard (musical)*/ case 0x04: /* extra/midi keys (qwerty)*/ ret = pcmidi_handle_report(pm, report->id, data, size); break; } } return ret; } static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; struct usb_interface *intf; unsigned short ifnum; unsigned long quirks = id->driver_data; struct pcmidi_snd *pm; if (!hid_is_usb(hdev)) return -EINVAL; intf = to_usb_interface(hdev->dev.parent); ifnum = intf->cur_altsetting->desc.bInterfaceNumber; pm = kzalloc(sizeof(*pm), GFP_KERNEL); if (pm == NULL) { hid_err(hdev, "can't alloc descriptor\n"); return -ENOMEM; } pm->hdev = hdev; pm->ifnum = ifnum; hid_set_drvdata(hdev, pm); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "hid parse failed\n"); goto err_free; } if (quirks & PK_QUIRK_NOGET) { /* hid_parse cleared all the quirks */ hdev->quirks |= HID_QUIRK_NOGET; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } ret = pcmidi_snd_initialise(pm); if (ret < 0) goto err_stop; return 0; err_stop: hid_hw_stop(hdev); err_free: kfree(pm); return ret; } static void pk_remove(struct hid_device *hdev) { struct pcmidi_snd *pm = hid_get_drvdata(hdev); pcmidi_snd_terminate(pm); hid_hw_stop(hdev); kfree(pm); } static const struct hid_device_id pk_devices[] = { {HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI), .driver_data = PK_QUIRK_NOGET}, { } }; MODULE_DEVICE_TABLE(hid, pk_devices); static struct hid_driver pk_driver = { .name = "prodikeys", .id_table = pk_devices, .report_fixup = pk_report_fixup, .input_mapping = pk_input_mapping, .raw_event = pk_raw_event, .probe = pk_probe, .remove = pk_remove, }; module_hid_driver(pk_driver); MODULE_DESCRIPTION("HID driver for the Prodikeys PC-MIDI Keyboard"); MODULE_LICENSE("GPL"); |
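A short userspace sketch of driving the three sysfs controls the driver creates above (channel, sustain, octave). The device directory name is a placeholder, since the real path depends on the HID instance string assigned at probe time, and pcmidi_set_attr() is a hypothetical helper, not part of the driver; the value ranges in the comments come from the PCMIDI_* limits defined above.

#include <stdio.h>

/* Placeholder path; substitute the real HID device instance directory. */
#define PCMIDI_SYSFS_DIR "/sys/bus/hid/devices/XXXX:XXXX:XXXX.XXXX"

static int pcmidi_set_attr(const char *attr, const char *value)
{
	char path[256];
	FILE *f;
	int ret = 0;

	snprintf(path, sizeof(path), PCMIDI_SYSFS_DIR "/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	/* store_channel()/store_sustain()/store_octave() parse the value
	 * with sscanf() and reject anything outside the advertised range. */
	if (fprintf(f, "%s\n", value) < 0)
		ret = -1;
	if (fclose(f) != 0)
		ret = -1;
	return ret;
}

int main(void)
{
	pcmidi_set_attr("channel", "2");    /* 0..15 (PCMIDI_CHANNEL_MIN/MAX) */
	pcmidi_set_attr("sustain", "1000"); /* 0..5000 ms (PCMIDI_SUSTAIN_MAX) */
	pcmidi_set_attr("octave", "-1");    /* -2..2 (PCMIDI_OCTAVE_MIN/MAX) */
	return 0;
}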
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_X86_XSAVE_H #define __ASM_X86_XSAVE_H #include <linux/uaccess.h> #include <linux/types.h> #include <asm/processor.h> #include <asm/fpu/api.h> #include <asm/user.h> /* Bit 63 of XCR0 is reserved for future expansion */ #define XFEATURE_MASK_EXTEND (~(XFEATURE_MASK_FPSSE | (1ULL << 63))) #define FXSAVE_SIZE 512 #define XSAVE_HDR_SIZE 64 #define XSAVE_HDR_OFFSET FXSAVE_SIZE #define XSAVE_YMM_SIZE 256 #define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET) #define XSAVE_ALIGNMENT 64 /* All currently supported user features */ #define XFEATURE_MASK_USER_SUPPORTED (XFEATURE_MASK_FP | \ XFEATURE_MASK_SSE | \ XFEATURE_MASK_YMM | \ XFEATURE_MASK_OPMASK | \ XFEATURE_MASK_ZMM_Hi256 | \ XFEATURE_MASK_Hi16_ZMM | \ XFEATURE_MASK_PKRU | \ XFEATURE_MASK_BNDREGS | \ XFEATURE_MASK_BNDCSR | \ XFEATURE_MASK_XTILE | \ XFEATURE_MASK_APX) /* * Features which are restored when returning to user space. * PKRU is not restored on return to user space because PKRU * is switched eagerly in switch_to() and flush_thread() */ #define XFEATURE_MASK_USER_RESTORE \ (XFEATURE_MASK_USER_SUPPORTED & ~XFEATURE_MASK_PKRU) /* Features which are dynamically enabled for a process on request */ #define XFEATURE_MASK_USER_DYNAMIC XFEATURE_MASK_XTILE_DATA /* All currently supported supervisor features */ #define XFEATURE_MASK_SUPERVISOR_SUPPORTED (XFEATURE_MASK_PASID | \ XFEATURE_MASK_CET_USER) /* * A supervisor state component may not always contain valuable information, * and its size may be huge. Saving/restoring such supervisor state components * at each context switch can cause high CPU and space overhead, which should * be avoided. Such supervisor state components should only be saved/restored * on demand. The on-demand supervisor features are set in this mask. * * Unlike the existing supported supervisor features, an independent supervisor * feature does not allocate a buffer in task->fpu, and the corresponding * supervisor state component cannot be saved/restored at each context switch. * * To support an independent supervisor feature, a developer should follow the * dos and don'ts as below: * - Do dynamically allocate a buffer for the supervisor state component. * - Do manually invoke the XSAVES/XRSTORS instruction to save/restore the * state component to/from the buffer. * - Don't set the bit corresponding to the independent supervisor feature in * IA32_XSS at run time, since it has been set at boot time. */ #define XFEATURE_MASK_INDEPENDENT (XFEATURE_MASK_LBR) /* * Unsupported supervisor features. When a supervisor feature in this mask is * supported in the future, move it to the supported supervisor feature mask. */ #define XFEATURE_MASK_SUPERVISOR_UNSUPPORTED (XFEATURE_MASK_PT | \ XFEATURE_MASK_CET_KERNEL) /* All supervisor states including supported and unsupported states.
*/ #define XFEATURE_MASK_SUPERVISOR_ALL (XFEATURE_MASK_SUPERVISOR_SUPPORTED | \ XFEATURE_MASK_INDEPENDENT | \ XFEATURE_MASK_SUPERVISOR_UNSUPPORTED) /* * The feature mask required to restore FPU state: * - All user states which are not eagerly switched in switch_to()/exec() * - The supervisor states */ #define XFEATURE_MASK_FPSTATE (XFEATURE_MASK_USER_RESTORE | \ XFEATURE_MASK_SUPERVISOR_SUPPORTED) /* * Features in this mask have space allocated in the signal frame, but may not * have that space initialized when the feature is in its init state. */ #define XFEATURE_MASK_SIGFRAME_INITOPT (XFEATURE_MASK_XTILE | \ XFEATURE_MASK_USER_DYNAMIC) extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; extern void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask); int xfeature_size(int xfeature_nr); void xsaves(struct xregs_state *xsave, u64 mask); void xrstors(struct xregs_state *xsave, u64 mask); int xfd_enable_feature(u64 xfd_err); #ifdef CONFIG_X86_64 DECLARE_STATIC_KEY_FALSE(__fpu_state_size_dynamic); static __always_inline __pure bool fpu_state_size_dynamic(void) { return static_branch_unlikely(&__fpu_state_size_dynamic); } #else static __always_inline __pure bool fpu_state_size_dynamic(void) { return false; } #endif #endif |
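To make the mask plumbing above concrete, here is a minimal sketch of how a consumer can classify a single xfeature number against these masks; the two helper names are hypothetical and not part of this header, and only BIT_ULL() from <linux/bits.h> is assumed.

#include <linux/bits.h>

/* Hypothetical helpers, shown only to illustrate mask usage. */
static inline bool xfeature_is_user_supported(int xfeature_nr)
{
	return !!(XFEATURE_MASK_USER_SUPPORTED & BIT_ULL(xfeature_nr));
}

static inline bool xfeature_is_independent(int xfeature_nr)
{
	/* Independent features are set in IA32_XSS once at boot and are
	 * saved/restored on demand via xsaves()/xrstors(), never as part
	 * of the per-task buffer (see the comment block above). */
	return !!(XFEATURE_MASK_INDEPENDENT & BIT_ULL(xfeature_nr));
}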
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Fence mechanism for dma-buf to allow for asynchronous dma access * * Copyright (C) 2012 Canonical Ltd * Copyright (C) 2012 Texas Instruments * * Authors: * Rob Clark <robdclark@gmail.com> * Maarten Lankhorst <maarten.lankhorst@canonical.com> */ #ifndef __LINUX_DMA_FENCE_H #define __LINUX_DMA_FENCE_H #include <linux/err.h> #include <linux/wait.h> #include <linux/list.h> #include <linux/bitops.h> #include <linux/kref.h> #include <linux/sched.h> #include <linux/printk.h> #include <linux/rcupdate.h> #include <linux/timekeeping.h> struct dma_fence; struct dma_fence_ops; struct dma_fence_cb; /** * struct dma_fence - software synchronization primitive * @refcount: refcount for this fence * @ops: dma_fence_ops associated with this fence * @rcu: used for releasing fence with kfree_rcu * @cb_list: list of all callbacks to call * @lock: spin_lock_irqsave used for locking * @context: execution context this fence belongs to,
returned by * dma_fence_context_alloc() * @seqno: the sequence number of this fence inside the execution context, * can be compared to decide which fence would be signaled later. * @flags: A mask of DMA_FENCE_FLAG_* defined below * @timestamp: Timestamp when the fence was signaled. * @error: Optional, only valid if < 0, must be set before calling * dma_fence_signal, indicates that the fence has completed with an error. * * The flags member must be manipulated and read using the appropriate * atomic ops (bit_*), so taking the spinlock will not be needed most * of the time. * * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the * implementer of the fence for its own purposes. Can be used in different * ways by different fence implementers, so do not rely on this. * * Since atomic bitops are used, this is not guaranteed to be the case. * Particularly, if the bit was set, but dma_fence_signal was called right * before this bit was set, it would have been able to set the * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that * after dma_fence_signal was called, any enable_signaling call will have either * been completed, or never called at all. */ struct dma_fence { spinlock_t *lock; const struct dma_fence_ops *ops; /* * We clear the callback list on kref_put so that by the time we * release the fence it is unused. No one should be adding to the * cb_list that they don't themselves hold a reference for. * * The lifetime of the timestamp is similarly tied to both the * rcu freelist and the cb_list. The timestamp is only set upon * signaling while simultaneously notifying the cb_list. Ergo, we * only use either the cb_list or the timestamp. Upon destruction, * neither are accessible, and so we can use the rcu. This means * that the cb_list is *only* valid until the signal bit is set, * and to read either you *must* hold a reference to the fence, * and not just the rcu_read_lock. * * Listed in chronological order. */ union { struct list_head cb_list; /* @cb_list replaced by @timestamp on dma_fence_signal() */ ktime_t timestamp; /* @timestamp replaced by @rcu on dma_fence_release() */ struct rcu_head rcu; }; u64 context; u64 seqno; unsigned long flags; struct kref refcount; int error; }; enum dma_fence_flag_bits { DMA_FENCE_FLAG_SIGNALED_BIT, DMA_FENCE_FLAG_TIMESTAMP_BIT, DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, DMA_FENCE_FLAG_USER_BITS, /* must always be last member */ }; typedef void (*dma_fence_func_t)(struct dma_fence *fence, struct dma_fence_cb *cb); /** * struct dma_fence_cb - callback for dma_fence_add_callback() * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list * @func: dma_fence_func_t to call * * This struct will be initialized by dma_fence_add_callback(), additional * data can be passed along by embedding dma_fence_cb in another struct. */ struct dma_fence_cb { struct list_head node; dma_fence_func_t func; }; /** * struct dma_fence_ops - operations implemented for fence * */ struct dma_fence_ops { /** * @use_64bit_seqno: * * True if this dma_fence implementation uses 64bit seqno, false * otherwise. */ bool use_64bit_seqno; /** * @get_driver_name: * * Returns the driver name.
This is a callback to allow drivers to * compute the name at runtime, without having to store it permanently * for each fence, or build a cache of some sort. * * This callback is mandatory. */ const char * (*get_driver_name)(struct dma_fence *fence); /** * @get_timeline_name: * * Return the name of the context this fence belongs to. This is a * callback to allow drivers to compute the name at runtime, without * having to store it permanently for each fence, or build a cache of * some sort. * * This callback is mandatory. */ const char * (*get_timeline_name)(struct dma_fence *fence); /** * @enable_signaling: * * Enable software signaling of fence. * * For fence implementations that have the capability for hw->hw * signaling, they can implement this op to enable the necessary * interrupts, or insert commands into cmdstream, etc, to avoid these * costly operations for the common case where only hw->hw * synchronization is required. This is called in the first * dma_fence_wait() or dma_fence_add_callback() path to let the fence * implementation know that there is another driver waiting on the * signal (ie. hw->sw case). * * This is called with irq's disabled, so only spinlocks which disable * IRQ's can be used in the code outside of this callback. * * A return value of false indicates the fence already passed, * or some failure occurred that made it impossible to enable * signaling. True indicates successful enabling. * * &dma_fence.error may be set in enable_signaling, but only when false * is returned. * * Since many implementations can call dma_fence_signal() even before * @enable_signaling has been called, there's a race window where the * dma_fence_signal() might result in the final fence reference being * released and its memory freed. To avoid this, implementations of this * callback should grab their own reference using dma_fence_get(), to be * released when the fence is signalled (through e.g. the interrupt * handler). * * This callback is optional. If this callback is not present, then the * driver must always have signaling enabled. */ bool (*enable_signaling)(struct dma_fence *fence); /** * @signaled: * * Peek whether the fence is signaled, as a fastpath optimization for * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this * callback does not need to make any guarantees beyond the guarantee * that a fence which once reported as signalled must always return * true from this callback. This callback may return false even if the * fence has completed already, in which case the information hasn't * propagated through the system yet. See also dma_fence_is_signaled(). * * May set &dma_fence.error if returning true. * * This callback is optional. */ bool (*signaled)(struct dma_fence *fence); /** * @wait: * * Custom wait implementation, defaults to dma_fence_default_wait() if * not set. * * Deprecated and should not be used by new implementations. Only used * by existing implementations which need special handling for their * hardware reset procedure. * * Must return -ERESTARTSYS if the wait is intr = true and the wait was * interrupted, the remaining jiffies if the fence has signaled, or 0 if the * wait timed out. Can also return other error values on custom implementations, * which should be treated as if the fence is signaled. For example a hardware * lockup could be reported like that. */ signed long (*wait)(struct dma_fence *fence, bool intr, signed long timeout); /** * @release: * * Called on destruction of fence to release additional resources. * Can be called from irq context.
This callback is optional. If it is * NULL, then dma_fence_free() is instead called as the default * implementation. */ void (*release)(struct dma_fence *fence); /** * @set_deadline: * * Callback to allow a fence waiter to inform the fence signaler of * an upcoming deadline, such as vblank, by which point the waiter * would prefer the fence to be signaled by. This is intended to * give feedback to the fence signaler to aid in power management * decisions, such as boosting GPU frequency. * * This is called without &dma_fence.lock held, it can be called * multiple times and from any context. Locking is up to the callee * if it has some state to manage. If multiple deadlines are set, * the expectation is to track the soonest one. If the deadline is * before the current time, it should be interpreted as an immediate * deadline. * * This callback is optional. */ void (*set_deadline)(struct dma_fence *fence, ktime_t deadline); }; void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, spinlock_t *lock, u64 context, u64 seqno); void dma_fence_release(struct kref *kref); void dma_fence_free(struct dma_fence *fence); void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq); /** * dma_fence_put - decreases refcount of the fence * @fence: fence to reduce refcount of */ static inline void dma_fence_put(struct dma_fence *fence) { if (fence) kref_put(&fence->refcount, dma_fence_release); } /** * dma_fence_get - increases refcount of the fence * @fence: fence to increase refcount of * * Returns the same fence, with refcount increased by 1. */ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence) { if (fence) kref_get(&fence->refcount); return fence; } /** * dma_fence_get_rcu - get a fence from a dma_resv_list with * rcu read lock * @fence: fence to increase refcount of * * Function returns NULL if no refcount could be obtained, or the fence. */ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) { if (kref_get_unless_zero(&fence->refcount)) return fence; else return NULL; } /** * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence * @fencep: pointer to fence to increase refcount of * * Function returns NULL if no refcount could be obtained, or the fence. * This function handles acquiring a reference to a fence that may be * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU), * so long as the caller is using RCU on the pointer to the fence. * * An alternative mechanism is to employ a seqlock to protect a bunch of * fences, such as used by struct dma_resv. When using a seqlock, * the seqlock must be taken before and checked after a reference to the * fence is acquired (as shown here). * * The caller is required to hold the RCU read lock. */ static inline struct dma_fence * dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep) { do { struct dma_fence *fence; fence = rcu_dereference(*fencep); if (!fence) return NULL; if (!dma_fence_get_rcu(fence)) continue; /* The atomic_inc_not_zero() inside dma_fence_get_rcu() * provides a full memory barrier upon success (such as now). * This is paired with the write barrier from assigning * to the __rcu protected fence pointer so that if that * pointer still matches the current fence, we know we * have successfully acquire a reference to it. If it no * longer matches, we are holding a reference to some other * reallocated pointer. 
This is possible if the allocator * is using a freelist like SLAB_TYPESAFE_BY_RCU where the * fence remains valid for the RCU grace period, but it * may be reallocated. When using such allocators, we are * responsible for ensuring the reference we get is to * the right fence, as below. */ if (fence == rcu_access_pointer(*fencep)) return rcu_pointer_handoff(fence); dma_fence_put(fence); } while (1); } #ifdef CONFIG_LOCKDEP bool dma_fence_begin_signalling(void); void dma_fence_end_signalling(bool cookie); void __dma_fence_might_wait(void); #else static inline bool dma_fence_begin_signalling(void) { return true; } static inline void dma_fence_end_signalling(bool cookie) {} static inline void __dma_fence_might_wait(void) {} #endif int dma_fence_signal(struct dma_fence *fence); int dma_fence_signal_locked(struct dma_fence *fence); int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp); int dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp); signed long dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout); int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, dma_fence_func_t func); bool dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb); void dma_fence_enable_sw_signaling(struct dma_fence *fence); /** * dma_fence_is_signaled_locked - Return an indication if the fence * is signaled yet. * @fence: the fence to check * * Returns true if the fence was already signaled, false if not. Since this * function doesn't enable signaling, it is not guaranteed to ever return * true if dma_fence_add_callback(), dma_fence_wait() or * dma_fence_enable_sw_signaling() haven't been called before. * * This function requires &dma_fence.lock to be held. * * See also dma_fence_is_signaled(). */ static inline bool dma_fence_is_signaled_locked(struct dma_fence *fence) { if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return true; if (fence->ops->signaled && fence->ops->signaled(fence)) { dma_fence_signal_locked(fence); return true; } return false; } /** * dma_fence_is_signaled - Return an indication if the fence is signaled yet. * @fence: the fence to check * * Returns true if the fence was already signaled, false if not. Since this * function doesn't enable signaling, it is not guaranteed to ever return * true if dma_fence_add_callback(), dma_fence_wait() or * dma_fence_enable_sw_signaling() haven't been called before. * * It's recommended for seqno fences to call dma_fence_signal when the * operation is complete, it makes it possible to prevent issues from * wraparound between time of issue and time of use by checking the return * value of this function before calling hardware-specific wait instructions. * * See also dma_fence_is_signaled_locked(). */ static inline bool dma_fence_is_signaled(struct dma_fence *fence) { if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return true; if (fence->ops->signaled && fence->ops->signaled(fence)) { dma_fence_signal(fence); return true; } return false; } /** * __dma_fence_is_later - return if f1 is chronologically later than f2 * @f1: the first fence's seqno * @f2: the second fence's seqno from the same context * @ops: dma_fence_ops associated with the seqno * * Returns true if f1 is chronologically later than f2. Both fences must be * from the same context, since a seqno is not common across contexts. 
*/ static inline bool __dma_fence_is_later(u64 f1, u64 f2, const struct dma_fence_ops *ops) { /* This is for backward compatibility with drivers which can only handle * 32bit sequence numbers. Use a 64bit compare when the driver says to * do so. */ if (ops->use_64bit_seqno) return f1 > f2; return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0; } /** * dma_fence_is_later - return if f1 is chronologically later than f2 * @f1: the first fence from the same context * @f2: the second fence from the same context * * Returns true if f1 is chronologically later than f2. Both fences must be * from the same context, since a seqno is not re-used across contexts. */ static inline bool dma_fence_is_later(struct dma_fence *f1, struct dma_fence *f2) { if (WARN_ON(f1->context != f2->context)) return false; return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops); } /** * dma_fence_is_later_or_same - return true if f1 is later or same as f2 * @f1: the first fence from the same context * @f2: the second fence from the same context * * Returns true if f1 is chronologically later than f2 or the same fence. Both * fences must be from the same context, since a seqno is not re-used across * contexts. */ static inline bool dma_fence_is_later_or_same(struct dma_fence *f1, struct dma_fence *f2) { return f1 == f2 || dma_fence_is_later(f1, f2); } /** * dma_fence_later - return the chronologically later fence * @f1: the first fence from the same context * @f2: the second fence from the same context * * Returns NULL if both fences are signaled, otherwise the fence that would be * signaled last. Both fences must be from the same context, since a seqno is * not re-used across contexts. */ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1, struct dma_fence *f2) { if (WARN_ON(f1->context != f2->context)) return NULL; /* * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never * have been set if enable_signaling wasn't called, and enabling that * here is overkill. */ if (dma_fence_is_later(f1, f2)) return dma_fence_is_signaled(f1) ? NULL : f1; else return dma_fence_is_signaled(f2) ? NULL : f2; } /** * dma_fence_get_status_locked - returns the status upon completion * @fence: the dma_fence to query * * Drivers can supply an optional error status condition before they signal * the fence (to indicate whether the fence was completed due to an error * rather than success). The value of the status condition is only valid * if the fence has been signaled, dma_fence_get_status_locked() first checks * the signal state before reporting the error status. * * Returns 0 if the fence has not yet been signaled, 1 if the fence has * been signaled without an error condition, or a negative error code * if the fence has been completed in err. */ static inline int dma_fence_get_status_locked(struct dma_fence *fence) { if (dma_fence_is_signaled_locked(fence)) return fence->error ?: 1; else return 0; } int dma_fence_get_status(struct dma_fence *fence); /** * dma_fence_set_error - flag an error condition on the fence * @fence: the dma_fence * @error: the error to store * * Drivers can supply an optional error status condition before they signal * the fence, to indicate that the fence was completed due to an error * rather than success. This must be set before signaling (so that the value * is visible before any waiters on the signal callback are woken). This * helper exists to help catching erroneous setting of #dma_fence.error. 
* * Examples of error codes which drivers should use: * * * %-ENODATA This operation produced no data, no other operation affected. * * %-ECANCELED All operations from the same context have been canceled. * * %-ETIME Operation caused a timeout and potentially device reset. */ static inline void dma_fence_set_error(struct dma_fence *fence, int error) { WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); WARN_ON(error >= 0 || error < -MAX_ERRNO); fence->error = error; } /** * dma_fence_timestamp - helper to get the completion timestamp of a fence * @fence: fence to get the timestamp from. * * After a fence is signaled the timestamp is updated with the signaling time, * but setting the timestamp can race with tasks waiting for the signaling. This * helper busy waits for the correct timestamp to appear. */ static inline ktime_t dma_fence_timestamp(struct dma_fence *fence) { if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))) return ktime_get(); while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) cpu_relax(); return fence->timestamp; } signed long dma_fence_wait_timeout(struct dma_fence *, bool intr, signed long timeout); signed long dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, bool intr, signed long timeout, uint32_t *idx); /** * dma_fence_wait - sleep until the fence gets signaled * @fence: the fence to wait on * @intr: if true, do an interruptible wait * * This function will return -ERESTARTSYS if interrupted by a signal, * or 0 if the fence was signaled. Other error values may be * returned on custom implementations. * * Performs a synchronous wait on this fence. It is assumed the caller * directly or indirectly holds a reference to the fence, otherwise the * fence might be freed before return, resulting in undefined behavior. * * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout(). */ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) { signed long ret; /* Since dma_fence_wait_timeout cannot timeout with * MAX_SCHEDULE_TIMEOUT, only valid return values are * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. */ ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); return ret < 0 ? ret : 0; } void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline); struct dma_fence *dma_fence_get_stub(void); struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp); u64 dma_fence_context_alloc(unsigned num); extern const struct dma_fence_ops dma_fence_array_ops; extern const struct dma_fence_ops dma_fence_chain_ops; /** * dma_fence_is_array - check if a fence is from the array subclass * @fence: the fence to test * * Return true if it is a dma_fence_array and false otherwise. */ static inline bool dma_fence_is_array(struct dma_fence *fence) { return fence->ops == &dma_fence_array_ops; } /** * dma_fence_is_chain - check if a fence is from the chain subclass * @fence: the fence to test * * Return true if it is a dma_fence_chain and false otherwise. */ static inline bool dma_fence_is_chain(struct dma_fence *fence) { return fence->ops == &dma_fence_chain_ops; } /** * dma_fence_is_container - check if a fence is a container for other fences * @fence: the fence to test * * Return true if this fence is a container for other fences, false otherwise. * This is important since we can't build up large fence structure or otherwise * we run into recursion during operation on those fences. 
*/ static inline bool dma_fence_is_container(struct dma_fence *fence) { return dma_fence_is_array(fence) || dma_fence_is_chain(fence); } #endif /* __LINUX_DMA_FENCE_H */ |
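To ground the dma_fence_ops documentation above, here is a minimal sketch of a fence provider; all "demo" names are hypothetical. Only the two mandatory callbacks are implemented, so, per the @enable_signaling rules, the fence behaves as if signaling is always enabled.

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/dma-fence.h>

static DEFINE_SPINLOCK(demo_fence_lock);

static const char *demo_get_driver_name(struct dma_fence *fence)
{
	return "demo";
}

static const char *demo_get_timeline_name(struct dma_fence *fence)
{
	return "demo-timeline";
}

static const struct dma_fence_ops demo_fence_ops = {
	.get_driver_name = demo_get_driver_name,
	.get_timeline_name = demo_get_timeline_name,
};

static struct dma_fence *demo_fence_create(u64 context, u64 seqno)
{
	struct dma_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return NULL;
	/* Refcount starts at 1; the final dma_fence_put() ends up in
	 * dma_fence_release(), which falls back to dma_fence_free()
	 * because no .release callback is provided. */
	dma_fence_init(fence, &demo_fence_ops, &demo_fence_lock,
		       context, seqno);
	return fence;
}

/* Usage: allocate a context once per timeline, then signal and drop. */
static int demo_run_once(void)
{
	u64 ctx = dma_fence_context_alloc(1);
	struct dma_fence *fence = demo_fence_create(ctx, 1);

	if (!fence)
		return -ENOMEM;
	dma_fence_signal(fence); /* sets DMA_FENCE_FLAG_SIGNALED_BIT, runs cb_list */
	dma_fence_put(fence);    /* drops the initial reference; freed via RCU */
	return 0;
}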
// SPDX-License-Identifier: GPL-2.0-or-later /* * cpia CPiA (1) gspca driver * * Copyright (C) 2010-2011 Hans de Goede <hdegoede@redhat.com> * * This module is adapted from the in kernel v4l1 cpia driver which is: * * (C) Copyright 1999-2000 Peter Pregler * (C) Copyright 1999-2000 Scott J. Bertin * (C) Copyright 1999-2000 Johannes Erdfelt <johannes@erdfelt.com> * (C) Copyright 2000 STMicroelectronics */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "cpia1" #include <linux/input.h> #include <linux/sched/signal.h> #include <linux/bitops.h> #include "gspca.h" MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("Vision CPiA"); MODULE_LICENSE("GPL"); /* constant values */ #define MAGIC_0 0x19 #define MAGIC_1 0x68 #define DATA_IN 0xc0 #define DATA_OUT 0x40 #define VIDEOSIZE_QCIF 0 /* 176x144 */ #define VIDEOSIZE_CIF 1 /* 352x288 */ #define SUBSAMPLE_420 0 #define SUBSAMPLE_422 1 #define YUVORDER_YUYV 0 #define YUVORDER_UYVY 1 #define NOT_COMPRESSED 0 #define COMPRESSED 1 #define NO_DECIMATION 0 #define DECIMATION_ENAB 1 #define EOI 0xff /* End Of Image */ #define EOL 0xfd /* End Of Line */ #define FRAME_HEADER_SIZE 64 /* Image grab modes */ #define CPIA_GRAB_SINGLE 0 #define CPIA_GRAB_CONTINEOUS 1 /* Compression parameters */ #define CPIA_COMPRESSION_NONE 0 #define CPIA_COMPRESSION_AUTO 1 #define CPIA_COMPRESSION_MANUAL 2 #define CPIA_COMPRESSION_TARGET_QUALITY 0 #define CPIA_COMPRESSION_TARGET_FRAMERATE 1 /* Return offsets for GetCameraState */ #define SYSTEMSTATE 0 #define GRABSTATE 1 #define STREAMSTATE 2 #define FATALERROR 3 #define CMDERROR 4 #define DEBUGFLAGS 5 #define VPSTATUS 6 #define ERRORCODE 7 /* SystemState */ #define UNINITIALISED_STATE 0 #define PASS_THROUGH_STATE 1 #define LO_POWER_STATE 2 #define HI_POWER_STATE 3 #define WARM_BOOT_STATE 4 /* GrabState */ #define GRAB_IDLE 0 #define GRAB_ACTIVE 1 #define GRAB_DONE 2 /* StreamState */ #define STREAM_NOT_READY 0 #define STREAM_READY 1 #define STREAM_OPEN 2 #define STREAM_PAUSED 3 #define STREAM_FINISHED 4 /* Fatal Error, CmdError, and DebugFlags */ #define CPIA_FLAG 1 #define SYSTEM_FLAG 2 #define INT_CTRL_FLAG 4 #define PROCESS_FLAG 8 #define
COM_FLAG 16 #define VP_CTRL_FLAG 32 #define CAPTURE_FLAG 64 #define DEBUG_FLAG 128 /* VPStatus */ #define VP_STATE_OK 0x00 #define VP_STATE_FAILED_VIDEOINIT 0x01 #define VP_STATE_FAILED_AECACBINIT 0x02 #define VP_STATE_AEC_MAX 0x04 #define VP_STATE_ACB_BMAX 0x08 #define VP_STATE_ACB_RMIN 0x10 #define VP_STATE_ACB_GMIN 0x20 #define VP_STATE_ACB_RMAX 0x40 #define VP_STATE_ACB_GMAX 0x80 /* default (minimum) compensation values */ #define COMP_RED 220 #define COMP_GREEN1 214 #define COMP_GREEN2 COMP_GREEN1 #define COMP_BLUE 230 /* exposure status */ #define EXPOSURE_VERY_LIGHT 0 #define EXPOSURE_LIGHT 1 #define EXPOSURE_NORMAL 2 #define EXPOSURE_DARK 3 #define EXPOSURE_VERY_DARK 4 #define CPIA_MODULE_CPIA (0 << 5) #define CPIA_MODULE_SYSTEM (1 << 5) #define CPIA_MODULE_VP_CTRL (5 << 5) #define CPIA_MODULE_CAPTURE (6 << 5) #define CPIA_MODULE_DEBUG (7 << 5) #define INPUT (DATA_IN << 8) #define OUTPUT (DATA_OUT << 8) #define CPIA_COMMAND_GetCPIAVersion (INPUT | CPIA_MODULE_CPIA | 1) #define CPIA_COMMAND_GetPnPID (INPUT | CPIA_MODULE_CPIA | 2) #define CPIA_COMMAND_GetCameraStatus (INPUT | CPIA_MODULE_CPIA | 3) #define CPIA_COMMAND_GotoHiPower (OUTPUT | CPIA_MODULE_CPIA | 4) #define CPIA_COMMAND_GotoLoPower (OUTPUT | CPIA_MODULE_CPIA | 5) #define CPIA_COMMAND_GotoSuspend (OUTPUT | CPIA_MODULE_CPIA | 7) #define CPIA_COMMAND_GotoPassThrough (OUTPUT | CPIA_MODULE_CPIA | 8) #define CPIA_COMMAND_ModifyCameraStatus (OUTPUT | CPIA_MODULE_CPIA | 10) #define CPIA_COMMAND_ReadVCRegs (INPUT | CPIA_MODULE_SYSTEM | 1) #define CPIA_COMMAND_WriteVCReg (OUTPUT | CPIA_MODULE_SYSTEM | 2) #define CPIA_COMMAND_ReadMCPorts (INPUT | CPIA_MODULE_SYSTEM | 3) #define CPIA_COMMAND_WriteMCPort (OUTPUT | CPIA_MODULE_SYSTEM | 4) #define CPIA_COMMAND_SetBaudRate (OUTPUT | CPIA_MODULE_SYSTEM | 5) #define CPIA_COMMAND_SetECPTiming (OUTPUT | CPIA_MODULE_SYSTEM | 6) #define CPIA_COMMAND_ReadIDATA (INPUT | CPIA_MODULE_SYSTEM | 7) #define CPIA_COMMAND_WriteIDATA (OUTPUT | CPIA_MODULE_SYSTEM | 8) #define CPIA_COMMAND_GenericCall (OUTPUT | CPIA_MODULE_SYSTEM | 9) #define CPIA_COMMAND_I2CStart (OUTPUT | CPIA_MODULE_SYSTEM | 10) #define CPIA_COMMAND_I2CStop (OUTPUT | CPIA_MODULE_SYSTEM | 11) #define CPIA_COMMAND_I2CWrite (OUTPUT | CPIA_MODULE_SYSTEM | 12) #define CPIA_COMMAND_I2CRead (INPUT | CPIA_MODULE_SYSTEM | 13) #define CPIA_COMMAND_GetVPVersion (INPUT | CPIA_MODULE_VP_CTRL | 1) #define CPIA_COMMAND_ResetFrameCounter (INPUT | CPIA_MODULE_VP_CTRL | 2) #define CPIA_COMMAND_SetColourParams (OUTPUT | CPIA_MODULE_VP_CTRL | 3) #define CPIA_COMMAND_SetExposure (OUTPUT | CPIA_MODULE_VP_CTRL | 4) #define CPIA_COMMAND_SetColourBalance (OUTPUT | CPIA_MODULE_VP_CTRL | 6) #define CPIA_COMMAND_SetSensorFPS (OUTPUT | CPIA_MODULE_VP_CTRL | 7) #define CPIA_COMMAND_SetVPDefaults (OUTPUT | CPIA_MODULE_VP_CTRL | 8) #define CPIA_COMMAND_SetApcor (OUTPUT | CPIA_MODULE_VP_CTRL | 9) #define CPIA_COMMAND_SetFlickerCtrl (OUTPUT | CPIA_MODULE_VP_CTRL | 10) #define CPIA_COMMAND_SetVLOffset (OUTPUT | CPIA_MODULE_VP_CTRL | 11) #define CPIA_COMMAND_GetColourParams (INPUT | CPIA_MODULE_VP_CTRL | 16) #define CPIA_COMMAND_GetColourBalance (INPUT | CPIA_MODULE_VP_CTRL | 17) #define CPIA_COMMAND_GetExposure (INPUT | CPIA_MODULE_VP_CTRL | 18) #define CPIA_COMMAND_SetSensorMatrix (OUTPUT | CPIA_MODULE_VP_CTRL | 19) #define CPIA_COMMAND_ColourBars (OUTPUT | CPIA_MODULE_VP_CTRL | 25) #define CPIA_COMMAND_ReadVPRegs (INPUT | CPIA_MODULE_VP_CTRL | 30) #define CPIA_COMMAND_WriteVPReg (OUTPUT | CPIA_MODULE_VP_CTRL | 31) #define CPIA_COMMAND_GrabFrame (OUTPUT | 
CPIA_MODULE_CAPTURE | 1) #define CPIA_COMMAND_UploadFrame (OUTPUT | CPIA_MODULE_CAPTURE | 2) #define CPIA_COMMAND_SetGrabMode (OUTPUT | CPIA_MODULE_CAPTURE | 3) #define CPIA_COMMAND_InitStreamCap (OUTPUT | CPIA_MODULE_CAPTURE | 4) #define CPIA_COMMAND_FiniStreamCap (OUTPUT | CPIA_MODULE_CAPTURE | 5) #define CPIA_COMMAND_StartStreamCap (OUTPUT | CPIA_MODULE_CAPTURE | 6) #define CPIA_COMMAND_EndStreamCap (OUTPUT | CPIA_MODULE_CAPTURE | 7) #define CPIA_COMMAND_SetFormat (OUTPUT | CPIA_MODULE_CAPTURE | 8) #define CPIA_COMMAND_SetROI (OUTPUT | CPIA_MODULE_CAPTURE | 9) #define CPIA_COMMAND_SetCompression (OUTPUT | CPIA_MODULE_CAPTURE | 10) #define CPIA_COMMAND_SetCompressionTarget (OUTPUT | CPIA_MODULE_CAPTURE | 11) #define CPIA_COMMAND_SetYUVThresh (OUTPUT | CPIA_MODULE_CAPTURE | 12) #define CPIA_COMMAND_SetCompressionParams (OUTPUT | CPIA_MODULE_CAPTURE | 13) #define CPIA_COMMAND_DiscardFrame (OUTPUT | CPIA_MODULE_CAPTURE | 14) #define CPIA_COMMAND_GrabReset (OUTPUT | CPIA_MODULE_CAPTURE | 15) #define CPIA_COMMAND_OutputRS232 (OUTPUT | CPIA_MODULE_DEBUG | 1) #define CPIA_COMMAND_AbortProcess (OUTPUT | CPIA_MODULE_DEBUG | 4) #define CPIA_COMMAND_SetDramPage (OUTPUT | CPIA_MODULE_DEBUG | 5) #define CPIA_COMMAND_StartDramUpload (OUTPUT | CPIA_MODULE_DEBUG | 6) #define CPIA_COMMAND_StartDummyDtream (OUTPUT | CPIA_MODULE_DEBUG | 8) #define CPIA_COMMAND_AbortStream (OUTPUT | CPIA_MODULE_DEBUG | 9) #define CPIA_COMMAND_DownloadDRAM (OUTPUT | CPIA_MODULE_DEBUG | 10) #define CPIA_COMMAND_Null (OUTPUT | CPIA_MODULE_DEBUG | 11) #define ROUND_UP_EXP_FOR_FLICKER 15 /* Constants for automatic frame rate adjustment */ #define MAX_EXP 302 #define MAX_EXP_102 255 #define LOW_EXP 140 #define VERY_LOW_EXP 70 #define TC 94 #define EXP_ACC_DARK 50 #define EXP_ACC_LIGHT 90 #define HIGH_COMP_102 160 #define MAX_COMP 239 #define DARK_TIME 3 #define LIGHT_TIME 3 #define FIRMWARE_VERSION(x, y) (sd->params.version.firmwareVersion == (x) && \ sd->params.version.firmwareRevision == (y)) #define CPIA1_CID_COMP_TARGET (V4L2_CTRL_CLASS_USER + 0x1000) #define BRIGHTNESS_DEF 50 #define CONTRAST_DEF 48 #define SATURATION_DEF 50 #define FREQ_DEF V4L2_CID_POWER_LINE_FREQUENCY_50HZ #define ILLUMINATORS_1_DEF 0 #define ILLUMINATORS_2_DEF 0 #define COMP_TARGET_DEF CPIA_COMPRESSION_TARGET_QUALITY /* Developer's Guide Table 5 p 3-34 * indexed by [mains][sensorFps.baserate][sensorFps.divisor]*/ static u8 flicker_jumps[2][2][4] = { { { 76, 38, 19, 9 }, { 92, 46, 23, 11 } }, { { 64, 32, 16, 8 }, { 76, 38, 19, 9} } }; struct cam_params { struct { u8 firmwareVersion; u8 firmwareRevision; u8 vcVersion; u8 vcRevision; } version; struct { u16 vendor; u16 product; u16 deviceRevision; } pnpID; struct { u8 vpVersion; u8 vpRevision; u16 cameraHeadID; } vpVersion; struct { u8 systemState; u8 grabState; u8 streamState; u8 fatalError; u8 cmdError; u8 debugFlags; u8 vpStatus; u8 errorCode; } status; struct { u8 brightness; u8 contrast; u8 saturation; } colourParams; struct { u8 gainMode; u8 expMode; u8 compMode; u8 centreWeight; u8 gain; u8 fineExp; u8 coarseExpLo; u8 coarseExpHi; u8 redComp; u8 green1Comp; u8 green2Comp; u8 blueComp; } exposure; struct { u8 balanceMode; u8 redGain; u8 greenGain; u8 blueGain; } colourBalance; struct { u8 divisor; u8 baserate; } sensorFps; struct { u8 gain1; u8 gain2; u8 gain4; u8 gain8; } apcor; struct { u8 disabled; u8 flickerMode; u8 coarseJump; u8 allowableOverExposure; } flickerControl; struct { u8 gain1; u8 gain2; u8 gain4; u8 gain8; } vlOffset; struct { u8 mode; u8 decimation; } compression; struct { u8 
frTargeting; u8 targetFR; u8 targetQ; } compressionTarget; struct { u8 yThreshold; u8 uvThreshold; } yuvThreshold; struct { u8 hysteresis; u8 threshMax; u8 smallStep; u8 largeStep; u8 decimationHysteresis; u8 frDiffStepThresh; u8 qDiffStepThresh; u8 decimationThreshMod; } compressionParams; struct { u8 videoSize; /* CIF/QCIF */ u8 subSample; u8 yuvOrder; } format; struct { /* Intel QX3 specific data */ u8 qx3_detected; /* a QX3 is present */ u8 toplight; /* top light lit , R/W */ u8 bottomlight; /* bottom light lit, R/W */ u8 button; /* snapshot button pressed (R/O) */ u8 cradled; /* microscope is in cradle (R/O) */ } qx3; struct { u8 colStart; /* skip first 8*colStart pixels */ u8 colEnd; /* finish at 8*colEnd pixels */ u8 rowStart; /* skip first 4*rowStart lines */ u8 rowEnd; /* finish at 4*rowEnd lines */ } roi; u8 ecpTiming; u8 streamStartLine; }; /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ struct cam_params params; /* camera settings */ atomic_t cam_exposure; atomic_t fps; int exposure_count; u8 exposure_status; struct v4l2_ctrl *freq; u8 mainsFreq; /* 0 = 50hz, 1 = 60hz */ u8 first_frame; }; static const struct v4l2_pix_format mode[] = { {160, 120, V4L2_PIX_FMT_CPIA1, V4L2_FIELD_NONE, /* The sizeimage is trial and error, as with low framerates * the camera will pad out usb frames, making the image * data larger than strictly necessary */ .bytesperline = 160, .sizeimage = 65536, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 3}, {176, 144, V4L2_PIX_FMT_CPIA1, V4L2_FIELD_NONE, .bytesperline = 172, .sizeimage = 65536, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2}, {320, 240, V4L2_PIX_FMT_CPIA1, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 262144, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, {352, 288, V4L2_PIX_FMT_CPIA1, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 262144, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; /********************************************************************** * * General functions * **********************************************************************/ static int cpia_usb_transferCmd(struct gspca_dev *gspca_dev, u8 *command) { u8 requesttype; unsigned int pipe; int ret, databytes = command[6] | (command[7] << 8); /* Sometimes we see spurious EPIPE errors */ int retries = 3; if (command[0] == DATA_IN) { pipe = usb_rcvctrlpipe(gspca_dev->dev, 0); requesttype = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE; } else if (command[0] == DATA_OUT) { pipe = usb_sndctrlpipe(gspca_dev->dev, 0); requesttype = USB_TYPE_VENDOR | USB_RECIP_DEVICE; } else { gspca_err(gspca_dev, "Unexpected first byte of command: %x\n", command[0]); return -EINVAL; } retry: ret = usb_control_msg(gspca_dev->dev, pipe, command[1], requesttype, command[2] | (command[3] << 8), command[4] | (command[5] << 8), gspca_dev->usb_buf, databytes, 1000); if (ret < 0) pr_err("usb_control_msg %02x, error %d\n", command[1], ret); if (ret == -EPIPE && retries > 0) { retries--; goto retry; } return (ret < 0) ? 
ret : 0; } /* send an arbitrary command to the camera */ static int do_command(struct gspca_dev *gspca_dev, u16 command, u8 a, u8 b, u8 c, u8 d) { struct sd *sd = (struct sd *) gspca_dev; int ret, datasize; u8 cmd[8]; switch (command) { case CPIA_COMMAND_GetCPIAVersion: case CPIA_COMMAND_GetPnPID: case CPIA_COMMAND_GetCameraStatus: case CPIA_COMMAND_GetVPVersion: case CPIA_COMMAND_GetColourParams: case CPIA_COMMAND_GetColourBalance: case CPIA_COMMAND_GetExposure: datasize = 8; break; case CPIA_COMMAND_ReadMCPorts: case CPIA_COMMAND_ReadVCRegs: datasize = 4; break; default: datasize = 0; break; } cmd[0] = command >> 8; cmd[1] = command & 0xff; cmd[2] = a; cmd[3] = b; cmd[4] = c; cmd[5] = d; cmd[6] = datasize; cmd[7] = 0; ret = cpia_usb_transferCmd(gspca_dev, cmd); if (ret) return ret; switch (command) { case CPIA_COMMAND_GetCPIAVersion: sd->params.version.firmwareVersion = gspca_dev->usb_buf[0]; sd->params.version.firmwareRevision = gspca_dev->usb_buf[1]; sd->params.version.vcVersion = gspca_dev->usb_buf[2]; sd->params.version.vcRevision = gspca_dev->usb_buf[3]; break; case CPIA_COMMAND_GetPnPID: sd->params.pnpID.vendor = gspca_dev->usb_buf[0] | (gspca_dev->usb_buf[1] << 8); sd->params.pnpID.product = gspca_dev->usb_buf[2] | (gspca_dev->usb_buf[3] << 8); sd->params.pnpID.deviceRevision = gspca_dev->usb_buf[4] | (gspca_dev->usb_buf[5] << 8); break; case CPIA_COMMAND_GetCameraStatus: sd->params.status.systemState = gspca_dev->usb_buf[0]; sd->params.status.grabState = gspca_dev->usb_buf[1]; sd->params.status.streamState = gspca_dev->usb_buf[2]; sd->params.status.fatalError = gspca_dev->usb_buf[3]; sd->params.status.cmdError = gspca_dev->usb_buf[4]; sd->params.status.debugFlags = gspca_dev->usb_buf[5]; sd->params.status.vpStatus = gspca_dev->usb_buf[6]; sd->params.status.errorCode = gspca_dev->usb_buf[7]; break; case CPIA_COMMAND_GetVPVersion: sd->params.vpVersion.vpVersion = gspca_dev->usb_buf[0]; sd->params.vpVersion.vpRevision = gspca_dev->usb_buf[1]; sd->params.vpVersion.cameraHeadID = gspca_dev->usb_buf[2] | (gspca_dev->usb_buf[3] << 8); break; case CPIA_COMMAND_GetColourParams: sd->params.colourParams.brightness = gspca_dev->usb_buf[0]; sd->params.colourParams.contrast = gspca_dev->usb_buf[1]; sd->params.colourParams.saturation = gspca_dev->usb_buf[2]; break; case CPIA_COMMAND_GetColourBalance: sd->params.colourBalance.redGain = gspca_dev->usb_buf[0]; sd->params.colourBalance.greenGain = gspca_dev->usb_buf[1]; sd->params.colourBalance.blueGain = gspca_dev->usb_buf[2]; break; case CPIA_COMMAND_GetExposure: sd->params.exposure.gain = gspca_dev->usb_buf[0]; sd->params.exposure.fineExp = gspca_dev->usb_buf[1]; sd->params.exposure.coarseExpLo = gspca_dev->usb_buf[2]; sd->params.exposure.coarseExpHi = gspca_dev->usb_buf[3]; sd->params.exposure.redComp = gspca_dev->usb_buf[4]; sd->params.exposure.green1Comp = gspca_dev->usb_buf[5]; sd->params.exposure.green2Comp = gspca_dev->usb_buf[6]; sd->params.exposure.blueComp = gspca_dev->usb_buf[7]; break; case CPIA_COMMAND_ReadMCPorts: /* test button press */ a = ((gspca_dev->usb_buf[1] & 0x02) == 0); if (a != sd->params.qx3.button) { #if IS_ENABLED(CONFIG_INPUT) input_report_key(gspca_dev->input_dev, KEY_CAMERA, a); input_sync(gspca_dev->input_dev); #endif sd->params.qx3.button = a; } if (sd->params.qx3.button) { /* button pressed - unlock the latch */ ret = do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 3, 0xdf, 0xdf, 0); if (ret) return ret; ret = do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 3, 0xff, 0xff, 0); if (ret) return ret; } /* test 
whether microscope is cradled */ sd->params.qx3.cradled = ((gspca_dev->usb_buf[2] & 0x40) == 0); break; } return 0; } /* send a command to the camera with an additional data transaction */ static int do_command_extended(struct gspca_dev *gspca_dev, u16 command, u8 a, u8 b, u8 c, u8 d, u8 e, u8 f, u8 g, u8 h, u8 i, u8 j, u8 k, u8 l) { u8 cmd[8]; cmd[0] = command >> 8; cmd[1] = command & 0xff; cmd[2] = a; cmd[3] = b; cmd[4] = c; cmd[5] = d; cmd[6] = 8; cmd[7] = 0; gspca_dev->usb_buf[0] = e; gspca_dev->usb_buf[1] = f; gspca_dev->usb_buf[2] = g; gspca_dev->usb_buf[3] = h; gspca_dev->usb_buf[4] = i; gspca_dev->usb_buf[5] = j; gspca_dev->usb_buf[6] = k; gspca_dev->usb_buf[7] = l; return cpia_usb_transferCmd(gspca_dev, cmd); } /* find_over_exposure * Finds a suitable value of OverExposure for use with SetFlickerCtrl * Some calculation is required because this value changes with the brightness * set with SetColourParameters * * Parameters: Brightness - last brightness value set with SetColourParameters * * Returns: OverExposure value to use with SetFlickerCtrl */ #define FLICKER_MAX_EXPOSURE 250 #define FLICKER_ALLOWABLE_OVER_EXPOSURE 146 #define FLICKER_BRIGHTNESS_CONSTANT 59 static int find_over_exposure(int brightness) { int MaxAllowableOverExposure, OverExposure; MaxAllowableOverExposure = FLICKER_MAX_EXPOSURE - brightness - FLICKER_BRIGHTNESS_CONSTANT; OverExposure = min(MaxAllowableOverExposure, FLICKER_ALLOWABLE_OVER_EXPOSURE); return OverExposure; } #undef FLICKER_MAX_EXPOSURE #undef FLICKER_ALLOWABLE_OVER_EXPOSURE #undef FLICKER_BRIGHTNESS_CONSTANT /* initialise cam_data structure */ static void reset_camera_params(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct cam_params *params = &sd->params; /* The following parameter values are the defaults from * "Software Developer's Guide for CPiA Cameras". Any changes * to the defaults are noted in comments. */ params->colourParams.brightness = BRIGHTNESS_DEF; params->colourParams.contrast = CONTRAST_DEF; params->colourParams.saturation = SATURATION_DEF; params->exposure.gainMode = 4; params->exposure.expMode = 2; /* AEC */ params->exposure.compMode = 1; params->exposure.centreWeight = 1; params->exposure.gain = 0; params->exposure.fineExp = 0; params->exposure.coarseExpLo = 185; params->exposure.coarseExpHi = 0; params->exposure.redComp = COMP_RED; params->exposure.green1Comp = COMP_GREEN1; params->exposure.green2Comp = COMP_GREEN2; params->exposure.blueComp = COMP_BLUE; params->colourBalance.balanceMode = 2; /* ACB */ params->colourBalance.redGain = 32; params->colourBalance.greenGain = 6; params->colourBalance.blueGain = 92; params->apcor.gain1 = 0x18; params->apcor.gain2 = 0x16; params->apcor.gain4 = 0x24; params->apcor.gain8 = 0x34; params->vlOffset.gain1 = 20; params->vlOffset.gain2 = 24; params->vlOffset.gain4 = 26; params->vlOffset.gain8 = 26; params->compressionParams.hysteresis = 3; params->compressionParams.threshMax = 11; params->compressionParams.smallStep = 1; params->compressionParams.largeStep = 3; params->compressionParams.decimationHysteresis = 2; params->compressionParams.frDiffStepThresh = 5; params->compressionParams.qDiffStepThresh = 3; params->compressionParams.decimationThreshMod = 2; /* End of default values from Software Developer's Guide */ /* Set Sensor FPS to 15fps. This seems better than 30fps * for indoor lighting. 
*/ params->sensorFps.divisor = 1; params->sensorFps.baserate = 1; params->flickerControl.flickerMode = 0; params->flickerControl.disabled = 1; params->flickerControl.coarseJump = flicker_jumps[sd->mainsFreq] [params->sensorFps.baserate] [params->sensorFps.divisor]; params->flickerControl.allowableOverExposure = find_over_exposure(params->colourParams.brightness); params->yuvThreshold.yThreshold = 6; /* From windows driver */ params->yuvThreshold.uvThreshold = 6; /* From windows driver */ params->format.subSample = SUBSAMPLE_420; params->format.yuvOrder = YUVORDER_YUYV; params->compression.mode = CPIA_COMPRESSION_AUTO; params->compression.decimation = NO_DECIMATION; params->compressionTarget.frTargeting = COMP_TARGET_DEF; params->compressionTarget.targetFR = 15; /* From windows driver */ params->compressionTarget.targetQ = 5; /* From windows driver */ params->qx3.qx3_detected = 0; params->qx3.toplight = 0; params->qx3.bottomlight = 0; params->qx3.button = 0; params->qx3.cradled = 0; } static void printstatus(struct gspca_dev *gspca_dev, struct cam_params *params) { gspca_dbg(gspca_dev, D_PROBE, "status: %02x %02x %02x %02x %02x %02x %02x %02x\n", params->status.systemState, params->status.grabState, params->status.streamState, params->status.fatalError, params->status.cmdError, params->status.debugFlags, params->status.vpStatus, params->status.errorCode); } static int goto_low_power(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret; ret = do_command(gspca_dev, CPIA_COMMAND_GotoLoPower, 0, 0, 0, 0); if (ret) return ret; ret = do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0); if (ret) return ret; if (sd->params.status.systemState != LO_POWER_STATE) { if (sd->params.status.systemState != WARM_BOOT_STATE) { gspca_err(gspca_dev, "unexpected state after lo power cmd: %02x\n", sd->params.status.systemState); printstatus(gspca_dev, &sd->params); } return -EIO; } gspca_dbg(gspca_dev, D_CONF, "camera now in LOW power state\n"); return 0; } static int goto_high_power(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret; ret = do_command(gspca_dev, CPIA_COMMAND_GotoHiPower, 0, 0, 0, 0); if (ret) return ret; msleep_interruptible(40); /* windows driver does it too */ if (signal_pending(current)) return -EINTR; ret = do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0); if (ret) return ret; if (sd->params.status.systemState != HI_POWER_STATE) { gspca_err(gspca_dev, "unexpected state after hi power cmd: %02x\n", sd->params.status.systemState); printstatus(gspca_dev, &sd->params); return -EIO; } gspca_dbg(gspca_dev, D_CONF, "camera now in HIGH power state\n"); return 0; } static int get_version_information(struct gspca_dev *gspca_dev) { int ret; /* GetCPIAVersion */ ret = do_command(gspca_dev, CPIA_COMMAND_GetCPIAVersion, 0, 0, 0, 0); if (ret) return ret; /* GetPnPID */ return do_command(gspca_dev, CPIA_COMMAND_GetPnPID, 0, 0, 0, 0); } static int save_camera_state(struct gspca_dev *gspca_dev) { int ret; ret = do_command(gspca_dev, CPIA_COMMAND_GetColourBalance, 0, 0, 0, 0); if (ret) return ret; return do_command(gspca_dev, CPIA_COMMAND_GetExposure, 0, 0, 0, 0); } static int command_setformat(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret; ret = do_command(gspca_dev, CPIA_COMMAND_SetFormat, sd->params.format.videoSize, sd->params.format.subSample, sd->params.format.yuvOrder, 0); if (ret) return ret; return do_command(gspca_dev, CPIA_COMMAND_SetROI, sd->params.roi.colStart, 
sd->params.roi.colEnd, sd->params.roi.rowStart, sd->params.roi.rowEnd); } static int command_setcolourparams(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetColourParams, sd->params.colourParams.brightness, sd->params.colourParams.contrast, sd->params.colourParams.saturation, 0); } static int command_setapcor(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetApcor, sd->params.apcor.gain1, sd->params.apcor.gain2, sd->params.apcor.gain4, sd->params.apcor.gain8); } static int command_setvloffset(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetVLOffset, sd->params.vlOffset.gain1, sd->params.vlOffset.gain2, sd->params.vlOffset.gain4, sd->params.vlOffset.gain8); } static int command_setexposure(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret; ret = do_command_extended(gspca_dev, CPIA_COMMAND_SetExposure, sd->params.exposure.gainMode, 1, sd->params.exposure.compMode, sd->params.exposure.centreWeight, sd->params.exposure.gain, sd->params.exposure.fineExp, sd->params.exposure.coarseExpLo, sd->params.exposure.coarseExpHi, sd->params.exposure.redComp, sd->params.exposure.green1Comp, sd->params.exposure.green2Comp, sd->params.exposure.blueComp); if (ret) return ret; if (sd->params.exposure.expMode != 1) { ret = do_command_extended(gspca_dev, CPIA_COMMAND_SetExposure, 0, sd->params.exposure.expMode, 0, 0, sd->params.exposure.gain, sd->params.exposure.fineExp, sd->params.exposure.coarseExpLo, sd->params.exposure.coarseExpHi, 0, 0, 0, 0); } return ret; } static int command_setcolourbalance(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->params.colourBalance.balanceMode == 1) { int ret; ret = do_command(gspca_dev, CPIA_COMMAND_SetColourBalance, 1, sd->params.colourBalance.redGain, sd->params.colourBalance.greenGain, sd->params.colourBalance.blueGain); if (ret) return ret; return do_command(gspca_dev, CPIA_COMMAND_SetColourBalance, 3, 0, 0, 0); } if (sd->params.colourBalance.balanceMode == 2) { return do_command(gspca_dev, CPIA_COMMAND_SetColourBalance, 2, 0, 0, 0); } if (sd->params.colourBalance.balanceMode == 3) { return do_command(gspca_dev, CPIA_COMMAND_SetColourBalance, 3, 0, 0, 0); } return -EINVAL; } static int command_setcompressiontarget(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetCompressionTarget, sd->params.compressionTarget.frTargeting, sd->params.compressionTarget.targetFR, sd->params.compressionTarget.targetQ, 0); } static int command_setyuvtresh(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetYUVThresh, sd->params.yuvThreshold.yThreshold, sd->params.yuvThreshold.uvThreshold, 0, 0); } static int command_setcompressionparams(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command_extended(gspca_dev, CPIA_COMMAND_SetCompressionParams, 0, 0, 0, 0, sd->params.compressionParams.hysteresis, sd->params.compressionParams.threshMax, sd->params.compressionParams.smallStep, sd->params.compressionParams.largeStep, sd->params.compressionParams.decimationHysteresis, sd->params.compressionParams.frDiffStepThresh, sd->params.compressionParams.qDiffStepThresh, sd->params.compressionParams.decimationThreshMod); } static int command_setcompression(struct gspca_dev 
*gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetCompression, sd->params.compression.mode, sd->params.compression.decimation, 0, 0); } static int command_setsensorfps(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetSensorFPS, sd->params.sensorFps.divisor, sd->params.sensorFps.baserate, 0, 0); } static int command_setflickerctrl(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetFlickerCtrl, sd->params.flickerControl.flickerMode, sd->params.flickerControl.coarseJump, sd->params.flickerControl.allowableOverExposure, 0); } static int command_setecptiming(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetECPTiming, sd->params.ecpTiming, 0, 0, 0); } static int command_pause(struct gspca_dev *gspca_dev) { return do_command(gspca_dev, CPIA_COMMAND_EndStreamCap, 0, 0, 0, 0); } static int command_resume(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_InitStreamCap, 0, sd->params.streamStartLine, 0, 0); } static int command_setlights(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret, p1, p2; p1 = (sd->params.qx3.bottomlight == 0) << 1; p2 = (sd->params.qx3.toplight == 0) << 3; ret = do_command(gspca_dev, CPIA_COMMAND_WriteVCReg, 0x90, 0x8f, 0x50, 0); if (ret) return ret; return do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 2, 0, p1 | p2 | 0xe0, 0); } static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply) { /* Everything in here is from the Windows driver */ /* define for compgain calculation */ #if 0 #define COMPGAIN(base, curexp, newexp) \ (u8) ((((float) base - 128.0) * ((float) curexp / (float) newexp)) + 128.5) #define EXP_FROM_COMP(basecomp, curcomp, curexp) \ (u16)((float)curexp * (float)(u8)(curcomp + 128) / \ (float)(u8)(basecomp - 128)) #else /* equivalent functions without floating point math */ #define COMPGAIN(base, curexp, newexp) \ (u8)(128 + (((u32)(2*(base-128)*curexp + newexp)) / (2 * newexp))) #define EXP_FROM_COMP(basecomp, curcomp, curexp) \ (u16)(((u32)(curexp * (u8)(curcomp + 128)) / (u8)(basecomp - 128))) #endif struct sd *sd = (struct sd *) gspca_dev; int currentexp = sd->params.exposure.coarseExpLo + sd->params.exposure.coarseExpHi * 256; int ret, startexp; if (on) { int cj = sd->params.flickerControl.coarseJump; sd->params.flickerControl.flickerMode = 1; sd->params.flickerControl.disabled = 0; if (sd->params.exposure.expMode != 2) { sd->params.exposure.expMode = 2; sd->exposure_status = EXPOSURE_NORMAL; } if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp)) return -EINVAL; currentexp = currentexp << sd->params.exposure.gain; sd->params.exposure.gain = 0; /* round down current exposure to nearest value */ startexp = (currentexp + ROUND_UP_EXP_FOR_FLICKER) / cj; if (startexp < 1) startexp = 1; startexp = (startexp * cj) - 1; if (FIRMWARE_VERSION(1, 2)) while (startexp > MAX_EXP_102) startexp -= cj; else while (startexp > MAX_EXP) startexp -= cj; sd->params.exposure.coarseExpLo = startexp & 0xff; sd->params.exposure.coarseExpHi = startexp >> 8; if (currentexp > startexp) { if (currentexp > (2 * startexp)) currentexp = 2 * startexp; sd->params.exposure.redComp = COMPGAIN(COMP_RED, currentexp, startexp); sd->params.exposure.green1Comp = COMPGAIN(COMP_GREEN1, currentexp, startexp); sd->params.exposure.green2Comp = 
COMPGAIN(COMP_GREEN2, currentexp, startexp); sd->params.exposure.blueComp = COMPGAIN(COMP_BLUE, currentexp, startexp); } else { sd->params.exposure.redComp = COMP_RED; sd->params.exposure.green1Comp = COMP_GREEN1; sd->params.exposure.green2Comp = COMP_GREEN2; sd->params.exposure.blueComp = COMP_BLUE; } if (FIRMWARE_VERSION(1, 2)) sd->params.exposure.compMode = 0; else sd->params.exposure.compMode = 1; sd->params.apcor.gain1 = 0x18; sd->params.apcor.gain2 = 0x18; sd->params.apcor.gain4 = 0x16; sd->params.apcor.gain8 = 0x14; } else { sd->params.flickerControl.flickerMode = 0; sd->params.flickerControl.disabled = 1; /* Average equivalent coarse for each comp channel */ startexp = EXP_FROM_COMP(COMP_RED, sd->params.exposure.redComp, currentexp); startexp += EXP_FROM_COMP(COMP_GREEN1, sd->params.exposure.green1Comp, currentexp); startexp += EXP_FROM_COMP(COMP_GREEN2, sd->params.exposure.green2Comp, currentexp); startexp += EXP_FROM_COMP(COMP_BLUE, sd->params.exposure.blueComp, currentexp); startexp = startexp >> 2; while (startexp > MAX_EXP && sd->params.exposure.gain < sd->params.exposure.gainMode - 1) { startexp = startexp >> 1; ++sd->params.exposure.gain; } if (FIRMWARE_VERSION(1, 2) && startexp > MAX_EXP_102) startexp = MAX_EXP_102; if (startexp > MAX_EXP) startexp = MAX_EXP; sd->params.exposure.coarseExpLo = startexp & 0xff; sd->params.exposure.coarseExpHi = startexp >> 8; sd->params.exposure.redComp = COMP_RED; sd->params.exposure.green1Comp = COMP_GREEN1; sd->params.exposure.green2Comp = COMP_GREEN2; sd->params.exposure.blueComp = COMP_BLUE; sd->params.exposure.compMode = 1; sd->params.apcor.gain1 = 0x18; sd->params.apcor.gain2 = 0x16; sd->params.apcor.gain4 = 0x24; sd->params.apcor.gain8 = 0x34; } sd->params.vlOffset.gain1 = 20; sd->params.vlOffset.gain2 = 24; sd->params.vlOffset.gain4 = 26; sd->params.vlOffset.gain8 = 26; if (apply) { ret = command_setexposure(gspca_dev); if (ret) return ret; ret = command_setapcor(gspca_dev); if (ret) return ret; ret = command_setvloffset(gspca_dev); if (ret) return ret; ret = command_setflickerctrl(gspca_dev); if (ret) return ret; } return 0; #undef EXP_FROM_COMP #undef COMPGAIN } /* monitor the exposure and adjust the sensor frame rate if needed */ static void monitor_exposure(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 exp_acc, bcomp, cmd[8]; int ret, light_exp, dark_exp, very_dark_exp; int old_exposure, new_exposure, framerate; int setfps = 0, setexp = 0, setflicker = 0; /* get necessary stats and register settings from camera */ /* do_command can't handle this, so do it ourselves */ cmd[0] = CPIA_COMMAND_ReadVPRegs >> 8; cmd[1] = CPIA_COMMAND_ReadVPRegs & 0xff; cmd[2] = 30; cmd[3] = 4; cmd[4] = 9; cmd[5] = 8; cmd[6] = 8; cmd[7] = 0; ret = cpia_usb_transferCmd(gspca_dev, cmd); if (ret) { pr_err("ReadVPRegs(30,4,9,8) - failed: %d\n", ret); return; } exp_acc = gspca_dev->usb_buf[0]; bcomp = gspca_dev->usb_buf[1]; light_exp = sd->params.colourParams.brightness + TC - 50 + EXP_ACC_LIGHT; if (light_exp > 255) light_exp = 255; dark_exp = sd->params.colourParams.brightness + TC - 50 - EXP_ACC_DARK; if (dark_exp < 0) dark_exp = 0; very_dark_exp = dark_exp / 2; old_exposure = sd->params.exposure.coarseExpHi * 256 + sd->params.exposure.coarseExpLo; if (!sd->params.flickerControl.disabled) { /* Flicker control on */ int max_comp = FIRMWARE_VERSION(1, 2) ? 
MAX_COMP : HIGH_COMP_102; bcomp += 128; /* decode */ if (bcomp >= max_comp && exp_acc < dark_exp) { /* dark */ if (exp_acc < very_dark_exp) { /* very dark */ if (sd->exposure_status == EXPOSURE_VERY_DARK) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_VERY_DARK; sd->exposure_count = 1; } } else { /* just dark */ if (sd->exposure_status == EXPOSURE_DARK) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_DARK; sd->exposure_count = 1; } } } else if (old_exposure <= LOW_EXP || exp_acc > light_exp) { /* light */ if (old_exposure <= VERY_LOW_EXP) { /* very light */ if (sd->exposure_status == EXPOSURE_VERY_LIGHT) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_VERY_LIGHT; sd->exposure_count = 1; } } else { /* just light */ if (sd->exposure_status == EXPOSURE_LIGHT) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_LIGHT; sd->exposure_count = 1; } } } else { /* not dark or light */ sd->exposure_status = EXPOSURE_NORMAL; } } else { /* Flicker control off */ if (old_exposure >= MAX_EXP && exp_acc < dark_exp) { /* dark */ if (exp_acc < very_dark_exp) { /* very dark */ if (sd->exposure_status == EXPOSURE_VERY_DARK) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_VERY_DARK; sd->exposure_count = 1; } } else { /* just dark */ if (sd->exposure_status == EXPOSURE_DARK) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_DARK; sd->exposure_count = 1; } } } else if (old_exposure <= LOW_EXP || exp_acc > light_exp) { /* light */ if (old_exposure <= VERY_LOW_EXP) { /* very light */ if (sd->exposure_status == EXPOSURE_VERY_LIGHT) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_VERY_LIGHT; sd->exposure_count = 1; } } else { /* just light */ if (sd->exposure_status == EXPOSURE_LIGHT) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_LIGHT; sd->exposure_count = 1; } } } else { /* not dark or light */ sd->exposure_status = EXPOSURE_NORMAL; } } framerate = atomic_read(&sd->fps); if (framerate > 30 || framerate < 1) framerate = 1; if (!sd->params.flickerControl.disabled) { /* Flicker control on */ if ((sd->exposure_status == EXPOSURE_VERY_DARK || sd->exposure_status == EXPOSURE_DARK) && sd->exposure_count >= DARK_TIME * framerate && sd->params.sensorFps.divisor < 2) { /* dark for too long */ ++sd->params.sensorFps.divisor; setfps = 1; sd->params.flickerControl.coarseJump = flicker_jumps[sd->mainsFreq] [sd->params.sensorFps.baserate] [sd->params.sensorFps.divisor]; setflicker = 1; new_exposure = sd->params.flickerControl.coarseJump-1; while (new_exposure < old_exposure / 2) new_exposure += sd->params.flickerControl.coarseJump; sd->params.exposure.coarseExpLo = new_exposure & 0xff; sd->params.exposure.coarseExpHi = new_exposure >> 8; setexp = 1; sd->exposure_status = EXPOSURE_NORMAL; gspca_dbg(gspca_dev, D_CONF, "Automatically decreasing sensor_fps\n"); } else if ((sd->exposure_status == EXPOSURE_VERY_LIGHT || sd->exposure_status == EXPOSURE_LIGHT) && sd->exposure_count >= LIGHT_TIME * framerate && sd->params.sensorFps.divisor > 0) { /* light for too long */ int max_exp = FIRMWARE_VERSION(1, 2) ? 
MAX_EXP_102 : MAX_EXP; --sd->params.sensorFps.divisor; setfps = 1; sd->params.flickerControl.coarseJump = flicker_jumps[sd->mainsFreq] [sd->params.sensorFps.baserate] [sd->params.sensorFps.divisor]; setflicker = 1; new_exposure = sd->params.flickerControl.coarseJump-1; while (new_exposure < 2 * old_exposure && new_exposure + sd->params.flickerControl.coarseJump < max_exp) new_exposure += sd->params.flickerControl.coarseJump; sd->params.exposure.coarseExpLo = new_exposure & 0xff; sd->params.exposure.coarseExpHi = new_exposure >> 8; setexp = 1; sd->exposure_status = EXPOSURE_NORMAL; gspca_dbg(gspca_dev, D_CONF, "Automatically increasing sensor_fps\n"); } } else { /* Flicker control off */ if ((sd->exposure_status == EXPOSURE_VERY_DARK || sd->exposure_status == EXPOSURE_DARK) && sd->exposure_count >= DARK_TIME * framerate && sd->params.sensorFps.divisor < 2) { /* dark for too long */ ++sd->params.sensorFps.divisor; setfps = 1; if (sd->params.exposure.gain > 0) { --sd->params.exposure.gain; setexp = 1; } sd->exposure_status = EXPOSURE_NORMAL; gspca_dbg(gspca_dev, D_CONF, "Automatically decreasing sensor_fps\n"); } else if ((sd->exposure_status == EXPOSURE_VERY_LIGHT || sd->exposure_status == EXPOSURE_LIGHT) && sd->exposure_count >= LIGHT_TIME * framerate && sd->params.sensorFps.divisor > 0) { /* light for too long */ --sd->params.sensorFps.divisor; setfps = 1; if (sd->params.exposure.gain < sd->params.exposure.gainMode - 1) { ++sd->params.exposure.gain; setexp = 1; } sd->exposure_status = EXPOSURE_NORMAL; gspca_dbg(gspca_dev, D_CONF, "Automatically increasing sensor_fps\n"); } } if (setexp) command_setexposure(gspca_dev); if (setfps) command_setsensorfps(gspca_dev); if (setflicker) command_setflickerctrl(gspca_dev); } /*-----------------------------------------------------------------*/ /* if flicker is switched off, this function switches it back on. It checks, however, that conditions are suitable before restarting it. This should only be called for firmware version 1.2. It also adjusts the colour balance when an exposure step is detected - as long as flicker is running */ static void restart_flicker(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int cam_exposure, old_exp; if (!FIRMWARE_VERSION(1, 2)) return; cam_exposure = atomic_read(&sd->cam_exposure); if (sd->params.flickerControl.flickerMode == 0 || cam_exposure == 0) return; old_exp = sd->params.exposure.coarseExpLo + sd->params.exposure.coarseExpHi*256; /* see how far away camera exposure is from a valid flicker exposure value */ cam_exposure %= sd->params.flickerControl.coarseJump; if (!sd->params.flickerControl.disabled && cam_exposure <= sd->params.flickerControl.coarseJump - 3) { /* Flicker control auto-disabled */ sd->params.flickerControl.disabled = 1; } if (sd->params.flickerControl.disabled && old_exp > sd->params.flickerControl.coarseJump + ROUND_UP_EXP_FOR_FLICKER) { /* exposure is now high enough to switch flicker control back on */ set_flicker(gspca_dev, 1, 1); } } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; sd->mainsFreq = FREQ_DEF == V4L2_CID_POWER_LINE_FREQUENCY_60HZ; reset_camera_params(gspca_dev); gspca_dbg(gspca_dev, D_PROBE, "cpia CPiA camera detected (vid/pid 0x%04X:0x%04X)\n", id->idVendor, id->idProduct); cam = &gspca_dev->cam; cam->cam_mode = mode; cam->nmodes = ARRAY_SIZE(mode); goto_low_power(gspca_dev); /* Check the firmware version.
*/ sd->params.version.firmwareVersion = 0; get_version_information(gspca_dev); if (sd->params.version.firmwareVersion != 1) { gspca_err(gspca_dev, "only firmware version 1 is supported (got: %d)\n", sd->params.version.firmwareVersion); return -ENODEV; } /* A bug in firmware 1-02 limits gainMode to 2 */ if (sd->params.version.firmwareRevision <= 2 && sd->params.exposure.gainMode > 2) { sd->params.exposure.gainMode = 2; } /* set QX3 detected flag */ sd->params.qx3.qx3_detected = (sd->params.pnpID.vendor == 0x0813 && sd->params.pnpID.product == 0x0001); return 0; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int priv, ret; /* Start the camera in low power mode */ if (goto_low_power(gspca_dev)) { if (sd->params.status.systemState != WARM_BOOT_STATE) { gspca_err(gspca_dev, "unexpected systemstate: %02x\n", sd->params.status.systemState); printstatus(gspca_dev, &sd->params); return -ENODEV; } /* FIXME: this is just dirty trial and error */ ret = goto_high_power(gspca_dev); if (ret) return ret; ret = do_command(gspca_dev, CPIA_COMMAND_DiscardFrame, 0, 0, 0, 0); if (ret) return ret; ret = goto_low_power(gspca_dev); if (ret) return ret; } /* procedure described in developer's guide p3-28 */ /* Check the firmware version. */ sd->params.version.firmwareVersion = 0; get_version_information(gspca_dev); /* The fatal error checking should be done after * the camera powers up (developer's guide p 3-38) */ /* Set streamState before transition to high power to avoid bug * in firmware 1-02 */ ret = do_command(gspca_dev, CPIA_COMMAND_ModifyCameraStatus, STREAMSTATE, 0, STREAM_NOT_READY, 0); if (ret) return ret; /* GotoHiPower */ ret = goto_high_power(gspca_dev); if (ret) return ret; /* Check the camera status */ ret = do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0); if (ret) return ret; if (sd->params.status.fatalError) { gspca_err(gspca_dev, "fatal_error: %04x, vp_status: %04x\n", sd->params.status.fatalError, sd->params.status.vpStatus); return -EIO; } /* VPVersion can't be retrieved before the camera is in HiPower, * so get it here instead of in get_version_information. 
*/ ret = do_command(gspca_dev, CPIA_COMMAND_GetVPVersion, 0, 0, 0, 0); if (ret) return ret; /* Determine video mode settings */ sd->params.streamStartLine = 120; priv = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; if (priv & 0x01) { /* crop */ sd->params.roi.colStart = 2; sd->params.roi.rowStart = 6; } else { sd->params.roi.colStart = 0; sd->params.roi.rowStart = 0; } if (priv & 0x02) { /* quarter */ sd->params.format.videoSize = VIDEOSIZE_QCIF; sd->params.roi.colStart /= 2; sd->params.roi.rowStart /= 2; sd->params.streamStartLine /= 2; } else sd->params.format.videoSize = VIDEOSIZE_CIF; sd->params.roi.colEnd = sd->params.roi.colStart + (gspca_dev->pixfmt.width >> 3); sd->params.roi.rowEnd = sd->params.roi.rowStart + (gspca_dev->pixfmt.height >> 2); /* And now set the camera to a known state */ ret = do_command(gspca_dev, CPIA_COMMAND_SetGrabMode, CPIA_GRAB_CONTINEOUS, 0, 0, 0); if (ret) return ret; /* We start with compression disabled, as we need one uncompressed frame to handle later compressed frames */ ret = do_command(gspca_dev, CPIA_COMMAND_SetCompression, CPIA_COMPRESSION_NONE, NO_DECIMATION, 0, 0); if (ret) return ret; ret = command_setcompressiontarget(gspca_dev); if (ret) return ret; ret = command_setcolourparams(gspca_dev); if (ret) return ret; ret = command_setformat(gspca_dev); if (ret) return ret; ret = command_setyuvtresh(gspca_dev); if (ret) return ret; ret = command_setecptiming(gspca_dev); if (ret) return ret; ret = command_setcompressionparams(gspca_dev); if (ret) return ret; ret = command_setexposure(gspca_dev); if (ret) return ret; ret = command_setcolourbalance(gspca_dev); if (ret) return ret; ret = command_setsensorfps(gspca_dev); if (ret) return ret; ret = command_setapcor(gspca_dev); if (ret) return ret; ret = command_setflickerctrl(gspca_dev); if (ret) return ret; ret = command_setvloffset(gspca_dev); if (ret) return ret; /* Start stream */ ret = command_resume(gspca_dev); if (ret) return ret; /* Wait 6 frames before turning compression on for the sensor to get all settings and AEC/ACB to settle */ sd->first_frame = 6; sd->exposure_status = EXPOSURE_NORMAL; sd->exposure_count = 0; atomic_set(&sd->cam_exposure, 0); atomic_set(&sd->fps, 0); return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd __maybe_unused = (struct sd *) gspca_dev; command_pause(gspca_dev); /* save camera state for later open (developers guide ch 3.5.3) */ save_camera_state(gspca_dev); /* GotoLoPower */ goto_low_power(gspca_dev); /* Update the camera status */ do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0); #if IS_ENABLED(CONFIG_INPUT) /* If the last button state is pressed, release it now! */ if (sd->params.qx3.button) { /* The camera latch will hold the pressed state until we reset the latch, so we do not reset sd->params.qx3.button now, to avoid a false keypress being reported the next sd_start */ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); } #endif } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret; /* Start / Stop the camera to make sure we are talking to a supported camera, and to get some information from it to print. 
*/ ret = sd_start(gspca_dev); if (ret) return ret; /* Ensure the QX3 illuminators' states are restored upon resume, or disable the illuminator controls, if this isn't a QX3 */ if (sd->params.qx3.qx3_detected) command_setlights(gspca_dev); sd_stopN(gspca_dev); gspca_dbg(gspca_dev, D_PROBE, "CPIA Version: %d.%02d (%d.%d)\n", sd->params.version.firmwareVersion, sd->params.version.firmwareRevision, sd->params.version.vcVersion, sd->params.version.vcRevision); gspca_dbg(gspca_dev, D_PROBE, "CPIA PnP-ID: %04x:%04x:%04x", sd->params.pnpID.vendor, sd->params.pnpID.product, sd->params.pnpID.deviceRevision); gspca_dbg(gspca_dev, D_PROBE, "VP-Version: %d.%d %04x", sd->params.vpVersion.vpVersion, sd->params.vpVersion.vpRevision, sd->params.vpVersion.cameraHeadID); return 0; } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; /* Check for SOF */ if (len >= 64 && data[0] == MAGIC_0 && data[1] == MAGIC_1 && data[16] == sd->params.format.videoSize && data[17] == sd->params.format.subSample && data[18] == sd->params.format.yuvOrder && data[24] == sd->params.roi.colStart && data[25] == sd->params.roi.colEnd && data[26] == sd->params.roi.rowStart && data[27] == sd->params.roi.rowEnd) { u8 *image; atomic_set(&sd->cam_exposure, data[39] * 2); atomic_set(&sd->fps, data[41]); /* Check for proper EOF for last frame */ image = gspca_dev->image; if (image != NULL && gspca_dev->image_len > 4 && image[gspca_dev->image_len - 4] == 0xff && image[gspca_dev->image_len - 3] == 0xff && image[gspca_dev->image_len - 2] == 0xff && image[gspca_dev->image_len - 1] == 0xff) gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); return; } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } static void sd_dq_callback(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; /* Set the normal compression settings once we have captured a few uncompressed frames (and AEC has hopefully settled) */ if (sd->first_frame) { sd->first_frame--; if (sd->first_frame == 0) command_setcompression(gspca_dev); } /* Switch flicker control back on if it got turned off */ restart_flicker(gspca_dev); /* If AEC is enabled, monitor the exposure and adjust the sensor frame rate if needed */ if (sd->params.exposure.expMode == 2) monitor_exposure(gspca_dev); /* Update our knowledge of the camera state */ do_command(gspca_dev, CPIA_COMMAND_GetExposure, 0, 0, 0, 0); do_command(gspca_dev, CPIA_COMMAND_ReadMCPorts, 0, 0, 0, 0); } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming && ctrl->id != V4L2_CID_POWER_LINE_FREQUENCY) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: sd->params.colourParams.brightness = ctrl->val; sd->params.flickerControl.allowableOverExposure = find_over_exposure(sd->params.colourParams.brightness); gspca_dev->usb_err = command_setcolourparams(gspca_dev); if (!gspca_dev->usb_err) gspca_dev->usb_err = command_setflickerctrl(gspca_dev); break; case V4L2_CID_CONTRAST: sd->params.colourParams.contrast = ctrl->val; gspca_dev->usb_err = command_setcolourparams(gspca_dev); break; case V4L2_CID_SATURATION: sd->params.colourParams.saturation = ctrl->val; gspca_dev->usb_err = command_setcolourparams(gspca_dev); break; case V4L2_CID_POWER_LINE_FREQUENCY: sd->mainsFreq = ctrl->val == V4L2_CID_POWER_LINE_FREQUENCY_60HZ; 
sd->params.flickerControl.coarseJump = flicker_jumps[sd->mainsFreq] [sd->params.sensorFps.baserate] [sd->params.sensorFps.divisor]; gspca_dev->usb_err = set_flicker(gspca_dev, ctrl->val != V4L2_CID_POWER_LINE_FREQUENCY_DISABLED, gspca_dev->streaming); break; case V4L2_CID_ILLUMINATORS_1: sd->params.qx3.bottomlight = ctrl->val; gspca_dev->usb_err = command_setlights(gspca_dev); break; case V4L2_CID_ILLUMINATORS_2: sd->params.qx3.toplight = ctrl->val; gspca_dev->usb_err = command_setlights(gspca_dev); break; case CPIA1_CID_COMP_TARGET: sd->params.compressionTarget.frTargeting = ctrl->val; gspca_dev->usb_err = command_setcompressiontarget(gspca_dev); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; static const char * const comp_target_menu[] = { "Quality", "Framerate", NULL }; static const struct v4l2_ctrl_config comp_target = { .ops = &sd_ctrl_ops, .id = CPIA1_CID_COMP_TARGET, .type = V4L2_CTRL_TYPE_MENU, .name = "Compression Target", .qmenu = comp_target_menu, .max = 1, .def = COMP_TARGET_DEF, }; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 7); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 100, 1, BRIGHTNESS_DEF); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 96, 8, CONTRAST_DEF); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 100, 1, SATURATION_DEF); sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY, V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, FREQ_DEF); if (sd->params.qx3.qx3_detected) { v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_ILLUMINATORS_1, 0, 1, 1, ILLUMINATORS_1_DEF); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_ILLUMINATORS_2, 0, 1, 1, ILLUMINATORS_2_DEF); } v4l2_ctrl_new_custom(hdl, &comp_target, NULL); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .dq_callback = sd_dq_callback, .pkt_scan = sd_pkt_scan, #if IS_ENABLED(CONFIG_INPUT) .other_input = 1, #endif }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0553, 0x0002)}, {USB_DEVICE(0x0813, 0x0001)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); |
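A note on the command encoding used throughout the driver above: each 16-bit CPIA_COMMAND_* word carries the USB transfer direction in its high byte (DATA_IN or DATA_OUT) and a 3-bit module number plus 5-bit opcode in its low byte, and do_command() simply splits the word into cmd[0]/cmd[1] of the 8-byte packet that cpia_usb_transferCmd() turns into a vendor control transfer. The following standalone sketch decodes a command word the same way; it is illustrative only, and decode_command() is a hypothetical helper, not a driver symbol.

#include <stdio.h>

#define DATA_IN 0xc0
#define INPUT (DATA_IN << 8)
#define CPIA_MODULE_VP_CTRL (5 << 5)
#define CPIA_COMMAND_GetColourParams (INPUT | CPIA_MODULE_VP_CTRL | 16)

/* Split a command word the way do_command() fills cmd[0] and cmd[1]. */
static void decode_command(unsigned short command)
{
	unsigned char dir = command >> 8;	/* cmd[0]: DATA_IN or DATA_OUT */
	unsigned char mod_op = command & 0xff;	/* cmd[1]: module | opcode */

	printf("direction=0x%02x module=%u opcode=%u\n",
	       dir, (mod_op >> 5) & 0x07, mod_op & 0x1f);
}

int main(void)
{
	decode_command(CPIA_COMMAND_GetColourParams);
	/* prints: direction=0xc0 module=5 opcode=16 */
	return 0;
}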
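The brightness-dependent clamp in find_over_exposure() is easy to sanity-check numerically: the headroom is 250 - brightness - 59, capped at the fixed ceiling of 146. At the default brightness of 50 that gives min(141, 146) = 141; only at brightness 45 and below does the 146 ceiling become the limiting value. A minimal standalone sketch of the same computation, constants inlined:

#include <stdio.h>

/* Same arithmetic as find_over_exposure(), with its constants inlined. */
static int over_exposure_limit(int brightness)
{
	int headroom = 250 - brightness - 59;

	return headroom < 146 ? headroom : 146;
}

int main(void)
{
	printf("%d\n", over_exposure_limit(50));	/* 141: headroom limits */
	printf("%d\n", over_exposure_limit(30));	/* 146: ceiling limits */
	return 0;
}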
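set_flicker() keeps the coarse exposure on the grid of flicker-safe values, i.e. one less than a multiple of flickerControl.coarseJump (the driver then additionally steps the result down below MAX_EXP or MAX_EXP_102). With 50 Hz mains and the default sensor rate (baserate 1, divisor 1), flicker_jumps[0][1][1] gives a jump of 46. A sketch of just the rounding step, under those assumptions; round_to_flicker_grid() is an illustrative name:

#include <stdio.h>

#define ROUND_UP_EXP_FOR_FLICKER 15

/* Round a coarse exposure onto the flicker grid, as set_flicker() does. */
static int round_to_flicker_grid(int currentexp, int coarse_jump)
{
	int n = (currentexp + ROUND_UP_EXP_FOR_FLICKER) / coarse_jump;

	if (n < 1)
		n = 1;
	return n * coarse_jump - 1;
}

int main(void)
{
	/* default coarse exposure 185, 50 Hz jump 46 -> (200/46)*46 - 1 = 183 */
	printf("%d\n", round_to_flicker_grid(185, 46));
	return 0;
}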
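Finally, the stream parser: sd_pkt_scan() treats a packet as a start-of-frame only when it is at least 64 bytes (FRAME_HEADER_SIZE), begins with the MAGIC_0/MAGIC_1 signature, and echoes back the negotiated format (bytes 16-18) and ROI (bytes 24-27); the previous frame is closed only if it ends in four 0xff bytes (the EOI marker). A condensed, standalone version of that header test; is_start_of_frame() is an illustrative helper, not a driver symbol:

#include <stdbool.h>
#include <stddef.h>

#define MAGIC_0 0x19
#define MAGIC_1 0x68
#define FRAME_HEADER_SIZE 64

/* fmt[3] = videoSize, subSample, yuvOrder; roi[4] = colStart, colEnd,
 * rowStart, rowEnd - the values the driver programmed into the camera. */
static bool is_start_of_frame(const unsigned char *data, size_t len,
			      const unsigned char fmt[3],
			      const unsigned char roi[4])
{
	if (len < FRAME_HEADER_SIZE || data[0] != MAGIC_0 || data[1] != MAGIC_1)
		return false;
	return data[16] == fmt[0] && data[17] == fmt[1] && data[18] == fmt[2] &&
	       data[24] == roi[0] && data[25] == roi[1] &&
	       data[26] == roi[2] && data[27] == roi[3];
}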
3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 
// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */
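/* These defaults apply when user space creates a resilient group without
 * NHA_RES_GROUP_IDLE_TIMER or NHA_RES_GROUP_UNBALANCED_TIMER. For
 * illustration only, assuming the usual iproute2 syntax for resilient
 * groups (the exact command line is user-space tooling, not part of this
 * file):
 *
 *	ip nexthop add id 10 group 1/2 type resilient buckets 32 \
 *		idle_timer 120 unbalanced_timer 0
 */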
static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS |		\
			       NHA_OP_FLAG_DUMP_HW_STATS)

static const struct nla_policy rtm_nh_policy_new[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
	[NHA_HW_STATS_ENABLE]	= NLA_POLICY_MAX(NLA_U32, true),
};

static const struct nla_policy rtm_nh_policy_get[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_policy_del[] = {
	[NHA_ID]		= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump[] = {
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_res_policy_new[] = {
	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_get_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
};

static bool nexthop_notifiers_is_empty(struct net *net)
{
	return !net->nexthop.notifier_chain.head;
}

static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
			       const struct nh_info *nhi)
{
	nh_info->dev = nhi->fib_nhc.nhc_dev;
	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
	if (nh_info->gw_family == AF_INET)
		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
	else if (nh_info->gw_family == AF_INET6)
		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

	nh_info->id = nhi->nh_parent->id;
	nh_info->is_reject = nhi->reject_nh;
	nh_info->is_fdb = nhi->fdb_nh;
	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}

static int nh_notifier_single_info_init(struct nh_notifier_info *info,
					const struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);

	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
	if (!info->nh)
		return -ENOMEM;

	__nh_notifier_single_info_init(info->nh, nhi);

	return 0;
}

static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh);
}
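/* The info structs built above are heap-allocated snapshots, so listeners
 * never chase RTNL-protected pointers. A listener's notifier_call()
 * typically dispatches on info->type; a minimal sketch (not from this
 * file, handler names are hypothetical):
 *
 *	static int my_nh_event(struct notifier_block *nb,
 *			       unsigned long event, void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *
 *		switch (info->type) {
 *		case NH_NOTIFIER_INFO_TYPE_SINGLE:
 *			return my_handle_single(info->nh);
 *		case NH_NOTIFIER_INFO_TYPE_GRP:
 *			return my_handle_grp(info->nh_grp);
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 */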
static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
				       struct nh_group *nhg)
{
	u16 num_nh = nhg->num_nh;
	int i;

	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
			       GFP_KERNEL);
	if (!info->nh_grp)
		return -ENOMEM;

	info->nh_grp->num_nh = num_nh;
	info->nh_grp->is_fdb = nhg->fdb_nh;
	info->nh_grp->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhge->nh->nh_info);
		info->nh_grp->nh_entries[i].weight = nhge->weight;
		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
					       nhi);
	}

	return 0;
}

static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
					   struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	u16 num_nh_buckets = res_table->num_nh_buckets;
	unsigned long size;
	u16 i;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
				       __GFP_NOWARN);
	if (!info->nh_res_table)
		return -ENOMEM;

	info->nh_res_table->num_nh_buckets = num_nh_buckets;
	info->nh_res_table->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;
		struct nh_info *nhi;

		nhge = rtnl_dereference(bucket->nh_entry);
		nhi = rtnl_dereference(nhge->nh->nh_info);
		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
					       nhi);
	}

	return 0;
}

static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
				     const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		return nh_notifier_mpath_info_init(info, nhg);
	else if (nhg->resilient)
		return nh_notifier_res_table_info_init(info, nhg);
	return -EINVAL;
}

static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
				      const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		kfree(info->nh_grp);
	else if (nhg->resilient)
		vfree(info->nh_res_table);
}

static int nh_notifier_info_init(struct nh_notifier_info *info,
				 const struct nexthop *nh)
{
	info->id = nh->id;

	if (nh->is_group)
		return nh_notifier_grp_info_init(info, nh);
	else
		return nh_notifier_single_info_init(info, nh);
}

static void nh_notifier_info_fini(struct nh_notifier_info *info,
				  const struct nexthop *nh)
{
	if (nh->is_group)
		nh_notifier_grp_info_fini(info, nh);
	else
		nh_notifier_single_info_fini(info);
}

static int call_nexthop_notifiers(struct net *net,
				  enum nexthop_event_type event_type,
				  struct nexthop *nh,
				  struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_info_init(&info, nh);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}
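/* Listeners on the blocking notifier chain can veto an operation by
 * returning notifier_from_errno(err); notifier_to_errno() in
 * call_nexthop_notifiers() converts that back into a plain -errno for the
 * caller. A zero return therefore means every listener accepted (or
 * ignored) the event.
 */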
static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
				      bool force,
				      unsigned int *p_idle_timer_ms)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	int err = 0;

	/* When 'force' is false, nexthop bucket replacement is performed
	 * because the bucket was deemed to be idle. In this case, capable
	 * listeners can choose to perform an atomic replacement: The bucket is
	 * only replaced if it is inactive. However, if the idle timer interval
	 * is smaller than the interval in which a listener is querying
	 * buckets' activity from the device, then atomic replacement should
	 * not be tried. Pass the idle timer value to listeners, so that they
	 * can determine which type of replacement to perform.
	 */
	if (force) {
		*p_idle_timer_ms = 0;
		return 0;
	}

	rcu_read_lock();

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh) {
		err = -EINVAL;
		goto out;
	}

	nhg = rcu_dereference(nh->nh_grp);
	res_table = rcu_dereference(nhg->res_table);
	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
	rcu_read_unlock();

	return err;
}

static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
					    u16 bucket_index, bool force,
					    struct nh_info *oldi,
					    struct nh_info *newi)
{
	unsigned int idle_timer_ms;
	int err;

	err = nh_notifier_res_bucket_idle_timer_get(info, force,
						    &idle_timer_ms);
	if (err)
		return err;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
				      GFP_KERNEL);
	if (!info->nh_res_bucket)
		return -ENOMEM;

	info->nh_res_bucket->bucket_index = bucket_index;
	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
	info->nh_res_bucket->force = force;
	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
	return 0;
}

static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_res_bucket);
}

static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					       u16 bucket_index, bool force,
					       struct nh_info *oldi,
					       struct nh_info *newi,
					       struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nhg_id,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
					       oldi, newi);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
	nh_notifier_res_bucket_info_fini(&info);

	return notifier_to_errno(err);
}

/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL block are writers, and need to
 * maintain mutual exclusion. Since there are only two and well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))

static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					     u16 bucket_index, bool force,
					     struct nexthop *old_nh,
					     struct nexthop *new_nh,
					     struct netlink_ext_ack *extack)
{
	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
						   force, oldi, newi, extack);
}
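/* The RTNL-side write sequence described above looks roughly like this
 * (a sketch only; the upkeep routine itself is not shown here, cf. the
 * nh_res_table_upkeep_dw() forward declaration further below):
 *
 *	cancel_delayed_work_sync(&res_table->upkeep_dw);
 *	... mutate the table ...
 *	... re-run upkeep, which reschedules the DW if needed ...
 */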
static int call_nexthop_res_table_notifiers(struct net *net,
					    struct nexthop *nh,
					    struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nh->id,
	};
	struct nh_group *nhg;
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	/* At this point, the nexthop buckets are still not populated. Only
	 * emit a notification with the logical nexthops, so that a listener
	 * could potentially veto it in case of unsupported configuration.
	 */
	nhg = rtnl_dereference(nh->nh_grp);
	err = nh_notifier_mpath_info_init(&info, nhg);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
					   &info);
	kfree(info.nh_grp);

	return notifier_to_errno(err);
}

static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
				 enum nexthop_event_type event_type,
				 struct nexthop *nh,
				 struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	err = nh_notifier_info_init(&info, nh);
	if (err)
		return err;

	err = nb->notifier_call(nb, event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}

static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}
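/* Worked example of nh_dev_hashfn() with NH_DEV_HASHBITS == 8 (mask 0xff):
 * ifindex 0x12c -> 0x12c ^ (0x12c >> 8) ^ (0x12c >> 16)
 *	         = 0x12c ^ 0x1 ^ 0x0 = 0x12d, masked to bucket 0x2d.
 * The xor-folding mixes the upper bits of large ifindexes into the 8-bit
 * bucket index instead of simply truncating them.
 */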
static void nexthop_free_group(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		free_percpu(nhge->stats);
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	if (nhg->resilient)
		vfree(rcu_dereference_raw(nhg->res_table));

	kfree(nhg->spare);
	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_group(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
		INIT_LIST_HEAD(&nh->fdb_list);
		spin_lock_init(&nh->lock);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	struct nh_group *nhg;

	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_res_table_upkeep_dw(struct work_struct *work);

static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
	struct nh_res_table *res_table;
	unsigned long size;

	size = struct_size(res_table, nh_buckets, num_nh_buckets);
	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (!res_table)
		return NULL;

	res_table->net = net;
	res_table->nhg_id = nhg_id;
	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
	INIT_LIST_HEAD(&res_table->uw_nh_entries);
	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
	res_table->num_nh_buckets = num_nh_buckets;
	return res_table;
}
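/* struct_size() computes header + num_nh_buckets * bucket with overflow
 * checking. NHA_RES_GROUP_BUCKETS is an NLA_U16 (see the policy above), so
 * a table can hold up to 65535 buckets; presumably that is why the table
 * is vmalloc'ed rather than kmalloc'ed, and __GFP_NOWARN suppresses the
 * allocation-failure splat since a NULL return is handled by the caller.
 */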
static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}

static void nh_res_time_set_deadline(unsigned long next_time,
				     unsigned long *deadline)
{
	if (time_before(next_time, *deadline))
		*deadline = next_time;
}

static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
{
	if (list_empty(&res_table->uw_nh_entries))
		return 0;
	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
}

static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NHA_RES_GROUP);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
			res_table->num_nh_buckets) ||
	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
			jiffies_to_clock_t(res_table->idle_timer)) ||
	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
			      nh_res_table_unbalanced_time(res_table),
			      NHA_RES_GROUP_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
{
	struct nh_grp_entry_stats *cpu_stats;

	cpu_stats = get_cpu_ptr(nhge->stats);
	u64_stats_update_begin(&cpu_stats->syncp);
	u64_stats_inc(&cpu_stats->packets);
	u64_stats_update_end(&cpu_stats->syncp);
	put_cpu_ptr(cpu_stats);
}

static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
				    u64 *ret_packets)
{
	int i;

	*ret_packets = 0;

	for_each_possible_cpu(i) {
		struct nh_grp_entry_stats *cpu_stats;
		unsigned int start;
		u64 packets;

		cpu_stats = per_cpu_ptr(nhge->stats, i);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			packets = u64_stats_read(&cpu_stats->packets);
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		*ret_packets += packets;
	}
}

static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
					 const struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	info->id = nh->id;
	info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
	info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
						    stats, nhg->num_nh),
					GFP_KERNEL);
	if (!info->nh_grp_hw_stats)
		return -ENOMEM;

	info->nh_grp_hw_stats->num_nh = nhg->num_nh;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
	}

	return 0;
}

static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_grp_hw_stats);
}
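/* nh_grp_entry_stats_read() above uses the u64_stats fetch/retry loop: on
 * 32-bit kernels the syncp seqcount guards against torn 64-bit reads while
 * the datapath increments the counter, and on 64-bit kernels it compiles
 * away. The sum over for_each_possible_cpu() is a best-effort snapshot,
 * not an atomic total.
 */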
void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
				  unsigned int nh_idx,
				  u64 delta_packets)
{
	info->hw_stats_used = true;
	info->stats[nh_idx].packets += delta_packets;
}
EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);

static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
					 struct nh_notifier_info *info)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
	}
}

static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
{
	struct nh_notifier_info info = {
		.net = nh->net,
	};
	struct net *net = nh->net;
	int err;

	if (nexthop_notifiers_is_empty(net)) {
		*hw_stats_used = false;
		return 0;
	}

	err = nh_notifier_grp_hw_stats_init(&info, nh);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
					   &info);

	/* Cache whatever we got, even if there was an error, otherwise the
	 * successful stats retrievals would get lost.
	 */
	nh_grp_hw_stats_apply_update(nh, &info);
	*hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;

	nh_notifier_grp_hw_stats_fini(&info);

	return notifier_to_errno(err);
}

static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
					struct nh_grp_entry *nhge,
					u32 op_flags)
{
	struct nlattr *nest;
	u64 packets;

	nh_grp_entry_stats_read(nhge, &packets);

	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
			 packets + nhge->packets_hw))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
			 nhge->packets_hw))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
				  u32 op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nlattr *nest;
	bool hw_stats_used;
	int err;
	int i;

	if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
		goto err_out;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nhg->hw_stats) {
		err = nh_grp_hw_stats_update(nh, &hw_stats_used);
		if (err)
			goto out;

		if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
			goto err_out;
	}

	nest = nla_nest_start(skb, NHA_GROUP_STATS);
	if (!nest)
		goto err_out;

	for (i = 0; i < nhg->num_nh; i++)
		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
						 op_flags))
			goto cancel_out;

	nla_nest_end(skb, nest);
	return 0;

cancel_out:
	nla_nest_cancel(skb, nest);
err_out:
	err = -EMSGSIZE;
out:
	return err;
}

static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
			    u32 op_flags, u32 *resp_op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	u16 weight;
	int i;

	*resp_op_flags |= NHA_OP_FLAG_RESP_GRP_RESVD_0;

	if (nhg->hash_threshold)
		group_type = NEXTHOP_GRP_TYPE_MPATH;
	else if (nhg->resilient)
		group_type = NEXTHOP_GRP_TYPE_RES;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		weight = nhg->nh_entries[i].weight - 1;

		*p++ = (struct nexthop_grp) {
			.id = nhg->nh_entries[i].nh->id,
			.weight = weight,
			.weight_high = weight >> 8,
		};
	}

	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
	    (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
	     nla_put_nh_group_stats(skb, nh, op_flags)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
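/* Wire encoding of entry weights in nla_put_nh_group(): the 16-bit weight
 * is stored minus one and split across the two 8-bit fields. For example,
 * a configured weight of 256 encodes as 255 (.weight = 0xff,
 * .weight_high = 0x00), and 512 encodes as 511 (.weight = 0xff,
 * .weight_high = 0x01). The parsing side, not shown in this excerpt,
 * presumably reconstructs ((weight_high << 8) | weight) + 1.
 */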
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags,
			u32 op_flags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
		u32 resp_op_flags = 0;

		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
		if (nla_put_nh_group(skb, nh, op_flags, &resp_op_flags) ||
		    nla_put_u32(skb, NHA_OP_FLAGS, resp_op_flags))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else if (nhi->fdb_nh) {
		if (nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
{
	return nla_total_size(0) +	/* NHA_RES_GROUP */
		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
	size_t tot = nla_total_size(sz) +
		nla_total_size(2); /* NHA_GROUP_TYPE */

	if (nhg->resilient)
		tot += nh_nlmsg_size_grp_res(nhg);

	return tot;
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh) +
		      nla_total_size(4) +	/* NHA_OP_FLAGS */
		      0;
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
	return (unsigned long)atomic_long_read(&bucket->used_time);
}

static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
			 const struct nh_res_bucket *bucket,
			 unsigned long now)
{
	unsigned long time = nh_res_bucket_used_time(bucket);

	/* Bucket was not used since it was migrated. The idle time is now. */
	if (time == bucket->migrated_time)
		return now;

	return time + res_table->idle_timer;
}

static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
	return res_table->unbalanced_since + res_table->unbalanced_timer;
}

static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
				   struct nh_res_bucket *bucket)
{
	unsigned long now = jiffies;

	atomic_long_set(&bucket->used_time, (long)now);
	bucket->migrated_time = now;
}

static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
	atomic_long_set(&bucket->used_time, (long)jiffies);
}

static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
{
	unsigned long used_time = nh_res_bucket_used_time(bucket);

	return jiffies_delta_to_clock_t(jiffies - used_time);
}

static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
			      struct nh_res_bucket *bucket, u16 bucket_index,
			      int event, u32 portid, u32 seq,
			      unsigned int nlflags,
			      struct netlink_ext_ack *extack)
{
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = bucket->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NHA_RES_BUCKET);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
			      nh_res_bucket_idle_time(bucket),
			      NHA_RES_BUCKET_PAD))
		goto nla_put_failure_nest;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_nest:
	nla_nest_cancel(skb, nest);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
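/* Bucket idle accounting, as implemented by the helpers above: the
 * datapath stamps used_time locklessly via nh_res_bucket_set_busy()
 * (an atomic_long, so no writer coordination is needed), and a bucket's
 * idle point is used_time + idle_timer. A bucket untouched since its last
 * migration (used_time == migrated_time) is treated as already idle.
 * nh_res_bucket_idle_time() reports the elapsed idle jiffies as a clock_t
 * for the NHA_RES_BUCKET_IDLE_TIME dump attribute.
 */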
static void nexthop_bucket_notify(struct nh_res_table *res_table,
				  u16 bucket_index)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nexthop *nh = nhge->nh_parent;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto errout;

	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
				 NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   bool *is_fdb, struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* Nesting groups within groups is not supported. */
		if (nhg->hash_threshold) {
			NL_SET_ERR_MSG(extack,
				       "Hash-threshold group can not be a nexthop within a group");
			return false;
		}
		if (nhg->resilient) {
			NL_SET_ERR_MSG(extack,
				       "Resilient group can not be a nexthop within a group");
			return false;
		}
		*is_fdb = nhg->fdb_nh;
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
		*is_fdb = nhi->fdb_nh;
	}

	return true;
}

static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
				   struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);

	if (!nhi->fdb_nh) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
		return -EINVAL;
	}

	if (*nh_family == AF_UNSPEC) {
		*nh_family = nhi->family;
	} else if (*nh_family != nhi->family) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
		return -EINVAL;
	}

	return 0;
}

static int nh_check_attr_group(struct net *net,
			       struct nlattr *tb[], size_t tb_size,
			       u16 nh_grp_type, struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	struct nexthop_grp *nhg;
	unsigned int i, j;

	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack,
				       "Reserved field in nexthop_grp must be